diff --git a/.buildkite/hooks/pre-checkout b/.buildkite/hooks/pre-checkout
new file mode 100755
index 0000000000..c19519b3b6
--- /dev/null
+++ b/.buildkite/hooks/pre-checkout
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+set -eo pipefail
+
+echo "--- :git: cleaning checkout"
+chmod -R +w ./_tools || true
diff --git a/.buildkite/hooks/pre-exit b/.buildkite/hooks/pre-exit
index 8b4c59cdd3..497ac79f51 100644
--- a/.buildkite/hooks/pre-exit
+++ b/.buildkite/hooks/pre-exit
@@ -3,4 +3,5 @@
set -eo pipefail
echo "--- :git: cleaning checkout"
+chmod -R +w ./_tools || true
git clean -dffx
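Both hooks restore the write bit before cleaning because `git clean -dffx` cannot unlink entries inside directories that have lost write permission. A minimal sketch of the failure mode, assuming the read-only tree under `_tools` comes from tool sources synced out of Go's module cache (which extracts packages read-only):

    # reproduce: a tree without write permission survives cleanup
    mkdir -p _tools/demo && touch _tools/demo/file
    chmod -R a-w _tools
    git clean -dffx || echo "clean failed on read-only dirs"
    chmod -R +w ./_tools || true   # restore the write bit; no-op if absent
    git clean -dffx                # now succeeds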
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index 490570947f..c71d836da5 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -14,7 +14,7 @@ steps:
command: make clean install-vendor-m3 test-all-gen
env:
CGO_ENABLED: 0
- GIMME_GO_VERSION: 1.12.x
+ GIMME_GO_VERSION: 1.13.x
plugins:
gopath-checkout#v1.0.1:
import: github.com/m3db/m3
@@ -90,7 +90,7 @@ steps:
command: make clean install-vendor-m3 docs-test
env:
CGO_ENABLED: 0
- GIMME_GO_VERSION: 1.12.x
+ GIMME_GO_VERSION: 1.13.x
plugins:
gopath-checkout#v1.0.1:
import: github.com/m3db/m3
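`GIMME_GO_VERSION` is consumed by gimme, the Go-version installer used in this CI environment; bumping to `1.13.x` tracks the newest 1.13 patch release rather than pinning one. A sketch of what the agent effectively runs, assuming the standard gimme CLI:

    # gimme resolves the `.x` wildcard to the latest matching patch release
    # and prints env exports for the installed toolchain
    eval "$(GIMME_GO_VERSION=1.13.x gimme)"
    go version   # e.g. go1.13.15, whichever patch gimme resolved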
diff --git a/.buildkite/scripts/docs_push.sh b/.buildkite/scripts/docs_push.sh
index 439e13d2f1..63e4416340 100755
--- a/.buildkite/scripts/docs_push.sh
+++ b/.buildkite/scripts/docs_push.sh
@@ -17,13 +17,13 @@ rm -rf site
# NB(schallert): if updating this build step or the one below be sure to update
# the docs-build make target (see note there as to why we can't share code
# between the two).
-mkdocs build -e docs/theme -t material
+mkdocs build -t material
mkdocs gh-deploy --force --dirty
# We do two builds to ensure any behavior of gh-deploy doesn't impact the second
# build.
rm -rf site
-mkdocs build -e docs/theme -t material
+mkdocs build -t material
git checkout -t origin/docs
# Trying to commit 0 changes would fail, so let's check if there's any changes
@@ -48,7 +48,7 @@ git clone git@github.com:m3db/m3db-operator.git
(
cd m3db-operator
- mkdocs build -e docs/theme -t material
+ mkdocs build -t material
)
if diff -qr m3db-operator/site m3db.io; then
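Dropping `-e docs/theme` across these invocations tracks mkdocs 1.x, which removed the `-e/--theme-dir` flag; the theme override presumably moves into `mkdocs.yml` instead. A hedged sketch of the equivalent configuration, which is assumed here and not shown in this diff:

    # mkdocs >= 1.0: theme customization lives in mkdocs.yml, e.g.
    #   theme:
    #     name: material
    #     custom_dir: docs/theme
    mkdocs build -t material   # same site output, theme dir read from config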
diff --git a/.ci b/.ci
index ec7ca2ce8d..96907c2669 160000
--- a/.ci
+++ b/.ci
@@ -1 +1 @@
-Subproject commit ec7ca2ce8dfbf89017ea9f7bff7be819e2279eb1
+Subproject commit 96907c2669187b166eead31d9e9a5bc4fcbb9b52
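The `.ci` change is a submodule pointer bump; nothing in this repository changes beyond the recorded commit. To pick it up locally:

    git submodule update --init .ci
    git -C .ci rev-parse HEAD   # 96907c2669187b166eead31d9e9a5bc4fcbb9b52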
diff --git a/.codecov.yml b/.codecov.yml
index de9ebbf9b1..31dadc9239 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -5,31 +5,57 @@ coverage:
status:
project:
- default: on
+ default:
+ target: auto
+ threshold: 5%
x:
+ target: auto
+ threshold: 5%
flags: x
cluster:
+ target: auto
+ threshold: 5%
flags: cluster
msg:
+ target: auto
+ threshold: 5%
flags: msg
metrics:
+ target: auto
+ threshold: 5%
flags: metrics
aggregator:
+ target: auto
+ threshold: 5%
flags: aggregator
collector:
+ target: auto
+ threshold: 5%
flags: collector
query:
+ target: auto
+ threshold: 5%
flags: query
dbnode:
+ target: auto
+ threshold: 5%
flags: dbnode
m3ninx:
+ target: auto
+ threshold: 5%
flags: m3ninx
m3nsch:
+ target: auto
+ threshold: 5%
flags: m3nsch
m3em:
+ target: auto
+ threshold: 5%
flags: m3em
patch:
- default: on
+ default:
+ target: auto
+ threshold: 5%
changes:
default: off
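`target: auto` compares each status against the base commit's coverage instead of a fixed percentage, and `threshold: 5%` lets coverage drop up to five points before the status fails; the old `default: on` form used Codecov's defaults with no explicit tolerance. Each `flags:` entry binds a status to uploads tagged with that flag, as the Makefile does per subproject. A sketch using the Codecov bash uploader, where `dbnode.cov` is a hypothetical coverfile name:

    # each subproject uploads its coverfile under a flag; Codecov then applies
    # the `target: auto` / `threshold: 5%` status to that flag
    bash <(curl -s https://codecov.io/bash) -f dbnode.cov -F dbnode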
diff --git a/.fossa.yml b/.fossa.yml
index f495e83c0a..45a9f148c6 100755
--- a/.fossa.yml
+++ b/.fossa.yml
@@ -42,10 +42,10 @@ analyze:
path: src/cmd/services/m3coordinator/main
options:
allow-unresolved: true
- - name: github.com/m3db/m3/src/cmd/services/m3ctl/main
+ - name: github.com/m3db/m3/src/cmd/services/r2ctl/main
type: go
- target: github.com/m3db/m3/src/cmd/services/m3ctl/main
- path: src/cmd/services/m3ctl/main
+ target: github.com/m3db/m3/src/cmd/services/r2ctl/main
+ path: src/cmd/services/r2ctl/main
options:
allow-unresolved: true
- name: github.com/m3db/m3/src/cmd/services/m3dbnode/main
diff --git a/.gitignore b/.gitignore
index f17190a63e..b887759df3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,7 +14,7 @@
.DS_Store
test.log
-# glide manages this
+# go modules manage this
vendor/
# Build binaries
@@ -52,7 +52,7 @@ yarn-error.log*
# Used to serve m3db.io site
!m3db.io/**/*.html
!m3db.io/**/*.xml
-# glide does not manage this
+# go modules do not manage this
!m3db.io/**/vendor
# Automatically populated from asset sources
m3db.io/openapi
diff --git a/.goreleaser.yml b/.goreleaser.yml
index 57ef2289db..aa503c4abc 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -55,6 +55,8 @@ builds:
- darwin
# Issue #692 tracks Windows support.
# - windows
+ goarch:
+ - amd64
- id: m3aggregator
main: ./src/cmd/services/m3aggregator/main/main.go
binary: m3aggregator
@@ -74,14 +76,15 @@ builds:
# - windows
goarch:
- amd64
-archive:
- wrap_in_directory: true
- format_overrides:
- - goos: windows
- format: zip
- files:
- - LICENSE
- - README.md
+archives:
+ - format: tar.gz
+ wrap_in_directory: true
+ format_overrides:
+ - goos: windows
+ format: zip
+ files:
+ - LICENSE
+ - README.md
release:
github:
owner: m3db
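The re-indent under `archives:` follows newer goreleaser (v0.127.0, pinned in the Makefile below), which replaced the single `archive:` section with a list; the added `goarch: [amd64]` limits that build to amd64, matching the CHANGELOG entry for #2202. A hedged way to validate the migrated config locally without publishing:

    # --snapshot builds the archives but skips the GitHub release step
    docker run --rm -v "$(pwd)":/go/src/github.com/m3db/m3 \
      -w /go/src/github.com/m3db/m3 \
      goreleaser/goreleaser:v0.127.0 release --snapshot --rm-dist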
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 253cd5ba85..525dac6dd1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,69 +1,196 @@
# Changelog
-# 0.15.0-rc.1
+# 0.15.12
+
+## Bug Fixes
+
+- **M3Query**: Fix to Graphite movingMedian and movingAverage functions that could skip data in certain cases or cause an out of bounds error after recovery ([#2549](https://github.com/m3db/m3/pull/2549))
+- **M3Coordinator**: Fix a Graphite carbon ingest tag lifecycle bug that could cause duplicate tags ([#2549](https://github.com/m3db/m3/pull/2549))
+
+# 0.15.11
## Features
-- **M3Coordinator**: Add public API to write annotations (i.e. arbitrary bytes), next to datapoints for things like storing exemplars ([#2022](https://github.com/m3db/m3/pull/2022), [#2029](https://github.com/m3db/m3/pull/2029), [#2031](https://github.com/m3db/m3/pull/2031))
-- **M3Coordinator**: Add support for mapping rules, allowing metrics to be stored at different resolutions based on their labels/tags ([#2036](https://github.com/m3db/m3/pull/2036))
-- **M3Coordinator**: Add Graphite mapping rule support ([#2060](https://github.com/m3db/m3/pull/2060)) ([#2063](https://github.com/m3db/m3/pull/2063))
-- **M3Coordinator**: Add community contributed InfluxDB write endpoint (at /api/v1/influxdb/write) ([#2083](https://github.com/m3db/m3/pull/2083))
-- **M3Query**: Add header to support enforcing all queries in request to implicitly always include a given label/tag matcher ([#2053](https://github.com/m3db/m3/pull/2053))
-- **M3Query**: Return headers indicating incomplete results for cross-regional fanout queries when remote fails or hits a limit ([#2053](https://github.com/m3db/m3/pull/2053))
-- **M3Query**: Refactor query server to allow for custom handlers ([#2073](https://github.com/m3db/m3/pull/2073))
+- **M3Coordinator**: Add support for remapping rules and for tags that are automatically appended to metrics when aggregations are applied ([#2414](https://github.com/m3db/m3/pull/2414))
+
+## Bug Fixes
+
+- **M3DB**: Extend lifetime of compactable index segments for aggregate queries ([#2550](https://github.com/m3db/m3/pull/2550))
+
+# 0.15.10
+
+## Features
+
+- **M3DB**: Add migration task for filesets from v1.0 to v1.1 ([#2520](https://github.com/m3db/m3/pull/2520))
+
+## Bug Fixes
+
+- **M3DB**: Fix enqueue readers info file reading ([#2546](https://github.com/m3db/m3/pull/2546))
+
+## Documentation
+
+- **All**: Fix buildkite mkdocs script ([#2538](https://github.com/m3db/m3/pull/2538))
+
+# 0.15.9
## Performance
-- **M3DB**: Improve RSS memory management with madvise resulting in flat RSS usage with a steady workload as time passes block-over-block ([#2037](https://github.com/m3db/m3/pull/2037))
-- **M3DB**: Improve bootstrapping performance by allowing bootstrapping to be performed in a single pass, now possible for a lot of bootstraps to take just minutes depending on retention ([#1989](https://github.com/m3db/m3/pull/1989))
-- **M3DB**: Use zero-copy references to index data instead of copy-on-read index data for each query, substantially improving query throughput and performance ([#1839](https://github.com/m3db/m3/pull/1839))
-- **M3DB**: Further improve peer bootstrapping performance by using a document builder rather than raw memory segments ([#2078](https://github.com/m3db/m3/pull/2078))
-- **M3Query**: Substantially improve temporal function performance ([#2049](https://github.com/m3db/m3/pull/2049))
+- **M3DB**: Background cold flush process no longer blocks data snapshotting or commit log rotation ([#2508](https://github.com/m3db/m3/pull/2508))
+- **M3DB**: Avoid sorting index entries when reading data filesets during bootstrap when not required ([#2533](https://github.com/m3db/m3/pull/2533))
## Bug Fixes
-- **M3Query**: Fix edge cases with cross-zonal query fanout and add verify utility ([#1993](https://github.com/m3db/m3/pull/1993))
-- **M3DB**: Validate indexable metrics for valid utf-8 prior to insert, also includes a utility for earlier M3DB versions to remove non-utf8 index data ([#2046](https://github.com/m3db/m3/pull/2046))
-- **M3DB**: Remove incorrect error log message for missing schema with default non-protobuf namespaces ([#2013](https://github.com/m3db/m3/pull/2013))
-- **M3DB**: Fixed memory leak causing index blocks to remain in memory after flushing ([#2037](https://github.com/m3db/m3/pull/2037))
-- **M3DB**: Fix long standing possibility of double RLock acqusition ([#2128](https://github.com/m3db/m3/pull/2128))
-- **M3DB**: Remove loop in fileset writer when previous fileset encountered an error writing out index files ([#2058](https://github.com/m3db/m3/pull/2058))
-- **M3DB**: Instead of encountering an error skip entries for unowned shards in commit log bootstrapper ([#2145](https://github.com/m3db/m3/pull/2145))
-- **M3Coordinator**: Respect env and zone headers for topic API endpoints ([#2159](https://github.com/m3db/m3/pull/2159))
+- **M3Coordinator**: Respect M3Cluster headers in namespace GET ([#2518](https://github.com/m3db/m3/pull/2518))
-## Documentation
+## Documentation
-- **M3DB**: Add documentation about estimating number of unique time series ([#2062](https://github.com/m3db/m3/pull/2062))
-- **M3DB**: Update namespace configuration documentation to use simpler duration specific keys ([#2045](https://github.com/m3db/m3/pull/2045))
-- **M3Aggregator**: Add M3 aggregator Grafana dashboard ([#2064](https://github.com/m3db/m3/pull/2064))
+- **M3Aggregator**: Add M3Aggregator documentation ([#1741](https://github.com/m3db/m3/pull/1741), [#2529](https://github.com/m3db/m3/pull/2529))
+- **M3DB**: Bootstrapper documentation fixes ([#2510](https://github.com/m3db/m3/pull/2510))
+- **All**: Update mkdocs ([#2524](https://github.com/m3db/m3/pull/2524), [#2527](https://github.com/m3db/m3/pull/2527))
+- **All**: Add M3 meetup recordings ([#2495](https://github.com/m3db/m3/pull/2495))
+- **All**: Update Twitter link ([#2530](https://github.com/m3db/m3/pull/2530))
+- **All**: Fix spelling in FAQ ([#2448](https://github.com/m3db/m3/pull/2448))
## Misc
-- **All**: Add gauge metrics to measure the number of active routines for any worker pool ([#2061](https://github.com/m3db/m3/pull/2061))
-- **All**: Allow for ${ENV_VAR_NAME} expansion with YAML configuration files ([#2033](https://github.com/m3db/m3/pull/2033))
-- **All**: Add a utility for comparing performance and correctness across different versions of M3DB, enabling diffing the perf of different versions ([#2044](https://github.com/m3db/m3/pull/2044))
-- **All**: Upgrade etcd client library to 3.4.3 ([#2101](https://github.com/m3db/m3/pull/2101))
-- **All**: Include key name in watch errors ([#2138](https://github.com/m3db/m3/pull/2138))
-- **M3DB**: Add latency metrics to remote reads ([#2027](https://github.com/m3db/m3/pull/2027))
-- **M3DB**: Add metrics for async replication worker pool utilization ([#2059](https://github.com/m3db/m3/pull/2059))
-- **M3DB**: Remove carbon debug flag and rely on log debug level for debugging Carbon/Graphite mapping rules ([#2024](https://github.com/m3db/m3/pull/2024))
-- **M3Query**: Allow both GET and POST for query APIs ([#2055](https://github.com/m3db/m3/pull/2055))
-- **M3Coordinator**: Add power user API to custom set placement goal state for cluster membership and shards ([#2108](https://github.com/m3db/m3/pull/2108))
-- **M3Coordinator**: Delete M3 aggregator related etcd keys when aggregator placement deleted ([#2133](https://github.com/m3db/m3/pull/2133))
-- **M3Coordinator**: Add metrics for remote aggregator client and downsampler ([#2165](https://github.com/m3db/m3/pull/2165))
-- **M3Coordinator**: Add aggregator client maxBatchSize config for configuring buffer for data sent to aggregator ([#2166](https://github.com/m3db/m3/pull/2166))
+- **M3DB**: Add bootstrap migration config and options ([#2519](https://github.com/m3db/m3/pull/2519))
+
+# 0.15.8
+
+## Misc
+
+- **M3DB**: Pause rollout of the background cold flush process by reverting it until further testing ([6830a8cb4](https://github.com/m3db/m3/commit/6830a8cb4))
+
+# 0.15.7
+
+## Performance
+
+- **M3DB**: Background cold flush process no longer blocks data snapshotting or commit log rotation ([#2460](https://github.com/m3db/m3/pull/2460))
+- **M3DB**: Validate individual index entries on decode instead of entire file on open, to further improve bootstrap speed ([#2468](https://github.com/m3db/m3/pull/2468))
+
+## Bug Fixes
+
+- **M3DB**: Strict JSON unmarshal (disallow unknown fields) for raw HTTP/JSON DB node APIs ([#2490](https://github.com/m3db/m3/pull/2490))
+- **M3Query**: Fix to regex selectors with leading wildcard ([#2505](https://github.com/m3db/m3/pull/2505))
+
+## Documentation
-# 0.15.0-rc.0
+- **All**: Links to M3 meetup recordings ([#2494](https://github.com/m3db/m3/pull/2494))
+
+# 0.15.6
+
+## Features
+
+- **M3DB**: Add per-namespace indexing runtime options to define concurrency weighted to indexing ([#2446](https://github.com/m3db/m3/pull/2446))
+
+## Performance
+
+- **M3DB**: Faster bootstrapping with deferred index checksum and significantly lower memory bump at block rotation ([#2446](https://github.com/m3db/m3/pull/2446))
+- **M3DB**: Faster series aggregate metadata queries by intersecting postings term results with resolved query postings list ([#2441](https://github.com/m3db/m3/pull/2441))
+
+## Bug Fixes
+
+- **M3Query**: Fix label matching behavior to match regular Prometheus when the label value is ".+" or ".*" ([#2479](https://github.com/m3db/m3/pull/2479))
+- **M3Query**: Special-case requests from Go processes (such as the Prometheus Go client library) that search series metadata using Go min/max UTC values ([#2487](https://github.com/m3db/m3/pull/2487))
+- **M3Query**: Auto-detect querying for entire retention time range ([#2483](https://github.com/m3db/m3/pull/2483))
+- **M3Query**: Fix for Graphite query for metric with single identifier and no dot separated elements ([#2450](https://github.com/m3db/m3/pull/2450))
+
+## Documentation
+
+- **M3Coordinator**: Add rollup rules example documentation ([#2461](https://github.com/m3db/m3/pull/2461), [#2462](https://github.com/m3db/m3/pull/2462))
+
+## Misc
+
+- **M3DB**: Expose cluster total shards and replicas as metrics ([#2452](https://github.com/m3db/m3/pull/2452))
+
+# 0.15.5
+
+## Documentation
+
+- **All**: Minor documentation fixes ([#2438](https://github.com/m3db/m3/pull/2438))
+- **M3Query**: Add M3-Restrict-By-Tags-JSON example ([#2437](https://github.com/m3db/m3/pull/2437))
+
+## Misc
+
+- **M3DB**: Add continuous performance profiler that conditionally triggers with RPC endpoint ([#2416](https://github.com/m3db/m3/pull/2416))
+
+# 0.15.4
+
+## Features
+
+- **M3DB**: Performance increases for block rotation by streamlining indexing lock contention ([#2423](https://github.com/m3db/m3/pull/2423))
+- **M3DB**: Zero-copy of ID and fields on series index metadata re-indexing ([#2423](https://github.com/m3db/m3/pull/2423))
+- **M3Coordinator**: Add ability to restrict and block incoming series based on tag matchers ([#2430](https://github.com/m3db/m3/pull/2430))
+
+## Bug Fixes
+
+- **M3DB**: Fix an error where background compaction caused transient errors in queries ([#2432](https://github.com/m3db/m3/pull/2432))
+
+## Documentation
+
+- **M3Query**: Update config settings and cleaned up documentation for per query limits ([#2427](https://github.com/m3db/m3/pull/2427))
+
+# 0.15.3
## Features
+- **M3DB**: Ability to set per-query block limit ([#2415](https://github.com/m3db/m3/pull/2415))
+- **M3DB**: Ability to set global per-second query limit ([#2405](https://github.com/m3db/m3/pull/2405))
+
+## Bug Fixes
+
+- **M3DB**: Fix duplicate ID insertions causing transient error when flushing index block ([#2411](https://github.com/m3db/m3/pull/2411))
+- **M3Coordinator**: Mapping rules with drop policies now correctly apply to unaggregated metrics ([#2262](https://github.com/m3db/m3/pull/2262))
+- **M3Query**: Fix incorrect starting boundaries on some temporal queries ([#2413](https://github.com/m3db/m3/pull/2413))
+- **M3Query**: Fix bug in one to one matching in binary functions ([#2417](https://github.com/m3db/m3/pull/2417))
+- **M3DB**: Fix to edge case index data consistency on flush ([#2399](https://github.com/m3db/m3/pull/2399))
+
+# 0.15.2
+
+## Bug Fixes
+
+- **M3DB**: Fix require exhaustive propagation of require exhaustive option through RPC ([#2409](https://github.com/m3db/m3/pull/2409))
+
+# 0.15.1
+
+## Features
+
+- **M3DB**: Add ability to return an error when max time series limit is hit instead of partial result and warning ([#2400](https://github.com/m3db/m3/pull/2400))
+- **M3Coordinator**: Add support for namespace retention updates by API ([#2383](https://github.com/m3db/m3/pull/2383))
+
+## Bug Fixes
+
+- **M3Coordinator**: Fix Content-Type for OpenAPI handler ([#2403](https://github.com/m3db/m3/pull/2403))
+- **Build**: Build release binaries with goreleaser using Go 1.13 to match Go 1.13 docker images ([#2397](https://github.com/m3db/m3/pull/2397))
+
+## Misc
+
+- **M3DB**: Report a histogram of series blocks fetched per query ([#2381](https://github.com/m3db/m3/pull/2381))
+
+# 0.15.0
+
+## Features
+
+- **M3Ctl**: Add M3 command line tool for calling APIs and using YAML files to apply commands ([#2097](https://github.com/m3db/m3/pull/2097))
- **M3Coordinator**: Add public API to write annotations (i.e. arbitrary bytes), next to datapoints for things like storing exemplars ([#2022](https://github.com/m3db/m3/pull/2022), [#2029](https://github.com/m3db/m3/pull/2029), [#2031](https://github.com/m3db/m3/pull/2031))
- **M3Coordinator**: Add support for mapping rules, allowing metrics to be stored at different resolutions based on their labels/tags ([#2036](https://github.com/m3db/m3/pull/2036))
- **M3Coordinator**: Add Graphite mapping rule support ([#2060](https://github.com/m3db/m3/pull/2060)) ([#2063](https://github.com/m3db/m3/pull/2063))
- **M3Coordinator**: Add community contributed InfluxDB write endpoint (at /api/v1/influxdb/write) ([#2083](https://github.com/m3db/m3/pull/2083))
+- **M3Coordinator**: Add headers to pass along with requests to remote write forward targets ([#2249](https://github.com/m3db/m3/pull/2249))
+- **M3Coordinator**: Add retry to remote write forward targets ([#2299](https://github.com/m3db/m3/pull/2299))
+- **M3Coordinator**: Add in-place M3Msg topic consumer updates with a PUT request ([#2186](https://github.com/m3db/m3/pull/2186))
+- **M3Coordinator**: Add ability to rewrite tags for Prometheus remote write requests using header ([#2255](https://github.com/m3db/m3/pull/2255))
+- **M3Coordinator**: Add config for multi-process launcher and SO_REUSEPORT listen servers for non-container based multi-process scaling ([#2292](https://github.com/m3db/m3/pull/2292))
+- **M3Query**: Add Prometheus engine to complement Prometheus Remote Read, improving performance by skipping serialization/deserialization/network overhead between Prometheus and M3Query ([#2343](https://github.com/m3db/m3/pull/2343), [#2369](https://github.com/m3db/m3/pull/2369))
- **M3Query**: Add header to support enforcing all queries in request to implicitly always include a given label/tag matcher ([#2053](https://github.com/m3db/m3/pull/2053))
- **M3Query**: Return headers indicating incomplete results for cross-regional fanout queries when remote fails or hits a limit ([#2053](https://github.com/m3db/m3/pull/2053))
- **M3Query**: Refactor query server to allow for custom handlers ([#2073](https://github.com/m3db/m3/pull/2073))
+- **M3Query**: Add remote read debug parameters to look at raw data for a PromQL query and/or get results as JSON ([#2276](https://github.com/m3db/m3/pull/2276))
+- **M3Query**: Add warnings for Prometheus queries to Prometheus query JSON response ([#2265](https://github.com/m3db/m3/pull/2265))
+- **M3Query**: Add ability to set default query timeout by config ([#2226](https://github.com/m3db/m3/pull/2226))
+- **M3Aggregator**: Add M3Msg aggregator client for high throughput point to point clustered buffered delivery of metrics to aggregator ([#2171](https://github.com/m3db/m3/pull/2171))
+- **M3Aggregator**: Add rollup rule support for metrics aggregated with pre-existing timestamps, such as Prometheus metrics ([#2251](https://github.com/m3db/m3/pull/2251))
+- **M3Aggregator**: Add aggregator passthrough functionality for aggregation in a local region forwarding to a remote region for storage ([#2235](https://github.com/m3db/m3/pull/2235))
## Performance
@@ -71,36 +198,69 @@
- **M3DB**: Improve bootstrapping performance by allowing bootstrapping to be performed in a single pass, now possible for a lot of bootstraps to take just minutes depending on retention ([#1989](https://github.com/m3db/m3/pull/1989))
- **M3DB**: Use zero-copy references to index data instead of copy-on-read index data for each query, substantially improving query throughput and performance ([#1839](https://github.com/m3db/m3/pull/1839))
- **M3DB**: Further improve peer bootstrapping performance by using a document builder rather than raw memory segments ([#2078](https://github.com/m3db/m3/pull/2078))
+- **M3DB**: Concurrent indexing when building segments for newly inserted metrics ([#2146](https://github.com/m3db/m3/pull/2146))
+- **M3DB**: Decode ReadBits decompression improvements ([#2197](https://github.com/m3db/m3/pull/2197))
+- **M3DB**: Remove implicit cloning of time ranges to reduce allocs ([#2178](https://github.com/m3db/m3/pull/2178))
- **M3Query**: Substantially improve temporal function performance ([#2049](https://github.com/m3db/m3/pull/2049))
+- **M3Query**: Improve datapoint decompression speed ([#2176](https://github.com/m3db/m3/pull/2176), [#2185](https://github.com/m3db/m3/pull/2185), [#2190](https://github.com/m3db/m3/pull/2190))
+- **M3Query**: Read bits uses an optimized byte reader ([#2205](https://github.com/m3db/m3/pull/2205))
+- **M3Coordinator**: Ensure coordinator does not grow M3Msg buffer if a message is over the max size ([#2207](https://github.com/m3db/m3/pull/2207))
## Bug Fixes
-- **M3Query**: Fix edge cases with cross-zonal query fanout and add verify utility ([#1993](https://github.com/m3db/m3/pull/1993))
+- **M3Aggregator**: Take last value by wall clock timestamp, not arrival time, to avoid late arrivals overwriting actual later occurring values ([#2199](https://github.com/m3db/m3/pull/2199))
- **M3DB**: Validate indexable metrics for valid utf-8 prior to insert, also includes a utility for earlier M3DB versions to remove non-utf8 index data ([#2046](https://github.com/m3db/m3/pull/2046))
- **M3DB**: Remove incorrect error log message for missing schema with default non-protobuf namespaces ([#2013](https://github.com/m3db/m3/pull/2013))
- **M3DB**: Fixed memory leak causing index blocks to remain in memory after flushing ([#2037](https://github.com/m3db/m3/pull/2037))
- **M3DB**: Fix long standing possibility of double RLock acquisition ([#2128](https://github.com/m3db/m3/pull/2128))
- **M3DB**: Remove loop in fileset writer when previous fileset encountered an error writing out index files ([#2058](https://github.com/m3db/m3/pull/2058))
+- **M3DB**: Instead of encountering an error skip entries for unowned shards in commit log bootstrapper ([#2145](https://github.com/m3db/m3/pull/2145))
+- **M3DB**: Fix to avoid returning error when missing writable bucket with a cold flush ([#2188](https://github.com/m3db/m3/pull/2188))
+- **M3DB**: Set defaults and expose configuration of TChannel timeouts, this avoids idle connection growth ([#2173](https://github.com/m3db/m3/pull/2173))
+- **M3DB**: Account for Neg/Pos Offsets when building per field roaring bitmap posting lists ([#2213](https://github.com/m3db/m3/pull/2213))
+- **M3DB**: Fix to build flush errors ([#2229](https://github.com/m3db/m3/pull/2229), [#2217](https://github.com/m3db/m3/pull/2217))
+- **M3Coordinator**: Respect env and zone headers for topic API endpoints ([#2159](https://github.com/m3db/m3/pull/2159))
+- **M3Coordinator**: Add support for Graphite Grafana plugin /find POST requests ([#2153](https://github.com/m3db/m3/pull/2153))
+- **M3Coordinator**: Use tag options specified in config with M3Msg ingester ([#2212](https://github.com/m3db/m3/pull/2212))
+- **M3Coordinator**: Only honor default aggregation policies if not matched by mapping rule ([#2203](https://github.com/m3db/m3/pull/2203))
+- **M3Query**: Fix namespace resolve debug log not being written with multiple namespaces ([#2211](https://github.com/m3db/m3/pull/2211))
+- **M3Query**: Fix to temporal function regression leading to inconsistent results ([#2231](https://github.com/m3db/m3/pull/2231))
+- **M3Query**: Fix edge cases with cross-zonal query fanout and add verify utility ([#1993](https://github.com/m3db/m3/pull/1993))
+- **M3Query**: Fix issue with histogram grouping ([#2247](https://github.com/m3db/m3/pull/2247))
-## Documentation
+## Documentation
+- **M3Aggregator**: Add M3 aggregator Grafana dashboard ([#2064](https://github.com/m3db/m3/pull/2064))
+- **M3Coordinator**: Add documentation to write to multiple clusters from a single coordinator ([#2187](https://github.com/m3db/m3/pull/2187))
- **M3DB**: Add documentation about estimating number of unique time series ([#2062](https://github.com/m3db/m3/pull/2062))
- **M3DB**: Update namespace configuration documentation to use simpler duration specific keys ([#2045](https://github.com/m3db/m3/pull/2045))
-- **M3Aggregator**: Add M3 aggregator Grafana dashboard ([#2064](https://github.com/m3db/m3/pull/2064))
## Misc
+- **All**: Upgrade to Go 1.13 and switch dependency management to Go modules ([#2221](https://github.com/m3db/m3/pull/2221))
- **All**: Add gauge metrics to measure the number of active routines for any worker pool ([#2061](https://github.com/m3db/m3/pull/2061))
- **All**: Allow for ${ENV_VAR_NAME} expansion with YAML configuration files ([#2033](https://github.com/m3db/m3/pull/2033))
- **All**: Add a utility for comparing performance and correctness across different versions of M3DB, enabling diffing the perf of different versions ([#2044](https://github.com/m3db/m3/pull/2044))
- **All**: Upgrade etcd client library to 3.4.3 ([#2101](https://github.com/m3db/m3/pull/2101))
- **All**: Include key name in watch errors ([#2138](https://github.com/m3db/m3/pull/2138))
+- **Development**: Add HA Prometheus lab setup for dev M3 docker compose deployment ([#2206](https://github.com/m3db/m3/pull/2206))
+- **Development**: Temporarily disable kubeval validation to allow builds on go 1.12 ([#2241](https://github.com/m3db/m3/pull/2241))
+- **Development**: Add comparator value ingester for replaying functions against given data sets ([#2224](https://github.com/m3db/m3/pull/2224))
+- **Development**: Logging improvements ([#2222](https://github.com/m3db/m3/pull/2222), [#2225](https://github.com/m3db/m3/pull/2225))
+- **M3Aggregator**: Add a datasource variable and reuse it in all the panels of the aggregator dashboard ([#2182](https://github.com/m3db/m3/pull/2182))
+- **M3DB**: Add client bad request/internal error distinction for metrics and sampled logs ([#2201](https://github.com/m3db/m3/pull/2201))
- **M3DB**: Add latency metrics to remote reads ([#2027](https://github.com/m3db/m3/pull/2027))
- **M3DB**: Add metrics for async replication worker pool utilization ([#2059](https://github.com/m3db/m3/pull/2059))
- **M3DB**: Remove carbon debug flag and rely on log debug level for debugging Carbon/Graphite mapping rules ([#2024](https://github.com/m3db/m3/pull/2024))
-- **M3Query**: Allow both GET and POST for query APIs ([#2055](https://github.com/m3db/m3/pull/2055))
+- **M3DB**: Add metric for BootstrappedAndDurable ([#2210](https://github.com/m3db/m3/pull/2210))
+- **M3DB**: Use madvdontneed=1 in DB nodes to get a more accurate view of memory usage ([#2242](https://github.com/m3db/m3/pull/2242))
+- **M3DB**: Add trace spans for database bootstrap process helping to identify all remaining slow code paths ([#2216](https://github.com/m3db/m3/pull/2216))
- **M3Coordinator**: Add power user API to custom set placement goal state for cluster membership and shards ([#2108](https://github.com/m3db/m3/pull/2108))
- **M3Coordinator**: Delete M3 aggregator related etcd keys when aggregator placement deleted ([#2133](https://github.com/m3db/m3/pull/2133))
+- **M3Coordinator**: Add metrics for remote aggregator client and downsampler ([#2165](https://github.com/m3db/m3/pull/2165))
+- **M3Coordinator**: Add aggregator client maxBatchSize config for configuring buffer for data sent to aggregator ([#2166](https://github.com/m3db/m3/pull/2166))
+- **M3Query**: Allow both GET and POST for query APIs ([#2055](https://github.com/m3db/m3/pull/2055))
+- **M3Query**: Only build amd64 architecture for m3query releases ([#2202](https://github.com/m3db/m3/pull/2202))
# 0.14.2
@@ -109,7 +269,7 @@
- **M3DB**: Fix the persist cycle not cleaning up state for reuse when flush times cannot be calculated ([#2007](https://github.com/m3db/m3/pull/2007))
- **M3Query**: Add specialized matchers for empty EQ/NEQ matchers ([#1986](https://github.com/m3db/m3/pull/1986))
-## Misc
+## Misc
- **M3Aggregator**: Do not require aggregator ID to be joined with port and add instance initialization debug logs ([#2012](https://github.com/m3db/m3/pull/2012))
- **All**: Support env var expansion using [go.uber.org/config](go.uber.org/config) ([#2016](https://github.com/m3db/m3/pull/2016))
@@ -394,7 +554,7 @@ This changes also enables the ability to increase the fetch concurrency past the
As a result of this change, M3DB will allocate significantly less mmaps, but will create a corresponding amount of file descriptors.
-Operators may need to tune their kernel configuration to allow a higher number of open file descriptors. Please follow our [Kernel Configuration Guide](http://m3db.github.io/m3/operational_guide/kernel_configuration/) for more details.
+Operators may need to tune their kernel configuration to allow a higher number of open file descriptors. Please follow our [Kernel Configuration Guide](https://docs.m3db.io/operational_guide/kernel_configuration/) for more details.
## New Features
@@ -487,7 +647,7 @@ If you run into any issues with the upgrade or need to downgrade to a previous v
## Breaking changes
-- **M3Coordinator**: ID generation scheme must be explicitly defined in configs ([Set "legacy" if unsure, further information on migrating to 0.6.0](http://m3db.github.io/m3/how_to/query/#migration)) ([#1381](https://github.com/m3db/m3/pull/1381))
+- **M3Coordinator**: ID generation scheme must be explicitly defined in configs ([Set "legacy" if unsure, further information on migrating to 0.6.0](https://docs.m3db.io/how_to/query/#migration)) ([#1381](https://github.com/m3db/m3/pull/1381))
## New Features
@@ -510,7 +670,7 @@ If you run into any issues with the upgrade or need to downgrade to a previous v
## New Features
-- **M3Coordinator**: Add [Graphite support](http://m3db.github.io/m3/integrations/grafana/) in the form of Carbon ingestion (with configurable aggregation and storage policies), as well as direct and Grafana based Graphite querying support ([#1309](https://github.com/m3db/m3/pull/1309), [#1310](https://github.com/m3db/m3/pull/1310), [#1308](https://github.com/m3db/m3/pull/1308), [#1319](https://github.com/m3db/m3/pull/1319), [#1318](https://github.com/m3db/m3/pull/1318), [#1327](https://github.com/m3db/m3/pull/1327), [#1328](https://github.com/m3db/m3/pull/1328))
+- **M3Coordinator**: Add [Graphite support](https://docs.m3db.io/integrations/grafana/) in the form of Carbon ingestion (with configurable aggregation and storage policies), as well as direct and Grafana based Graphite querying support ([#1309](https://github.com/m3db/m3/pull/1309), [#1310](https://github.com/m3db/m3/pull/1310), [#1308](https://github.com/m3db/m3/pull/1308), [#1319](https://github.com/m3db/m3/pull/1319), [#1318](https://github.com/m3db/m3/pull/1318), [#1327](https://github.com/m3db/m3/pull/1327), [#1328](https://github.com/m3db/m3/pull/1328))
- **M3Coordinator**: Add tag completion API ([#1175](https://github.com/m3db/m3/pull/1175))
- **M3Coordinator**: Add new opt-in ID generation function that will never collide ([#1286](https://github.com/m3db/m3/pull/1286))
- **M3DB**: Add [endpoint](https://m3db.io/openapi/#operation/databaseConfigSetBootstrappers) for setting database bootstrapers dynamically([#1239](https://github.com/m3db/m3/pull/1239))
diff --git a/DEVELOPER.md b/DEVELOPER.md
index 426d991a1e..c0704b4e25 100644
--- a/DEVELOPER.md
+++ b/DEVELOPER.md
@@ -46,7 +46,7 @@ make m3dbnode
## Running the M3 stack locally
-Follow the instructions in `./scripts/development/m3_stack/README.md`
+Follow the instructions in [this README](./scripts/development/m3_stack/README.md).
## Testing Changes
diff --git a/Makefile b/Makefile
index b3ed88dd1b..74ec2e4b17 100644
--- a/Makefile
+++ b/Makefile
@@ -16,8 +16,8 @@ gopath_bin_path := $(GOPATH)/bin
m3_package := github.com/m3db/m3
m3_package_path := $(gopath_prefix)/$(m3_package)
mockgen_package := github.com/golang/mock/mockgen
-retool_bin_path := $(m3_package_path)/_tools/bin
-combined_bin_paths := $(retool_bin_path):$(gopath_bin_path)
+tools_bin_path := $(abspath ./_tools/bin)
+combined_bin_paths := $(tools_bin_path):$(gopath_bin_path)
retool_src_prefix := $(m3_package_path)/_tools/src
retool_package := github.com/twitchtv/retool
metalint_check := .ci/metalint.sh
@@ -32,7 +32,6 @@ assets_rules_dir := generated/assets
thrift_output_dir := generated/thrift/rpc
thrift_rules_dir := generated/thrift
vendor_prefix := vendor
-bad_trace_dep := go.etcd.io/etcd/vendor/golang.org/x/net/trace
cache_policy ?= recently_read
genny_target ?= genny-all
@@ -42,7 +41,9 @@ GO_BUILD_LDFLAGS_CMD := $(abspath ./scripts/go-build-ldflags.sh)
GO_BUILD_LDFLAGS := $(shell $(GO_BUILD_LDFLAGS_CMD) LDFLAG)
GO_BUILD_COMMON_ENV := CGO_ENABLED=0
LINUX_AMD64_ENV := GOOS=linux GOARCH=amd64 $(GO_BUILD_COMMON_ENV)
-GO_RELEASER_DOCKER_IMAGE := goreleaser/goreleaser:v0.117.2
+# GO_RELEASER_DOCKER_IMAGE is the latest goreleaser image that supports Go 1.13
+GO_RELEASER_DOCKER_IMAGE := goreleaser/goreleaser:v0.127.0
+GO_RELEASER_RELEASE_ARGS ?= --rm-dist
GO_RELEASER_WORKING_DIR := /go/src/github.com/m3db/m3
GOMETALINT_VERSION := v2.0.5
@@ -61,11 +62,11 @@ SERVICES := \
m3aggregator \
m3query \
m3collector \
- m3ctl \
m3em_agent \
m3nsch_server \
m3nsch_client \
m3comparator \
+ r2ctl \
SUBDIRS := \
x \
@@ -81,19 +82,20 @@ SUBDIRS := \
m3ninx \
aggregator \
ctl \
- kube \
TOOLS := \
read_ids \
read_index_ids \
read_data_files \
read_index_files \
+ read_index_segments \
clone_fileset \
dtest \
verify_data_files \
verify_index_files \
carbon_load \
docs_test \
+ m3ctl \
.PHONY: setup
setup:
@@ -101,27 +103,15 @@ setup:
.PHONY: install-vendor-m3
install-vendor-m3:
- [ -d $(VENDOR) ] || make install-vendor
- # See comment for "install-vendor-m3-remove-bad-dep" why required and the TODO.
- make install-vendor-m3-remove-bad-dep
-
-# Some deps were causing panics when using GRPC and etcd libraries were used.
-# See issue: https://github.com/etcd-io/etcd/issues/9357
-# TODO: Move M3 to go mod to avoid the issue entirely instead of this hack
-# (which is bad and we should feel bad).
-# $ go test -v
-# panic: /debug/requests is already registered. You may have two independent
-# copies of golang.org/x/net/trace in your binary, trying to maintain separate
-# state. This may involve a vendored copy of golang.org/x/net/trace.
-#
-# goroutine 1 [running]:
-# github.com/m3db/m3/vendor/go.etcd.io/etcd/vendor/golang.org/x/net/trace.init.0()
-# /Users/r/go/src/github.com/m3db/m3/vendor/go.etcd.io/etcd/vendor/golang.org/x/net/trace/trace.go:123 +0x1cd
-# exit status 2
-# FAIL github.com/m3db/m3/src/query/remote 0.024s
-.PHONY: install-vendor-m3-remove-bad-dep
-install-vendor-m3-remove-bad-dep:
- ([ -d $(VENDOR)/$(bad_trace_dep) ] && rm -rf $(VENDOR)/$(bad_trace_dep)) || (echo "No bad trace dep" > /dev/null)
+ [ -d $(VENDOR) ] || GOSUMDB=off go mod vendor
+
+.PHONY: docker-dev-prep
+docker-dev-prep:
+ mkdir -p ./bin/config
+
+ # Hacky way to find all configs and put into ./bin/config/
+ find ./src | fgrep config | fgrep ".yml" | xargs -I{} cp {} ./bin/config/
+ find ./src | fgrep config | fgrep ".yaml" | xargs -I{} cp {} ./bin/config/
define SERVICE_RULES
@@ -141,11 +131,7 @@ $(SERVICE)-linux-amd64:
.PHONY: $(SERVICE)-docker-dev
$(SERVICE)-docker-dev: clean-build $(SERVICE)-linux-amd64
- mkdir -p ./bin/config
-
- # Hacky way to find all configs and put into ./bin/config/
- find ./src | fgrep config | fgrep ".yml" | xargs -I{} cp {} ./bin/config/
- find ./src | fgrep config | fgrep ".yaml" | xargs -I{} cp {} ./bin/config/
+ make docker-dev-prep
# Build development docker image
docker build -t $(SERVICE):dev -t quay.io/m3dbtest/$(SERVICE):dev-$(USER) -f ./docker/$(SERVICE)/development.Dockerfile ./bin
@@ -188,38 +174,27 @@ tools-linux-amd64:
all: metalint test-ci-unit test-ci-integration services tools
@echo Made all successfully
-.PHONY: install-retool
-install-retool:
- @which retool >/dev/null || go get $(retool_package)
-
.PHONY: install-tools
-install-tools: install-retool
- @echo "Installing retool dependencies"
- PATH=$(PATH):$(gopath_bin_path) retool $(retool_base_args) sync
- PATH=$(PATH):$(gopath_bin_path) retool $(retool_base_args) build
-
- @# NB(r): to ensure correct version of mock-gen is present we match the version
- @# of the retool installed mockgen, and if not a match in binary contents, then
- @# we explicitly install at the version we desire.
- @# We cannot solely use the retool binary as mock-gen requires its full source
- @# code to be present in the GOPATH at runtime.
- @echo "Installing mockgen"
- $(eval curr_mockgen_md5=`cat $(gopath_bin_path)/mockgen | go run $(m3_package_path)/scripts/md5/md5.go`)
- $(eval retool_mockgen_md5=`cat $(retool_bin_path)/mockgen | go run $(m3_package_path)/scripts/md5/md5.go`)
- @test "$(curr_mockgen_md5)" = "$(retool_mockgen_md5)" && echo "Mockgen already up to date" || ( \
- echo "Installing mockgen from Retool directory" && \
- rm -rf $(gopath_prefix)/$(mockgen_package) && \
- mkdir -p $(shell dirname $(gopath_prefix)/$(mockgen_package)) && \
- cp -r $(retool_src_prefix)/$(mockgen_package) $(gopath_prefix)/$(mockgen_package) && \
- (rm $(gopath_bin_path)/mockgen || echo "No installed mockgen" > /dev/null) && \
- cp $(retool_bin_path)/mockgen $(gopath_bin_path)/mockgen && \
- echo "Installed mockgen from Retool directory" \
- )
+install-tools:
+ @echo "Installing build tools"
+ GOBIN=$(tools_bin_path) go install github.com/fossas/fossa-cli/cmd/fossa
+ GOBIN=$(tools_bin_path) go install github.com/golang/mock/mockgen
+ GOBIN=$(tools_bin_path) go install github.com/google/go-jsonnet/cmd/jsonnet
+ GOBIN=$(tools_bin_path) go install github.com/m3db/build-tools/linters/badtime
+ GOBIN=$(tools_bin_path) go install github.com/m3db/build-tools/linters/importorder
+ GOBIN=$(tools_bin_path) go install github.com/m3db/build-tools/utilities/genclean
+ GOBIN=$(tools_bin_path) go install github.com/m3db/tools/update-license
+ GOBIN=$(tools_bin_path) go install github.com/mauricelam/genny
+ GOBIN=$(tools_bin_path) go install github.com/mjibson/esc
+ GOBIN=$(tools_bin_path) go install github.com/pointlander/peg
+ GOBIN=$(tools_bin_path) go install github.com/robskillington/gorename
+ GOBIN=$(tools_bin_path) go install github.com/rakyll/statik
+ GOBIN=$(tools_bin_path) go install github.com/garethr/kubeval
.PHONY: install-gometalinter
install-gometalinter:
- @mkdir -p $(retool_bin_path)
- ./scripts/install-gometalinter.sh -b $(retool_bin_path) -d $(GOMETALINT_VERSION)
+ @mkdir -p $(tools_bin_path)
+ ./scripts/install-gometalinter.sh -b $(tools_bin_path) -d $(GOMETALINT_VERSION)
.PHONY: check-for-goreleaser-github-token
check-for-goreleaser-github-token:
@@ -232,12 +207,12 @@ check-for-goreleaser-github-token:
release: check-for-goreleaser-github-token
@echo Releasing new version
$(GO_BUILD_LDFLAGS_CMD) ECHO > $(BUILD)/release-vars.env
- docker run -e "GITHUB_TOKEN=$(GITHUB_TOKEN)" --env-file $(BUILD)/release-vars.env -v $(PWD):$(GO_RELEASER_WORKING_DIR) -w $(GO_RELEASER_WORKING_DIR) $(GO_RELEASER_DOCKER_IMAGE) release --rm-dist
+ docker run -e "GITHUB_TOKEN=$(GITHUB_TOKEN)" --env-file $(BUILD)/release-vars.env -v $(PWD):$(GO_RELEASER_WORKING_DIR) -w $(GO_RELEASER_WORKING_DIR) $(GO_RELEASER_DOCKER_IMAGE) release $(GO_RELEASER_RELEASE_ARGS)
.PHONY: release-snapshot
release-snapshot: check-for-goreleaser-github-token
@echo Creating snapshot release
- docker run -e "GITHUB_TOKEN=$(GITHUB_TOKEN)" -v $(PWD):$(GO_RELEASER_WORKING_DIR) -w $(GO_RELEASER_WORKING_DIR) $(GO_RELEASER_DOCKER_IMAGE) --snapshot --rm-dist
+ make release GO_RELEASER_RELEASE_ARGS="--snapshot --rm-dist"
.PHONY: docs-container
docs-container:
@@ -250,15 +225,15 @@ docs-container:
# shell).
.PHONY: docs-build
docs-build: docs-container
- docker run -v $(PWD):/m3db --rm m3db-docs "mkdocs build -e docs/theme -t material"
+ docker run -v $(PWD):/m3db --rm m3db-docs "mkdocs build -t material"
.PHONY: docs-serve
docs-serve: docs-container
- docker run -v $(PWD):/m3db -p 8000:8000 -it --rm m3db-docs "mkdocs serve -e docs/theme -t material -a 0.0.0.0:8000"
+ docker run -v $(PWD):/m3db -p 8000:8000 -it --rm m3db-docs "mkdocs serve -t material -a 0.0.0.0:8000"
.PHONY: docs-deploy
docs-deploy: docs-container
- docker run -v $(PWD):/m3db --rm -v $(HOME)/.ssh/id_rsa:/root/.ssh/id_rsa:ro -it m3db-docs "mkdocs build -e docs/theme -t material && mkdocs gh-deploy --force --dirty"
+ docker run -v $(PWD):/m3db --rm -v $(HOME)/.ssh/id_rsa:/root/.ssh/id_rsa:ro -it m3db-docs "mkdocs build -t material && mkdocs gh-deploy --force --dirty"
.PHONY: docs-validate
docs-validate: docs_test
@@ -276,12 +251,16 @@ docker-integration-test:
@echo "--- Running Docker integration test"
./scripts/docker-integration-tests/run.sh
-
.PHONY: docker-compatibility-test
docker-compatibility-test:
@echo "--- Running Prometheus compatibility test"
./scripts/comparator/run.sh
+.PHONY: prom-compat
+prom-compat:
+ @echo "--- Running local Prometheus compatibility test"
+ CI="false" make docker-compatibility-test
+
.PHONY: site-build
site-build:
@echo "Building site"
@@ -291,8 +270,8 @@ site-build:
.PHONY: config-gen
config-gen: install-tools
@echo "--- Generating configs"
- $(retool_bin_path)/jsonnet -S $(m3_package_path)/config/m3db/local-etcd/m3dbnode_cmd.jsonnet > $(m3_package_path)/config/m3db/local-etcd/generated.yaml
- $(retool_bin_path)/jsonnet -S $(m3_package_path)/config/m3db/clustered-etcd/m3dbnode_cmd.jsonnet > $(m3_package_path)/config/m3db/clustered-etcd/generated.yaml
+ $(tools_bin_path)/jsonnet -S $(m3_package_path)/config/m3db/local-etcd/m3dbnode_cmd.jsonnet > $(m3_package_path)/config/m3db/local-etcd/generated.yaml
+ $(tools_bin_path)/jsonnet -S $(m3_package_path)/config/m3db/clustered-etcd/m3dbnode_cmd.jsonnet > $(m3_package_path)/config/m3db/clustered-etcd/generated.yaml
SUBDIR_TARGETS := \
mock-gen \
@@ -314,23 +293,11 @@ test-ci-big-unit: test-big-base
.PHONY: test-ci-integration
test-ci-integration:
- INTEGRATION_TIMEOUT=4m TEST_SERIES_CACHE_POLICY=$(cache_policy) make test-base-ci-integration
+ INTEGRATION_TIMEOUT=10m TEST_SERIES_CACHE_POLICY=$(cache_policy) make test-base-ci-integration
$(process_coverfile) $(coverfile)
define SUBDIR_RULES
-# We override the rules for `*-gen-kube` to just generate the kube manifest
-# bundle.
-ifeq ($(SUBDIR), kube)
-
-# Builds the single kube bundle from individual manifest files.
-all-gen-kube: install-tools
- @echo "--- Generating kube bundle"
- @./kube/scripts/build_bundle.sh
- find kube -name '*.yaml' -print0 | PATH=$(combined_bin_paths):$(PATH) xargs -0 kubeval -v=1.12.0
-
-else
-
.PHONY: mock-gen-$(SUBDIR)
mock-gen-$(SUBDIR): install-tools
@echo "--- Generating mocks $(SUBDIR)"
@@ -355,12 +322,14 @@ asset-gen-$(SUBDIR): install-tools
@[ ! -d src/$(SUBDIR)/$(assets_rules_dir) ] || \
PATH=$(combined_bin_paths):$(PATH) PACKAGE=$(m3_package) $(auto_gen) src/$(SUBDIR)/$(assets_output_dir) src/$(SUBDIR)/$(assets_rules_dir)
+# NB(schallert): gorename (used by our genny process) doesn't work with go
+# modules https://github.com/golang/go/issues/34222
.PHONY: genny-gen-$(SUBDIR)
genny-gen-$(SUBDIR): install-tools
@echo "--- Generating genny files $(SUBDIR)"
@[ ! -f $(SELF_DIR)/src/$(SUBDIR)/generated-source-files.mk ] || \
- PATH=$(combined_bin_paths):$(PATH) make -f $(SELF_DIR)/src/$(SUBDIR)/generated-source-files.mk $(genny_target)
- @PATH=$(combined_bin_paths):$(PATH) bash -c "source ./scripts/auto-gen-helpers.sh && gen_cleanup_dir '*_gen.go' $(SELF_DIR)/src/$(SUBDIR)/ && gen_cleanup_dir '*_gen_test.go' $(SELF_DIR)/src/$(SUBDIR)/"
+ PATH=$(combined_bin_paths):$(PATH) GO111MODULE=off make -f $(SELF_DIR)/src/$(SUBDIR)/generated-source-files.mk $(genny_target)
+ @PATH=$(combined_bin_paths):$(PATH) GO111MODULE=off bash -c "source ./scripts/auto-gen-helpers.sh && gen_cleanup_dir '*_gen.go' $(SELF_DIR)/src/$(SUBDIR)/ && gen_cleanup_dir '*_gen_test.go' $(SELF_DIR)/src/$(SUBDIR)/"
.PHONY: license-gen-$(SUBDIR)
license-gen-$(SUBDIR): install-tools
@@ -416,7 +385,7 @@ test-ci-big-unit-$(SUBDIR):
.PHONY: test-ci-integration-$(SUBDIR)
test-ci-integration-$(SUBDIR):
@echo "--- test-ci-integration $(SUBDIR)"
- SRC_ROOT=./src/$(SUBDIR) PANIC_ON_INVARIANT_VIOLATED=true INTEGRATION_TIMEOUT=4m TEST_SERIES_CACHE_POLICY=$(cache_policy) make test-base-ci-integration
+ SRC_ROOT=./src/$(SUBDIR) PANIC_ON_INVARIANT_VIOLATED=true INTEGRATION_TIMEOUT=10m TEST_SERIES_CACHE_POLICY=$(cache_policy) make test-base-ci-integration
@echo "--- uploading coverage report"
$(codecov_push) -f $(coverfile) -F $(SUBDIR)
@@ -426,8 +395,6 @@ metalint-$(SUBDIR): install-gometalinter install-linter-badtime install-linter-i
@(PATH=$(combined_bin_paths):$(PATH) $(metalint_check) \
$(metalint_config) $(metalint_exclude) src/$(SUBDIR))
-endif
-
endef
# generate targets for each SUBDIR in SUBDIRS based on the rules specified above.
@@ -444,6 +411,25 @@ endef
# of metalint and finishes faster.
$(foreach SUBDIR_TARGET, $(filter-out metalint,$(SUBDIR_TARGETS)), $(eval $(SUBDIR_TARGET_RULE)))
+# Builds the single kube bundle from individual manifest files.
+.PHONY: kube-gen-all
+kube-gen-all: install-tools
+ @echo "--- Generating kube bundle"
+ @./kube/scripts/build_bundle.sh
+ find kube -name '*.yaml' -print0 | PATH=$(combined_bin_paths):$(PATH) xargs -0 kubeval -v=1.12.0
+
+.PHONY: go-mod-tidy
+go-mod-tidy:
+ @echo "--- :golang: tidying modules"
+ go mod tidy
+
+.PHONY: all-gen
+all-gen: \
+ install-tools \
+ $(foreach SUBDIR_TARGET, $(filter-out metalint all-gen,$(SUBDIR_TARGETS)), $(SUBDIR_TARGET)) \
+ kube-gen-all \
+ go-mod-tidy
+
.PHONY: build-ui-ctl
build-ui-ctl:
ifeq ($(shell ls ./src/ctl/ui/build 2>/dev/null),)
@@ -470,7 +456,7 @@ build-ui-ctl-statik-gen: build-ui-ctl-statik license-gen-ctl
.PHONY: build-ui-ctl-statik
build-ui-ctl-statik: build-ui-ctl install-tools
mkdir -p ./src/ctl/generated/ui
- $(retool_bin_path)/statik -m -f -src ./src/ctl/ui/build -dest ./src/ctl/generated/ui -p statik
+ $(tools_bin_path)/statik -m -f -src ./src/ctl/ui/build -dest ./src/ctl/generated/ui -p statik
.PHONY: node-yarn-run
node-yarn-run:
@@ -492,9 +478,9 @@ else
endif
.PHONY: metalint
-metalint: install-gometalinter install-linter-badtime install-linter-importorder
+metalint: install-gometalinter install-tools
@echo "--- metalinting src/"
- @(PATH=$(retool_bin_path):$(PATH) $(metalint_check) \
+ @(PATH=$(tools_bin_path):$(PATH) $(metalint_check) \
$(metalint_config) $(metalint_exclude) $(m3_package_path)/src/)
# Tests that all currently generated types match their contents if they were regenerated
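Net effect of the Makefile rework: retool is gone, tools are plain `go install` binaries kept repo-local via `GOBIN`, and vendoring is delegated to Go modules. A sketch of the resulting developer flow from the module root:

    GOSUMDB=off go mod vendor            # what install-vendor-m3 now does
    make install-tools                   # populates ./_tools/bin via GOBIN
    PATH="$(pwd)/_tools/bin:$PATH" command -v mockgen   # resolves the repo-local tool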
diff --git a/README.md b/README.md
index d12e11015d..96fa1fe0bb 100644
--- a/README.md
+++ b/README.md
@@ -1,35 +1,31 @@
-# M3 [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![FOSSA Status][fossa-img]][fossa] [![Gitter chat][gitter-img]][gitter]
+# M3 [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![FOSSA Status][fossa-img]][fossa]
-[Distributed TSDB](http://m3db.github.io/m3/m3db/) and [Query Engine](http://m3db.github.io/m3/how_to/query/), [Prometheus Sidecar](http://m3db.github.io/m3/integrations/prometheus/), [Metrics Aggregator](http://m3db.github.io/m3/introduction/components/components/#m3-aggregator), and more. "More" now includes [Graphite storage and query engine](http://m3db.github.io/m3/integrations/graphite/)!
+[Distributed TSDB](https://docs.m3db.io/m3db/) and [Query Engine](https://docs.m3db.io/how_to/query/), [Prometheus Sidecar](https://docs.m3db.io/integrations/prometheus/), [Metrics Aggregator](https://docs.m3db.io/overview/components/#m3-aggregator), and more, such as a [Graphite storage and query engine](https://docs.m3db.io/integrations/graphite/).
More information:
-- [Documentation](https://m3db.github.io/m3/)
+- [Documentation](https://docs.m3db.io/)
- [Developer: Getting Started](https://github.com/m3db/m3/blob/master/DEVELOPER.md)
-- [Slack (primary chat channel)](http://bit.ly/m3slack)
-- [Gitter (deprecated chat channel)](https://gitter.im/m3db/Lobby)
+- [Slack](http://bit.ly/m3slack)
- [Forum (Google Group)](https://groups.google.com/forum/#!forum/m3db)
-- [Twitter](https://twitter.com/m3db_io)
+- [Twitter](https://twitter.com/m3metrics)
## Community meetings
-M3 contributors and maintainers have monthly (every four weeks) meetings at 11:00 AM (USA Pacific) on Tuesday.
+M3 contributors and maintainers have monthly (every four weeks) meetings. Join our M3 meetup group to receive notifications on upcoming meetings:
+[https://www.meetup.com/M3-Community/](https://www.meetup.com/M3-Community/).
-An initial agenda is posted to the [community meeting shared Google doc](https://docs.google.com/document/d/1eGAd2A8FVtiip5wHxHnmSkz7e_qHc9k_4hGI0vUGdHM/edit?usp=sharing) a day before each meeting, and everyone is welcome to suggest additional topics or other agendas.
+Recordings of past meetups can be found here: [https://vimeo.com/user/120001164/folder/2290331](https://vimeo.com/user/120001164/folder/2290331).
-You can add the following public Google Calendar if you're using Google Calendar (so that any changes are reflected in your own calendar when the invite is updated):
-[https://calendar.google.com/calendar?cid=aWc5YWFxZWw0azFmMnRoZDA2dHBtZzZva2tAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ](https://calendar.google.com/calendar?cid=aWc5YWFxZWw0azFmMnRoZDA2dHBtZzZva2tAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ)
+## Office Hours
-For those not using Google Calendar, please download and import the following iCalendar (.ics) files to calendar system for the monthly invite:
-[https://calendar.google.com/calendar/ical/ig9aaqel4k1f2thd06tpmg6okk%40group.calendar.google.com/public/basic.ics](https://calendar.google.com/calendar/ical/ig9aaqel4k1f2thd06tpmg6okk%40group.calendar.google.com/public/basic.ics)
-
-Join Zoom Meeting: [us04web.zoom.us/j/519434268](https://us04web.zoom.us/j/519434268)
+Members of the M3 team hold office hours on the third Thursday of every month from 11am to 1pm EST. To join, sign up for a slot here: [https://calendly.com/chronosphere-intro/m3-community-office-hours](https://calendly.com/chronosphere-intro/m3-community-office-hours).
## Test it out
-The easiest way to testing out M3 is to follow one of the guides from the documentation. For a fully comprehensive getting started guide, see our [single node how-to](https://m3db.github.io/m3/how_to/single_node/).
+The easiest way to test out M3 is to follow one of the guides from the documentation. For a fully comprehensive getting started guide, see our [single node how-to](https://docs.m3db.io/how_to/single_node/).
### Starting a node
@@ -143,5 +139,3 @@ This project is released under the [Apache License, Version 2.0](LICENSE).
[cov]: https://codecov.io/gh/m3db/m3
[fossa-img]: https://app.fossa.io/api/projects/custom%2B4529%2Fgithub.com%2Fm3db%2Fm3.svg?type=shield
[fossa]: https://app.fossa.io/projects/custom%2B4529%2Fgithub.com%2Fm3db%2Fm3?ref=badge_shield
-[gitter-img]: https://badges.gitter.im/m3db.png
-[gitter]: https://gitter.im/m3db/Lobby
diff --git a/docker-compose.yml b/docker-compose.yml
index 3d4348e289..5097f20e75 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,5 +1,5 @@
app:
- image: golang:1.12-stretch
+ image: golang:1.13-stretch
volumes:
- .:/go/src/github.com/m3db/m3
- /usr/bin/buildkite-agent:/usr/bin/buildkite-agent
diff --git a/docker/grafana/Dockerfile b/docker/grafana/Dockerfile
index 1eeb6de1f4..8ec094c156 100644
--- a/docker/grafana/Dockerfile
+++ b/docker/grafana/Dockerfile
@@ -8,6 +8,8 @@ COPY ./integrations/grafana/m3query_dashboard.json /tmp/grafana_dashboards/m3que
COPY ./integrations/grafana/m3coordinator_dashboard.json /tmp/grafana_dashboards/m3coordinator_dashboard.json
COPY ./integrations/grafana/m3db_dashboard.json /tmp/grafana_dashboards/m3db_dashboard.json
COPY ./integrations/grafana/temporal_function_comparison.json /tmp/grafana_dashboards/temporal_function_comparison.json
+COPY ./integrations/grafana/m3aggregator_dashboard.json /tmp/grafana_dashboards/m3aggregator_dashboard.json
+COPY ./integrations/grafana/m3aggregator_end_to_end_details.json /tmp/grafana_dashboards/m3aggregator_end_to_end_details.json
# Need to replace datasource template variable with name of actual data source so auto-import
# JustWorksTM. Use a temporary directory to host the dashboards since the default
diff --git a/docker/m3aggregator/Dockerfile b/docker/m3aggregator/Dockerfile
index cb2480b066..b637388c29 100644
--- a/docker/m3aggregator/Dockerfile
+++ b/docker/m3aggregator/Dockerfile
@@ -1,9 +1,9 @@
# stage 1: build
-FROM golang:1.12-alpine3.9 AS builder
+FROM golang:1.13-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
-# Install Glide
-RUN apk add --update glide git make bash
+# Install deps
+RUN apk add --update git make bash
# Add source code
RUN mkdir -p /go/src/github.com/m3db/m3
@@ -15,10 +15,10 @@ RUN cd /go/src/github.com/m3db/m3/ && \
make m3aggregator-linux-amd64
# stage 2: lightweight "release"
-FROM alpine:latest
+FROM alpine:3.11
LABEL maintainer="The M3DB Authors "
-EXPOSE 5000/tcp 6000/tcp 60001/tcp 7203/tcp 9000-9004/tcp
+EXPOSE 5000/tcp 6000/tcp 6001/tcp
RUN apk add --no-cache curl jq
diff --git a/docker/m3aggregator/development.Dockerfile b/docker/m3aggregator/development.Dockerfile
new file mode 100644
index 0000000000..ee649c4d64
--- /dev/null
+++ b/docker/m3aggregator/development.Dockerfile
@@ -0,0 +1,10 @@
+FROM alpine:3.11
+LABEL maintainer="The M3DB Authors "
+
+EXPOSE 5000/tcp 6000/tcp 6001/tcp
+
+ADD ./m3aggregator /bin/m3aggregator
+ADD ./config/m3aggregator.yml /etc/m3aggregator/m3aggregator.yml
+
+ENTRYPOINT [ "/bin/m3aggregator" ]
+CMD [ "-f", "/etc/m3aggregator/m3aggregator.yml" ]
diff --git a/docker/m3collector/Dockerfile b/docker/m3collector/Dockerfile
index 5c81965a2c..f0e55b9930 100644
--- a/docker/m3collector/Dockerfile
+++ b/docker/m3collector/Dockerfile
@@ -1,9 +1,9 @@
# stage 1: build
-FROM golang:1.12-alpine3.9 AS builder
+FROM golang:1.13-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
-# Install Glide
-RUN apk add --update glide git make bash
+# Install deps
+RUN apk add --update git make bash
# Add source code
RUN mkdir -p /go/src/github.com/m3db/m3
@@ -15,7 +15,7 @@ RUN cd /go/src/github.com/m3db/m3/ && \
make m3collector-linux-amd64
# stage 2: lightweight "release"
-FROM alpine:latest
+FROM alpine:3.11
LABEL maintainer="The M3DB Authors "
EXPOSE 7206-7207/tcp
diff --git a/docker/m3collector/development.Dockerfile b/docker/m3collector/development.Dockerfile
new file mode 100644
index 0000000000..47a0a6942d
--- /dev/null
+++ b/docker/m3collector/development.Dockerfile
@@ -0,0 +1,10 @@
+FROM alpine:3.11
+LABEL maintainer="The M3DB Authors "
+
+EXPOSE 7206/tcp 7207/tcp
+
+ADD ./m3collector /bin/m3collector
+ADD ./config/m3collector.yml /etc/m3collector/m3collector.yml
+
+ENTRYPOINT [ "/bin/m3collector" ]
+CMD [ "-f", "/etc/m3collector/m3collector.yml" ]
diff --git a/docker/m3coordinator/Dockerfile b/docker/m3coordinator/Dockerfile
index ec15246cae..675183b68d 100644
--- a/docker/m3coordinator/Dockerfile
+++ b/docker/m3coordinator/Dockerfile
@@ -1,9 +1,9 @@
# stage 1: build
-FROM golang:1.12-alpine3.9 AS builder
+FROM golang:1.13-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
-# Install Glide
-RUN apk add --update glide git make bash
+# Install deps
+RUN apk add --update git make bash
# Add source code
RUN mkdir -p /go/src/github.com/m3db/m3
@@ -15,7 +15,7 @@ RUN cd /go/src/github.com/m3db/m3/ && \
make m3coordinator-linux-amd64
# stage 2: lightweight "release"
-FROM alpine:latest
+FROM alpine:3.11
LABEL maintainer="The M3DB Authors "
EXPOSE 7201/tcp 7203/tcp
diff --git a/docker/m3coordinator/development.Dockerfile b/docker/m3coordinator/development.Dockerfile
index 9e08aee1d7..ef519ebb6b 100644
--- a/docker/m3coordinator/development.Dockerfile
+++ b/docker/m3coordinator/development.Dockerfile
@@ -1,4 +1,4 @@
-FROM alpine:latest
+FROM alpine:3.11
LABEL maintainer="The M3DB Authors "
EXPOSE 7201/tcp 7203/tcp
diff --git a/docker/m3dbnode/Dockerfile b/docker/m3dbnode/Dockerfile
index d883f05f38..de6ca6e830 100644
--- a/docker/m3dbnode/Dockerfile
+++ b/docker/m3dbnode/Dockerfile
@@ -1,13 +1,9 @@
-# stage 1: build. We have to use an older version of alpine, as git 2.22 removes
-# the '-x' flag to a submodule command which in turn breaks glide, the old AF
-# dependency tool we have. Until we migrate to a new version of glide or go
-# modules, we need to use this older base image.
-# https://github.com/m3db/m3/issues/628
-FROM golang:1.12-alpine3.9 AS builder
+# stage 1: build
+FROM golang:1.13-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
-# Install Glide
-RUN apk add --update glide git make bash
+# Install deps
+RUN apk add --update git make bash
# Add source code
RUN mkdir -p /go/src/github.com/m3db/m3
@@ -19,9 +15,11 @@ RUN cd /go/src/github.com/m3db/m3/ && \
make m3dbnode-linux-amd64
# Stage 2: lightweight "release"
-FROM alpine:latest
+FROM alpine:3.11
LABEL maintainer="The M3DB Authors "
+ENV GODEBUG madvdontneed=1
+
EXPOSE 2379/tcp 2380/tcp 7201/tcp 7203/tcp 9000-9004/tcp
RUN apk add --no-cache curl jq
@@ -31,5 +29,7 @@ COPY --from=builder /go/src/github.com/m3db/m3/bin/m3dbnode \
/go/src/github.com/m3db/m3/scripts/m3dbnode_bootstrapped.sh \
/bin/
+ENV GODEBUG madvdontneed=1
+
ENTRYPOINT [ "/bin/m3dbnode" ]
CMD [ "-f", "/etc/m3dbnode/m3dbnode.yml" ]
diff --git a/docker/m3dbnode/Dockerfile-setcap b/docker/m3dbnode/Dockerfile-setcap
index d8a48521b1..a2b7463d27 100644
--- a/docker/m3dbnode/Dockerfile-setcap
+++ b/docker/m3dbnode/Dockerfile-setcap
@@ -1,9 +1,9 @@
# stage 1: build
-FROM golang:1.12-alpine3.9 AS builder
+FROM golang:1.13-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
-# Install Glide
-RUN apk add --update glide git make bash
+# Install deps
+RUN apk add --update git make bash
# Add source code
RUN mkdir -p /go/src/github.com/m3db/m3
@@ -15,9 +15,11 @@ RUN cd /go/src/github.com/m3db/m3/ && \
make m3dbnode-linux-amd64
# Stage 2: lightweight "release"
-FROM alpine:latest
+FROM alpine:3.11
LABEL maintainer="The M3DB Authors "
+ENV GODEBUG madvdontneed=1
+
EXPOSE 2379/tcp 2380/tcp 7201/tcp 7203/tcp 9000-9004/tcp
COPY --from=builder /go/src/github.com/m3db/m3/src/dbnode/config/m3dbnode-local-etcd.yml /etc/m3dbnode/m3dbnode.yml
@@ -30,5 +32,7 @@ COPY --from=builder /go/src/github.com/m3db/m3/bin/m3dbnode \
RUN apk add --no-cache curl jq libcap && \
setcap cap_sys_resource=+ep /bin/m3dbnode
+ENV GODEBUG madvdontneed=1
+
ENTRYPOINT [ "/bin/m3dbnode" ]
CMD [ "-f", "/etc/m3dbnode/m3dbnode.yml" ]
diff --git a/docker/m3dbnode/development.Dockerfile b/docker/m3dbnode/development.Dockerfile
index bd5af11e59..afbcd2957e 100644
--- a/docker/m3dbnode/development.Dockerfile
+++ b/docker/m3dbnode/development.Dockerfile
@@ -1,6 +1,8 @@
-FROM alpine:latest
+FROM alpine:3.11
LABEL maintainer="The M3DB Authors "
+ENV GODEBUG madvdontneed=1
+
RUN apk add --no-cache curl jq
# Add m3dbnode binary
@@ -9,5 +11,7 @@ ADD ./config/m3dbnode-local-etcd.yml /etc/m3dbnode/m3dbnode.yml
EXPOSE 2379/tcp 2380/tcp 7201/tcp 7203/tcp 9000-9004/tcp
+ENV GODEBUG madvdontneed=1
+
ENTRYPOINT [ "/bin/m3dbnode" ]
CMD [ "-f", "/etc/m3dbnode/m3dbnode.yml" ]
diff --git a/docker/m3nsch/Dockerfile b/docker/m3nsch/Dockerfile
index 7adea0104e..92101f109e 100644
--- a/docker/m3nsch/Dockerfile
+++ b/docker/m3nsch/Dockerfile
@@ -1,9 +1,9 @@
# stage 1: build
-FROM golang:1.12-alpine3.9 AS builder
+FROM golang:1.13-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
-# Install Glide
-RUN apk add --update glide git make bash
+# Install deps
+RUN apk add --update git make bash
# Add source code
RUN mkdir -p /go/src/github.com/m3db/m3
@@ -16,7 +16,7 @@ RUN cd /go/src/github.com/m3db/m3/ && \
make m3nsch_client-linux-amd64
# stage 2: lightweight "release"
-FROM alpine:latest
+FROM alpine:3.11
LABEL maintainer="The M3DB Authors "
COPY --from=builder /go/src/github.com/m3db/m3/bin/m3nsch_server /bin/
diff --git a/docker/m3query/Dockerfile b/docker/m3query/Dockerfile
index 2788256110..58a6d8fe8a 100644
--- a/docker/m3query/Dockerfile
+++ b/docker/m3query/Dockerfile
@@ -1,9 +1,9 @@
# stage 1: build
-FROM golang:1.12-alpine3.9 AS builder
+FROM golang:1.13-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
-# Install Glide
-RUN apk add --update glide git make bash
+# Install deps
+RUN apk add --update git make bash
# Add source code
RUN mkdir -p /go/src/github.com/m3db/m3
@@ -15,7 +15,7 @@ RUN cd /go/src/github.com/m3db/m3/ && \
make m3query-linux-amd64
# stage 2: lightweight "release"
-FROM alpine:latest
+FROM alpine:3.11
LABEL maintainer="The M3DB Authors "
EXPOSE 7201/tcp 7203/tcp
diff --git a/docker/m3query/development.Dockerfile b/docker/m3query/development.Dockerfile
new file mode 100644
index 0000000000..876657a355
--- /dev/null
+++ b/docker/m3query/development.Dockerfile
@@ -0,0 +1,10 @@
+FROM alpine:3.11
+LABEL maintainer="The M3DB Authors "
+
+EXPOSE 7201/tcp 7203/tcp
+
+ADD ./m3query /bin/m3query
+ADD ./config/m3query-local-etcd.yml /etc/m3query/m3query.yml
+
+ENTRYPOINT [ "/bin/m3query" ]
+CMD [ "-f", "/etc/m3query/m3query.yml" ]
diff --git a/docker/sysctl-setter/Dockerfile b/docker/sysctl-setter/Dockerfile
index 7c63bc3a3e..9a053a4fc1 100644
--- a/docker/sysctl-setter/Dockerfile
+++ b/docker/sysctl-setter/Dockerfile
@@ -1,4 +1,4 @@
-FROM alpine:latest
+FROM alpine:3.11
LABEL maintainer="The M3DB Authors "
RUN apk add --no-cache procps && echo $'#!/bin/ash\n\
diff --git a/docs/Dockerfile b/docs/Dockerfile
index 95bd225b00..a12c2ee526 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -10,8 +10,7 @@ EXPOSE 8000
# mkdocs needs git-fast-import which was stripped from the default git package
# by default to reduce size
RUN pip install \
- mkdocs==0.17.3 \
- pymdown-extensions==6.0 \
- mkdocs-material==2.7.3
+ nltk==3.4.5 \
+ mkdocs-material==5.5.3
RUN apk add --no-cache git-fast-import openssh-client
ENTRYPOINT [ "/bin/ash", "-c" ]
diff --git a/docs/common/headers_optional_read_all.md b/docs/common/headers_optional_read_all.md
new file mode 100644
index 0000000000..a21d0fbca6
--- /dev/null
+++ b/docs/common/headers_optional_read_all.md
@@ -0,0 +1,13 @@
+--8<--
+docs/common/headers_optional_read_limits.md
+--8<--
+- `M3-Restrict-By-Tags-JSON`:
+  If this header is set, it ensures specific label matching is performed as part
+of every query, including series metadata endpoints. As an example, the following
+header would unconditionally cause `globaltag=somevalue` to be part of all queries
+issued, regardless of whether they already include the label, and would also strip
+`globaltag` from appearing as a label in any of the resulting timeseries:
+```
+M3-Restrict-By-Tags-JSON: '{"match":[{"name":"globaltag","type":"EQUAL","value":"somevalue"}],"strip":["globaltag"]}'
+```
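+
+For illustration, a hedged sketch of passing this header on a query (the
+`localhost:7201` endpoint and the `up` query are assumptions, not part of the
+original example):
+```bash
+# Query with the restriction enforced; the header value is the JSON shown above.
+curl -H 'M3-Restrict-By-Tags-JSON: {"match":[{"name":"globaltag","type":"EQUAL","value":"somevalue"}],"strip":["globaltag"]}' \
+  'http://localhost:7201/api/v1/query?query=up'
+```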
+
\ No newline at end of file
diff --git a/docs/common/headers_optional_read_limits.md b/docs/common/headers_optional_read_limits.md
new file mode 100644
index 0000000000..1f262eb220
--- /dev/null
+++ b/docs/common/headers_optional_read_limits.md
@@ -0,0 +1,6 @@
+- `M3-Limit-Max-Series`:
+  If this header is set, it overrides any configured per-query time series limit. If the limit is hit, the query returns either a partial result or an error, depending on the configured require-exhaustive setting.
+- `M3-Limit-Max-Docs`:
+  If this header is set, it overrides any configured per-query time series * blocks limit (docs limit). If the limit is hit, the query returns either a partial result or an error, depending on the configured require-exhaustive setting.
+- `M3-Limit-Require-Exhaustive`:
+  If this header is set, it overrides any configured require-exhaustive setting. If "true", the query returns an error when it hits a configured limit (such as the series or docs limit) instead of a partial result. If "false", it returns a partial result of the time series matched so far, with the response header `M3-Results-Limited` detailing the limit that was hit and a warning included in the response body.
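+
+As a hedged illustration of combining these headers (the endpoint, query, and
+limit values are assumptions for the example):
+```bash
+# Cap the query at 10000 series and prefer a partial result over an error.
+curl -H 'M3-Limit-Max-Series: 10000' \
+  -H 'M3-Limit-Require-Exhaustive: false' \
+  'http://localhost:7201/api/v1/query_range?query=up&start=1590147165&end=1590150765&step=60'
+```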
\ No newline at end of file
diff --git a/docs/common/headers_optional_read_write.md b/docs/common/headers_optional_read_write_all.md
similarity index 96%
rename from docs/common/headers_optional_read_write.md
rename to docs/common/headers_optional_read_write_all.md
index 8dd8edbfe5..e08c5b8690 100644
--- a/docs/common/headers_optional_read_write.md
+++ b/docs/common/headers_optional_read_write_all.md
@@ -7,4 +7,4 @@
- `M3-Storage-Policy`:
If this header is set, it determines which aggregated namespace to read/write metrics directly to/from (bypassing any aggregation).
The value of the header must be in the format of `resolution:retention` in duration shorthand. e.g. `1m:48h` specifies 1 minute resolution and 48 hour retention. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
-Here is [an example](https://github.com/m3db/m3/blob/master/scripts/docker-integration-tests/prometheus/test.sh#L126-L146) of querying metrics from a specific namespace.
+Here is [an example](https://github.com/m3db/m3/blob/master/scripts/docker-integration-tests/prometheus/test.sh#L126-L146) of querying metrics from a specific namespace.
\ No newline at end of file
diff --git a/docs/common/headers_optional_write_all.md b/docs/common/headers_optional_write_all.md
new file mode 100644
index 0000000000..58be43b54a
--- /dev/null
+++ b/docs/common/headers_optional_write_all.md
@@ -0,0 +1,9 @@
+- `M3-Map-Tags-JSON`:
+  If this header is set, it enables dynamically mutating tags in a Prometheus write request. See issue
+[2254](https://github.com/m3db/m3/issues/2254) for further context.
+Currently only `write` is supported. As an example, the following header would unconditionally cause
+`globaltag=somevalue` to be added to all metrics in a write request:
+```
+M3-Map-Tags-JSON: '{"tagMappers":[{"write":{"tag":"globaltag","value":"somevalue"}}]}'
+```
+
\ No newline at end of file
diff --git a/docs/common/headers_placement_namespace.md b/docs/common/headers_placement_namespace.md
index da022496ed..ad99da4e7e 100644
--- a/docs/common/headers_placement_namespace.md
+++ b/docs/common/headers_placement_namespace.md
@@ -2,4 +2,4 @@
This header is used to specify the cluster environment name. If not set, the default `default_env` is used.
- `Cluster-Zone-Name`:
- This header is used to specify the cluster zone name. If not set, the default `embedded` is used.
+ This header is used to specify the cluster zone name. If not set, the default `embedded` is used.
\ No newline at end of file
diff --git a/docs/community/index.md b/docs/community/index.md
index c4e582d84f..4c0d01ba29 100644
--- a/docs/community/index.md
+++ b/docs/community/index.md
@@ -6,7 +6,7 @@ Feel free to contact us through any of the following channels:
1. Posting on the [M3 Google group](https://groups.google.com/forum/#!forum/m3db)
2. Opening issues on the [M3 GitHub page](https://github.com/m3db/m3/issues)
-3. Chatting us on the official [M3 Gitter channel](https://gitter.im/m3db/Lobby)
+3. Chatting with us on the official [Slack](http://bit.ly/m3slack)
## GitHub/OSS
diff --git a/docs/coordinator/index.md b/docs/coordinator/index.md
deleted file mode 100644
index 82b592ef2a..0000000000
--- a/docs/coordinator/index.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# M3 Coordinator, API for reading/writing metrics and M3 management
-
-M3 Coordinator is a service that coordinates reads and writes between upstream systems, such as Prometheus, and downstream systems, such as M3DB.
-
-It also provides management APIs to setup and configure different parts of M3.
-
-The coordinator is generally a bridge for read and writing different types of metrics formats and a management layer for M3.
diff --git a/docs/faqs/index.md b/docs/faqs/index.md
index d617421391..a28f9a4e10 100644
--- a/docs/faqs/index.md
+++ b/docs/faqs/index.md
@@ -1,7 +1,7 @@
# FAQs
- **Is there a way to disable M3DB embedded `etcd` and just use an external `etcd` cluster?**
-Yes, you can definitely do that. It's all just about setting the etcd endpoints in config as etcd hosts instead of M3DB hosts. See [these docs](https://m3db.github.io/m3/operational_guide/etcd/#configuring-an-external-etcd-cluster) for more information.
+Yes, you can definitely do that. It's all just about setting the etcd endpoints in config as etcd hosts instead of M3DB hosts. See [these docs](../operational_guide/etcd.md#external-etcd) for more information on configuring an external `etcd` cluster.
- **Is there a client that lets me send metrics to m3coordinator without going through Prometheus?**
Yes, you can use the [Prometheus remote write client](https://github.com/m3db/prometheus_remote_client_golang/).
@@ -20,7 +20,7 @@ Yes it stores the data (i.e. the timeseries datapoints) as well as the tags sinc
- **How are writes handled and how is the data kept consistent within M3DB?**
M3 uses quorum/majority consistency to ensure data is written to replicas in a way that can be read back consistently.
-For example, if you have a replication factor of 3 and your set your write and read consistencies to quorum, then all writes will only succeed if they make it to at least 2 of the 3 replicas, and reads will only succeed if they get results back from at least 2 of the 3 replicas
+For example, if you have a replication factor of 3 and you set your write and read consistencies to quorum, then all writes will only succeed if they make it to at least 2 of the 3 replicas, and reads will only succeed if they get results back from at least 2 of the 3 replicas
- **Do I need to restart M3DB if I add a namespace?**
If you’re adding namespaces, the m3dbnode process will pickup the new namespace without a restart.
@@ -41,10 +41,10 @@ Not yet, but that functionality is currently being worked on.
You can check if your nodes are snapshotting by looking at the `Background tasks` tab in the [M3DB Grafana dashboard](https://grafana.com/dashboards/8126).
- **How do you list all available API endpoints?**
-See [M3DB openhttps://m3db.io/openapi
+See [M3DB OpenAPI](https://m3db.io/openapi).
- **What is the recommended way to upgrade my M3 stack?**
-TBA
+See the [Upgrading M3](../operational_guide/upgrading_m3.md) guide.
- **When graphing my Prometheus data in Grafana, I see gaps. How do I resolve this?**
This is due to M3 having a concept of `null` datapoints whereas Prometheus does not. To resolve this, change `Stacking & Null value` to `Connected` under the `Visualization` tab of your graph.
@@ -73,4 +73,4 @@ TBA
Refer to the [Namespace configuration guide](../operational_guide/namespace_configuration.md).
- **How can I see the cardinality of my metrics?**
-Currently, the best way is to go to the [M3DB Node Details Dashboard](https://grafana.com/grafana/dashboards/8126) and look at the `Ticking` panel. However, this is not entirely accurate because of the way data is stored in M3DB -- time series are stored inside time-based blocks that you configure. In actuality, the `Ticking` graph shows you how many unique series there are for the most recent block that has persisted. In the future, we plan to introduce easier ways to determine the number of unique time series.
\ No newline at end of file
+Currently, the best way is to go to the [M3DB Node Details Dashboard](https://grafana.com/grafana/dashboards/8126) and look at the `Ticking` panel. However, this is not entirely accurate because of the way data is stored in M3DB -- time series are stored inside time-based blocks that you configure. In actuality, the `Ticking` graph shows you how many unique series there are for the most recent block that has persisted. In the future, we plan to introduce easier ways to determine the number of unique time series.
diff --git a/docs/how_to/aggregator.md b/docs/how_to/aggregator.md
new file mode 100644
index 0000000000..db20a5142f
--- /dev/null
+++ b/docs/how_to/aggregator.md
@@ -0,0 +1,408 @@
+# Setting up M3 Aggregator
+
+## Introduction
+
+`m3aggregator` is used to cluster stateful downsampling and rollup of metrics before they are stored in M3DB. The M3 Coordinator also performs this role but is not cluster aware. This means metrics will not get aggregated properly if you send metrics in round robin fashion to multiple M3 Coordinators for the same metrics ingestion source (e.g. Prometheus server).
+
+Similar to M3DB, `m3aggregator` supports clustering and replication by default. This means that metrics are correctly routed to the instance(s) responsible for aggregating each metric and multiple `m3aggregator` replicas can be configured such that there are no single points of failure for aggregation.
+
+## Configuration
+
+Before setting up m3aggregator, make sure that you have at least [one M3DB node running](single_node.md) and a dedicated m3coordinator set up.
+
+We highly recommend running with a replication factor of at least 2 for an `m3aggregator` deployment. If you run with a replication factor of 1, then restarting an aggregator will temporarily interrupt the stream of aggregated metrics and there will be some data loss.
+
+### Topology
+
+#### Initializing aggregator topology
+
+You can set up an m3aggregator topology by issuing a request to your coordinator (be sure to use your own hostnames, number of shards, and replication factor):
+```bash
+curl -vvvsSf -H "Cluster-Environment-Name: namespace/m3db-cluster-name" -X POST http://m3dbnode-with-embedded-coordinator:7201/api/v1/services/m3aggregator/placement/init -d '{
+ "num_shards": 64,
+ "replication_factor": 2,
+ "instances": [
+ {
+ "id": "m3aggregator01:6000",
+ "isolation_group": "availability-zone-a",
+ "zone": "embedded",
+ "weight": 100,
+ "endpoint": "m3aggregator01:6000",
+ "hostname": "m3aggregator01",
+ "port": 6000
+ },
+ {
+ "id": "m3aggregator02:6000",
+ "isolation_group": "availability-zone-b",
+ "zone": "embedded",
+ "weight": 100,
+ "endpoint": "m3aggregator02:6000",
+ "hostname": "m3aggregator02",
+ "port": 6000
+ }
+ ]
+}'
+```
+
+#### Initializing m3msg topic for m3aggregator to receive from m3coordinators to aggregate metrics
+
+Now we must set up a topic for the `m3aggregator` to receive unaggregated metrics from `m3coordinator` instances:
+
+```bash
+curl -vvvsSf -H "Cluster-Environment-Name: namespace/m3db-cluster-name" -H "Topic-Name: aggregator_ingest" -X POST http://m3dbnode-with-embedded-coordinator:7201/api/v1/topic/init -d '{
+ "numberOfShards": 64
+}'
+```
+
+#### Add m3aggregator consumer group to ingest topic
+
+Add the `m3aggregator` placement to receive traffic from the topic (make sure to set the message TTL to match your desired maximum in-memory retry message buffer):
+```bash
+curl -vvvsSf -H "Cluster-Environment-Name: namespace/m3db-cluster-name" -H "Topic-Name: aggregator_ingest" -X POST http://m3dbnode-with-embedded-coordinator:7201/api/v1/topic -d '{
+ "consumerService": {
+ "serviceId": {
+ "name": "m3aggregator",
+ "environment": "namespace/m3db-cluster-name",
+ "zone": "embedded"
+ },
+ "consumptionType": "REPLICATED",
+ "messageTtlNanos": "300000000000"
+ }
+}'
+```
+
+**Note:** 300000000000 nanoseconds is a TTL of 5 minutes, during which messages are buffered for retry.
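+
+To sanity-check the topic after initializing it, a hedged sketch assuming the
+same `/api/v1/topic` endpoint also serves reads via GET:
+```bash
+# Fetch the current state of the ingest topic, including its consumer services.
+curl -vvvsSf -H "Cluster-Environment-Name: namespace/m3db-cluster-name" \
+  -H "Topic-Name: aggregator_ingest" \
+  http://m3dbnode-with-embedded-coordinator:7201/api/v1/topic
+```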
+
+#### Initializing m3msg topic for m3coordinator to receive from m3aggregator to write to M3DB
+
+Now we must set up a topic for the `m3coordinator` to receive aggregated metrics from `m3aggregator` instances to write to M3DB:
+```bash
+curl -vvvsSf -H "Cluster-Environment-Name: namespace/m3db-cluster-name" -H "Topic-Name: aggregated_metrics" -X POST http://m3dbnode-with-embedded-coordinator:7201/api/v1/topic/init -d '{
+ "numberOfShards": 64
+}'
+```
+
+#### Initializing m3coordinator topology
+
+Then `m3coordinator` instances need to be configured to receive traffic for this topic (note: the ingest port 7507 must match the configured port for your `m3coordinator` ingest server; see the config at the bottom of this guide):
+```bash
+curl -vvvsSf -H "Cluster-Environment-Name: namespace/m3db-cluster-name" -X POST http://m3dbnode-with-embedded-coordinator:7201/api/v1/services/m3coordinator/placement/init -d '{
+ "instances": [
+ {
+ "id": "m3coordinator01",
+ "zone": "embedded",
+ "endpoint": "m3coordinator01:7507",
+ "hostname": "m3coordinator01",
+ "port": 7507
+ }
+ ]
+}'
+```
+
+**Note:** When you add or remove `m3coordinator` instances, this placement must be updated accordingly.
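+
+As a hedged sketch of updating the placement when adding a second instance
+(`m3coordinator02` is hypothetical, and the non-`/init` placement endpoint is
+assumed to perform the add):
+```bash
+# Add a new m3coordinator instance to the existing placement.
+curl -vvvsSf -H "Cluster-Environment-Name: namespace/m3db-cluster-name" -X POST http://m3dbnode-with-embedded-coordinator:7201/api/v1/services/m3coordinator/placement -d '{
+  "instances": [
+    {
+      "id": "m3coordinator02",
+      "zone": "embedded",
+      "endpoint": "m3coordinator02:7507",
+      "hostname": "m3coordinator02",
+      "port": 7507
+    }
+  ]
+}'
+```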
+
+#### Add m3coordinator consumer group to outbound topic
+
+Add the `m3coordinator` placement to receive traffic from the topic (make sure to set the message TTL to match your desired maximum in-memory retry message buffer):
+```bash
+curl -vvvsSf -H "Cluster-Environment-Name: namespace/m3db-cluster-name" -H "Topic-Name: aggregated_metrics" -X POST http://m3dbnode-with-embedded-coordinator:7201/api/v1/topic -d '{
+ "consumerService": {
+ "serviceId": {
+ "name": "m3coordinator",
+ "environment": "namespace/m3db-cluster-name",
+ "zone": "embedded"
+ },
+ "consumptionType": "SHARED",
+ "messageTtlNanos": "300000000000"
+ }
+}'
+```
+
+**Note:** 300000000000 nanoseconds is a TTL of 5 minutes, during which messages are buffered for retry.
+
+### Running
+
+#### Dedicated Coordinator
+
+Metrics will still arrive at the `m3coordinator`; they simply need to be forwarded to an `m3aggregator`. The `m3coordinator` also needs to receive the aggregated metrics back from the `m3aggregator` and store them in M3DB, so an ingestion server must be configured.
+
+Here is the config you should add to your `m3coordinator`:
+```yaml
+# This is for sending metrics to the remote m3aggregators
+downsample:
+ remoteAggregator:
+ client:
+ type: m3msg
+ m3msg:
+ producer:
+ writer:
+ topicName: aggregator_ingest
+ topicServiceOverride:
+ zone: embedded
+ environment: namespace/m3db-cluster-name
+ placement:
+ isStaged: true
+ placementServiceOverride:
+ namespaces:
+ placement: /placement
+ connection:
+ numConnections: 4
+ messagePool:
+ size: 16384
+ watermark:
+ low: 0.2
+ high: 0.5
+
+# This is for configuring the ingestion server that will receive metrics from the m3aggregators on port 7507
+ingest:
+ ingester:
+ workerPoolSize: 10000
+ opPool:
+ size: 10000
+ retry:
+ maxRetries: 3
+ jitter: true
+ logSampleRate: 0.01
+ m3msg:
+ server:
+ listenAddress: "0.0.0.0:7507"
+ retry:
+ maxBackoff: 10s
+ jitter: true
+```
+
+#### M3 Aggregator
+
+You can run `m3aggregator` by either building and running the binary yourself:
+
+```bash
+make m3aggregator
+./bin/m3aggregator -f ./src/aggregator/config/m3aggregator.yml
+```
+
+Or you can run it with Docker using the Dockerfile located at `docker/m3aggregator/Dockerfile` or the publicly provided image `quay.io/m3db/m3aggregator:latest`.
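+
+A minimal sketch of running the image, assuming you save a config like the one
+below as `m3aggregator.yml` in the current directory (the ports and the host ID
+environment variable match that config):
+```bash
+# Run m3aggregator with the config mounted over the image default.
+docker run -d --name m3aggregator01 \
+  -p 6000:6000 -p 6001:6001 -p 6002:6002 \
+  -e M3AGGREGATOR_HOST_ID=m3aggregator01 \
+  -v "$(pwd)/m3aggregator.yml:/etc/m3aggregator/m3aggregator.yml" \
+  quay.io/m3db/m3aggregator:latest
+```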
+
+You can use a config like so, making note of the topics used such as `aggregator_ingest` and `aggregated_metrics` and the corresponding environment `namespace/m3db-cluster-name`:
+
+```yaml
+logging:
+ level: info
+
+metrics:
+ scope:
+ prefix: m3aggregator
+ prometheus:
+ onError: none
+ handlerPath: /metrics
+ listenAddress: 0.0.0.0:6002
+ timerType: histogram
+ sanitization: prometheus
+ samplingRate: 1.0
+ extended: none
+
+m3msg:
+ server:
+ listenAddress: 0.0.0.0:6000
+ retry:
+ maxBackoff: 10s
+ jitter: true
+ consumer:
+ messagePool:
+ size: 16384
+ watermark:
+ low: 0.2
+ high: 0.5
+
+http:
+ listenAddress: 0.0.0.0:6001
+ readTimeout: 60s
+ writeTimeout: 60s
+
+kvClient:
+ etcd:
+ env: namespace/m3db-cluster-name
+ zone: embedded
+ service: m3aggregator
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - dbnode01:2379
+
+runtimeOptions:
+ kvConfig:
+ environment: namespace/m3db-cluster-name
+ zone: embedded
+ writeValuesPerMetricLimitPerSecondKey: write-values-per-metric-limit-per-second
+ writeValuesPerMetricLimitPerSecond: 0
+ writeNewMetricLimitClusterPerSecondKey: write-new-metric-limit-cluster-per-second
+ writeNewMetricLimitClusterPerSecond: 0
+ writeNewMetricNoLimitWarmupDuration: 0
+
+aggregator:
+ hostID:
+ resolver: environment
+ envVarName: M3AGGREGATOR_HOST_ID
+ instanceID:
+ type: host_id
+ verboseErrors: true
+ metricPrefix: ""
+ counterPrefix: ""
+ timerPrefix: ""
+ gaugePrefix: ""
+ aggregationTypes:
+ counterTransformFnType: empty
+ timerTransformFnType: suffix
+ gaugeTransformFnType: empty
+ aggregationTypesPool:
+ size: 1024
+ quantilesPool:
+ buckets:
+ - count: 256
+ capacity: 4
+ - count: 128
+ capacity: 8
+ stream:
+ eps: 0.001
+ capacity: 32
+ streamPool:
+ size: 4096
+ samplePool:
+ size: 4096
+ floatsPool:
+ buckets:
+ - count: 4096
+ capacity: 16
+ - count: 2048
+ capacity: 32
+ - count: 1024
+ capacity: 64
+ client:
+ type: m3msg
+ m3msg:
+ producer:
+ writer:
+ topicName: aggregator_ingest
+ topicServiceOverride:
+ zone: embedded
+ environment: namespace/m3db-cluster-name
+ placement:
+ isStaged: true
+ placementServiceOverride:
+ namespaces:
+ placement: /placement
+ messagePool:
+ size: 16384
+ watermark:
+ low: 0.2
+ high: 0.5
+ placementManager:
+ kvConfig:
+ namespace: /placement
+ environment: namespace/m3db-cluster-name
+ zone: embedded
+ placementWatcher:
+ key: m3aggregator
+ initWatchTimeout: 10s
+ hashType: murmur32
+ bufferDurationBeforeShardCutover: 10m
+ bufferDurationAfterShardCutoff: 10m
+ bufferDurationForFutureTimedMetric: 10m # Allow test to write into future.
+ resignTimeout: 1m
+ flushTimesManager:
+ kvConfig:
+ environment: namespace/m3db-cluster-name
+ zone: embedded
+ flushTimesKeyFmt: shardset/%d/flush
+ flushTimesPersistRetrier:
+ initialBackoff: 100ms
+ backoffFactor: 2.0
+ maxBackoff: 2s
+ maxRetries: 3
+ electionManager:
+ election:
+ leaderTimeout: 10s
+ resignTimeout: 10s
+ ttlSeconds: 10
+ serviceID:
+ name: m3aggregator
+ environment: namespace/m3db-cluster-name
+ zone: embedded
+ electionKeyFmt: shardset/%d/lock
+ campaignRetrier:
+ initialBackoff: 100ms
+ backoffFactor: 2.0
+ maxBackoff: 2s
+ forever: true
+ jitter: true
+ changeRetrier:
+ initialBackoff: 100ms
+ backoffFactor: 2.0
+ maxBackoff: 5s
+ forever: true
+ jitter: true
+ resignRetrier:
+ initialBackoff: 100ms
+ backoffFactor: 2.0
+ maxBackoff: 5s
+ forever: true
+ jitter: true
+ campaignStateCheckInterval: 1s
+ shardCutoffCheckOffset: 30s
+ flushManager:
+ checkEvery: 1s
+ jitterEnabled: true
+ maxJitters:
+ - flushInterval: 5s
+ maxJitterPercent: 1.0
+ - flushInterval: 10s
+ maxJitterPercent: 0.5
+ - flushInterval: 1m
+ maxJitterPercent: 0.5
+ - flushInterval: 10m
+ maxJitterPercent: 0.5
+ - flushInterval: 1h
+ maxJitterPercent: 0.25
+ numWorkersPerCPU: 0.5
+ flushTimesPersistEvery: 10s
+ maxBufferSize: 5m
+ forcedFlushWindowSize: 10s
+ flush:
+ handlers:
+ - dynamicBackend:
+ name: m3msg
+ hashType: murmur32
+ producer:
+ writer:
+ topicName: aggregated_metrics
+ topicServiceOverride:
+ zone: embedded
+ environment: namespace/m3db-cluster-name
+ messagePool:
+ size: 16384
+ watermark:
+ low: 0.2
+ high: 0.5
+ passthrough:
+ enabled: true
+ forwarding:
+ maxConstDelay: 5m # Need to add some buffer window, since timed metrics by default are delayed by 1min.
+ entryTTL: 1h
+ entryCheckInterval: 10m
+ maxTimerBatchSizePerWrite: 140
+ defaultStoragePolicies: []
+ maxNumCachedSourceSets: 2
+ discardNaNAggregatedValues: true
+ entryPool:
+ size: 4096
+ counterElemPool:
+ size: 4096
+ timerElemPool:
+ size: 4096
+ gaugeElemPool:
+ size: 4096
+```
+
+## Usage
+
+Send metrics as usual to your `m3coordinator` instances in round robin fashion (or with any other load balancing strategy); the metrics will be forwarded to the `m3aggregator` instances and, once aggregated, returned to the `m3coordinator` instances to be written to M3DB.
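+
+For a quick end-to-end check, a hedged sketch of writing a test sample through
+a coordinator using the Prometheus remote write client mentioned elsewhere in
+these docs (the `-u`/`-t`/`-d` flags and the `m3coordinator01` hostname are
+assumptions for the example):
+```bash
+# Write one datapoint for test_metric{env="test"} with the current timestamp.
+docker run -it --rm quay.io/m3db/prometheus_remote_client_golang:latest \
+  -u http://m3coordinator01:7201/api/v1/prom/remote/write \
+  -t __name__:test_metric -t env:test \
+  -d "$(date +%s),42"
+```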
diff --git a/docs/how_to/kubernetes.md b/docs/how_to/kubernetes.md
index 43396fc82a..afc459c2ed 100644
--- a/docs/how_to/kubernetes.md
+++ b/docs/how_to/kubernetes.md
@@ -35,6 +35,8 @@ kubectl apply -f https://raw.githubusercontent.com/m3db/m3/master/kube/storage-f
If you wish to use your cloud provider's default remote disk, or another disk class entirely, you'll have to modify the
manifests.
+If your Kubernetes cluster spans multiple availability zones, it's important to specify a [Volume Binding Mode](https://kubernetes.io/docs/concepts/storage/storage-classes/#volume-binding-mode) of `WaitForFirstConsumer` in your StorageClass to delay the binding of the PersistentVolume until the Pod is created.
+
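+A hedged sketch of such a StorageClass (the name, provisioner, and disk type
+are illustrative; substitute your cloud provider's values):
+```bash
+kubectl apply -f - <<EOF
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: fast-wait-for-consumer
+provisioner: kubernetes.io/gce-pd
+parameters:
+  type: pd-ssd
+volumeBindingMode: WaitForFirstConsumer
+EOF
+```
+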
### Kernel Configuration
We provide a Kubernetes daemonset that can make setting host-level sysctls easier. Please see the [kernel][kernel] docs
diff --git a/docs/how_to/query.md b/docs/how_to/query.md
index 873c017e65..0c442aed61 100644
--- a/docs/how_to/query.md
+++ b/docs/how_to/query.md
@@ -2,7 +2,7 @@
## Introduction
-m3query is used to query data that is stored in M3DB. For instance, if you are using the Prometheus remote write endpoint with [m3coordinator](../integrations/prometheus.md), you can use m3query instead of the Prometheus remote read endpoint. By doing so, you get all of the benefits of m3query's engine such as [block processing](http://m3db.github.io/m3/query_engine/architecture/blocks/). Furthermore, since m3query provides a Prometheus compatible API, you can use 3rd party graphing and alerting solutions like Grafana.
+m3query is used to query data that is stored in M3DB. For instance, if you are using the Prometheus remote write endpoint with [m3coordinator](../integrations/prometheus.md), you can use m3query instead of the Prometheus remote read endpoint. By doing so, you get all of the benefits of m3query's engine such as [block processing](../m3query/architecture/blocks.md). Furthermore, since m3query provides a Prometheus compatible API, you can use 3rd party graphing and alerting solutions like Grafana.
## Configuration
@@ -85,7 +85,7 @@ If you have been running m3query or m3coordinator already, you may want to count
An example of a configuration file for a standalone m3query instance with the ID generation scheme can be found [here](https://github.com/m3db/m3/blob/master/scripts/docker-integration-tests/prometheus/m3coordinator.yml). If you're running m3query or m3coordinator embedded, these configuration options should be nested under the `coordinator:` heading, as seen [here](https://github.com/m3db/m3/blob/28fe5e1e430a651a1d66a0a3e22617b6a7f59ec6/src/dbnode/config/m3dbnode-all-config.yml#L33).
-If none of these options work for you, or you would like further clarification, please stop by our [gitter channel](https://gitter.im/m3db/Lobby) and we'll be happy to help you.
+If none of these options work for you, or you would like further clarification, please stop by our [Slack](http://bit.ly/m3slack) and we'll be happy to help you.
## Grafana
diff --git a/docs/index.md b/docs/index.md
index 4343c67b59..9283ac6f0a 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -13,7 +13,7 @@ M3 has several features, provided as discrete components, which make it an ideal
* A distributed time series database, [M3DB](m3db/index.md), that provides scalable storage for time series data and a reverse index.
* A sidecar process, [M3Coordinator](integrations/prometheus.md), that allows M3DB to act as the long-term storage for Prometheus.
-* A distributed query engine, [M3Query](query_engine/index.md), with native support for PromQL and Graphite (M3QL coming soon).
+* A distributed query engine, [M3Query](m3query/index.md), with native support for PromQL and Graphite (M3QL coming soon).
* An aggregation tier, M3Aggregator, that runs as a dedicated metrics aggregator/downsampler allowing metrics to be stored at various retentions at different resolutions.
@@ -33,7 +33,6 @@ Getting started with M3 is as easy as following one of the How-To guides.
For support with any issues, questions about M3 or its operation, or to leave any comments, the team can be
reached in a variety of ways:
-* [Slack (main chat channel)](http://bit.ly/m3slack)
-* [Gitter (old chat channel)](https://gitter.im/m3db/Lobby)
+* [Slack](http://bit.ly/m3slack)
* [Email](https://groups.google.com/forum/#!forum/m3db)
* [Github issues](https://github.com/m3db/m3/issues)
diff --git a/docs/integrations/influxdb.md b/docs/integrations/influxdb.md
new file mode 100644
index 0000000000..3333516eeb
--- /dev/null
+++ b/docs/integrations/influxdb.md
@@ -0,0 +1,31 @@
+# InfluxDB
+
+This document is a getting started guide to integrating InfluxDB data pipelines
+with M3.
+
+## Writing metrics using InfluxDB line protocol
+
+To write metrics to M3 using the InfluxDB line protocol, simply form the request
+as you typically would, line separated, and POST the body to `/api/v1/influxdb/write`
+on the coordinator. Note that the timestamp is in nanoseconds from the Unix epoch.
+
+This example writes two metrics `weather_temperature` and `weather_wind` using
+the current time in nanoseconds as the timestamp:
+```bash
+curl -i -X POST "http://localhost:7201/api/v1/influxdb/write" --data-binary "weather,location=us-midwest temperature=82,wind=42 $(expr $(date +%s) \* 1000000000)"
+```
+
+## Querying for metrics
+
+Once successfully written, you can query for these metrics using PromQL. All
+measurements are translated into metric names by concatenating the measurement
+name with the field name.
+
+The previous example forms the following two Prometheus time series:
+```
+weather_temperature{location="us-midwest"} 82
+weather_wind{location="us-midwest"} 42
+```
+
+All metric names and labels are rewritten to contain only alphanumeric
+characters. Any non-alphanumeric characters are replaced with underscores.
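+
+A hedged sketch of that rewriting, using hypothetical names containing dashes
+(the resulting series name is the expected outcome, not captured output):
+```bash
+# Measurement "weather-station" and tag key "sensor-id" contain dashes.
+curl -i -X POST "http://localhost:7201/api/v1/influxdb/write" \
+  --data-binary "weather-station,sensor-id=a1 temperature=20 $(expr $(date +%s) \* 1000000000)"
+# Expected resulting series (sketch): weather_station_temperature{sensor_id="a1"} 20
+```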
diff --git a/docs/coordinator/api/remote.md b/docs/m3coordinator/api/remote.md
similarity index 92%
rename from docs/coordinator/api/remote.md
rename to docs/m3coordinator/api/remote.md
index 23fa4b37ad..8553fc6115 100644
--- a/docs/coordinator/api/remote.md
+++ b/docs/m3coordinator/api/remote.md
@@ -23,7 +23,10 @@ None.
#### Optional
--8<--
-docs/common/headers_optional_read_write.md
+docs/common/headers_optional_read_write_all.md
+--8<--
+--8<--
+docs/common/headers_optional_write_all.md
--8<--
### Data Params
@@ -71,7 +74,7 @@ promremotecli_log 2019/06/25 04:13:56 write success
# quay.io/m3db/prometheus_remote_client_golang@sha256:fc56df819bff9a5a087484804acf3a584dd4a78c68900c31a28896ed66ca7e7b
```
-For more details on querying data in PromQL that was written using this endpoint, see the [query API documentation](../../query_engine/api/).
+For more details on querying data in PromQL that was written using this endpoint, see the [query API documentation](../../m3query/api/).
## Remote Read
@@ -94,7 +97,10 @@ None.
#### Optional
--8<--
-docs/common/headers_optional_read_write.md
+docs/common/headers_optional_read_write_all.md
+--8<--
+--8<--
+docs/common/headers_optional_read_all.md
--8<--
### Data Params
diff --git a/docs/m3coordinator/index.md b/docs/m3coordinator/index.md
new file mode 100644
index 0000000000..65b6518f3f
--- /dev/null
+++ b/docs/m3coordinator/index.md
@@ -0,0 +1,13 @@
+# M3 Coordinator, stateless API server for reading/writing metrics and M3 management
+
+M3 Coordinator is a service that coordinates reads and writes between upstream systems, such as Prometheus, and downstream systems, such as M3DB.
+
+It also provides management APIs to setup and configure different parts of M3.
+
+The coordinator is generally a bridge for reading and writing different types of metrics formats and a management layer for M3.
+
+**Note**: M3DB by default includes the M3 Coordinator accessible on port 7201.
+For production deployments it is recommended to deploy it as a
+dedicated service so that you can scale the write coordination role
+separately and independently from the database nodes, running it as an
+isolated application apart from the M3DB database role.
diff --git a/docs/m3db/architecture/storage.md b/docs/m3db/architecture/storage.md
index 9a190f4128..22e18dd042 100644
--- a/docs/m3db/architecture/storage.md
+++ b/docs/m3db/architecture/storage.md
@@ -19,21 +19,24 @@ A fileset has the following files:
* **Checkpoint file:** Stores a digest of the digests file and written at the succesful completion of a fileset volume being persisted, allows for quickly checking if a volume was completed.
```
- ┌─────────────────────┐
-┌─────────────────────┐ ┌─────────────────────┐ │ Index File │
-│ Info File │ │ Summaries File │ │ (sorted by ID) │
-├─────────────────────┤ │ (sorted by ID) │ ├─────────────────────┤
-│- Block Start │ ├─────────────────────┤ ┌─>│- Idx │
-│- Block Size │ │- Idx │ │ │- ID │
-│- Entries (Num) │ │- ID │ │ │- Size │
-│- Major Version │ │- Index Entry Offset ├──┘ │- Checksum │
-│- Summaries (Num) │ └─────────────────────┘ │- Data Entry Offset ├──┐
-│- BloomFilter (K/M) │ │- Encoded Tags | |
-│- Snapshot Time │ └─────────────────────┘ │
-│- Type (Flush/Snap) │ │
-└─────────────────────┘ │
- │
- ┌─────────────────────┐ ┌───────────────────────────┘
+ ┌───────────────────────┐
+┌─────────────────────┐ ┌─────────────────────┐ │ Index File │
+│ Info File │ │ Summaries File │ │ (sorted by ID) │
+├─────────────────────┤ │ (sorted by ID) │ ├───────────────────────┤
+│- Block Start │ ├─────────────────────┤ ┌─>│- Idx │
+│- Block Size │ │- Idx │ │ │- ID │
+│- Entries (Num) │ │- ID │ │ │- Size │
+│- Major Version │ │- Index Entry Offset ├──┘ │- Checksum │
+│- Summaries (Num) │ └─────────────────────┘ │- Data Entry Offset ├──┐
+│- BloomFilter (K/M) │ │- Encoded Tags │ │
+│- Snapshot Time │ │- Index Entry Checksum │ │
+│- Type (Flush/Snap) │ └───────────────────────┘ │
+│- Snapshot ID │ │
+│- Volume Index │ │
+│- Minor Version │ │
+└─────────────────────┘ │
+ │
+ ┌─────────────────────┐ ┌─────────────────────────────┘
┌─────────────────────┐ │ Bloom Filter File │ │
│ Digests File │ ├─────────────────────┤ │ ┌─────────────────────┐
├─────────────────────┤ │- Bitset │ │ │ Data File │
diff --git a/docs/query_engine/api/index.md b/docs/m3query/api/index.md
similarity index 94%
rename from docs/query_engine/api/index.md
rename to docs/m3query/api/index.md
index af8329854d..3dc23a17dc 100644
--- a/docs/query_engine/api/index.md
+++ b/docs/m3query/api/index.md
@@ -33,7 +33,10 @@ Query using PromQL and returns JSON datapoints compatible with the Prometheus Gr
#### Optional
--8<--
-docs/common/headers_optional_read_write.md
+docs/common/headers_optional_read_write_all.md
+--8<--
+--8<--
+docs/common/headers_optional_read_all.md
--8<--
### Data Params
@@ -42,8 +45,8 @@ None.
### Sample Call
-
```bash
diff --git a/docs/query_engine/architecture/blocks.md b/docs/m3query/architecture/blocks.md
similarity index 100%
rename from docs/query_engine/architecture/blocks.md
rename to docs/m3query/architecture/blocks.md
diff --git a/docs/query_engine/architecture/fanout.md b/docs/m3query/architecture/fanout.md
similarity index 86%
rename from docs/query_engine/architecture/fanout.md
rename to docs/m3query/architecture/fanout.md
index 95a4bee4f6..cf098e591a 100644
--- a/docs/query_engine/architecture/fanout.md
+++ b/docs/m3query/architecture/fanout.md
@@ -6,4 +6,4 @@ Since m3query does not currently have a view into the M3DB index, fanout to mult
The general approach is therefore to attempt to fanout to any namespace which has a complete view of all metrics, for example, `Unaggregated`, and take that if it fulfills the query range; if not, m3query will attempt to stitch together namespaces with longer retentions to try and build the most complete possible view of stored metrics.
-For further details, please ask questions on [our gitter](https://gitter.im/m3db/Lobby), and we'll be happy to help!
+For further details, please ask questions on [Slack](http://bit.ly/m3slack), and we'll be happy to help!
diff --git a/docs/query_engine/architecture/functions.md b/docs/m3query/architecture/functions.md
similarity index 100%
rename from docs/query_engine/architecture/functions.md
rename to docs/m3query/architecture/functions.md
diff --git a/docs/m3query/architecture/index.md b/docs/m3query/architecture/index.md
new file mode 100644
index 0000000000..6fcc7bcf84
--- /dev/null
+++ b/docs/m3query/architecture/index.md
@@ -0,0 +1,7 @@
+# Architecture
+
+**Please note:** This documentation is a work in progress and more detail is required.
+
+## Overview
+
+M3 Query and M3 Coordinator are written entirely in Go. M3 Query is a query engine for [M3DB](https://docs.m3db.io/) and M3 Coordinator is a remote read/write endpoint for Prometheus and M3DB. To learn more about Prometheus's remote endpoints and storage, [see here](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
diff --git a/docs/query_engine/config/annotated_config.md b/docs/m3query/config/annotated_config.md
similarity index 100%
rename from docs/query_engine/config/annotated_config.md
rename to docs/m3query/config/annotated_config.md
diff --git a/docs/query_engine/config/annotated_config.yaml b/docs/m3query/config/annotated_config.yaml
similarity index 89%
rename from docs/query_engine/config/annotated_config.yaml
rename to docs/m3query/config/annotated_config.yaml
index 4c7080847e..c262f804a2 100644
--- a/docs/query_engine/config/annotated_config.yaml
+++ b/docs/m3query/config/annotated_config.yaml
@@ -65,7 +65,7 @@ writeWorkerPoolPolicy:
size:
tagOptions:
- # See here for more information: http://m3db.github.io/m3/how_to/query/#id-generation
+ # See here for more information: https://docs.m3db.io/how_to/query/#id-generation
idScheme:
# lookbackDuration defines, at each step, how long we lookback until we see a non-NaN value.
@@ -82,3 +82,9 @@ resultOptions:
# for quick local setup (which this config will send data to).
tracing:
backend: jaeger
+
+# Query configuration.
+query:
+ timeout:
+ # The default query engine is 'prometheus' but it could be switched to 'm3query'
+ defaultEngine:
\ No newline at end of file
diff --git a/docs/m3query/config/index.md b/docs/m3query/config/index.md
new file mode 100644
index 0000000000..319d30c011
--- /dev/null
+++ b/docs/m3query/config/index.md
@@ -0,0 +1,42 @@
+# Configuration
+
+## Default query engine
+
+By default M3 runs two query engines:
+
+- Prometheus (default) - the robust, de facto query language for metrics
+- M3 Query Engine - a high-performance query engine that doesn't support all query functions yet
+
+The Prometheus query engine is the default when calling the query endpoint:
+```
+http://localhost:7201/api/v1/query?query=count(http_requests)&time=1590147165
+```
+
+But you can switch between the two in the following ways:
+
+- Changing default query engine in config file (see `defaultEngine` parameter in [Configuration](annotated_config.md))
+- Passing HTTP header `M3-Engine`:
+
+ ```curl -H "M3-Engine: m3query" "http://localhost:7201/api/v1/query?query=count(http_requests)&time=1590147165"```
+
+ or
+
+ ```curl -H "M3-Engine: prometheus" "http://localhost:7201/api/v1/query?query=count(http_requests)&time=1590147165"```
+
+- Passing HTTP query URL parameter `engine`:
+
+ ```curl "http://localhost:7201/api/v1/query?engine=m3query&query=count(http_requests)&time=1590147165"```
+
+ or
+
+ ```curl "http://localhost:7201/api/v1/query?engine=prometheus&query=count(http_requests)&time=1590147165"```
+
+- Using different URLs:
+ - `/prometheus/api/v1/*` - to call the Prometheus query engine
+ - `/m3query/api/v1/*` - to call the M3 Query engine
+
+ ```curl "http://localhost:7201/m3query/api/v1/query?query=count(http_requests)&time=1590147165"```
+
+ or
+
+ ```curl "http://localhost:7201/prometheus/api/v1/query?query=count(http_requests)&time=1590147165"```
diff --git a/docs/m3query/index.md b/docs/m3query/index.md
new file mode 100644
index 0000000000..2cac842cae
--- /dev/null
+++ b/docs/m3query/index.md
@@ -0,0 +1,14 @@
+# M3 Query, a stateless query server for M3DB and Prometheus
+
+M3 Query is a service that exposes all metrics query endpoints along with
+metrics time series metadata APIs that return dimensions and labels of metrics
+that reside in an M3DB cluster.
+
+**Note**: M3 Coordinator, and by proxy M3DB, by default includes the M3
+Query endpoints accessible on port 7201.
+For production deployments it is recommended to deploy it as a
+dedicated service so that you can scale the memory-heavy query role
+separately from the metrics ingestion write path through M3 Coordinator to
+M3DB database nodes. This way, excessive queries primarily affect the
+dedicated M3 Query service instead of interrupting the write
+ingestion pipeline.
diff --git a/docs/query_engine/roadmap.md b/docs/m3query/roadmap.md
similarity index 100%
rename from docs/query_engine/roadmap.md
rename to docs/m3query/roadmap.md
diff --git a/docs/operational_guide/bootstrapping_crash_recovery.md b/docs/operational_guide/bootstrapping_crash_recovery.md
index 936c1aa3cb..1d05c20b79 100644
--- a/docs/operational_guide/bootstrapping_crash_recovery.md
+++ b/docs/operational_guide/bootstrapping_crash_recovery.md
@@ -71,13 +71,13 @@ In this case, the `peers` bootstrapper running on node A will not be able to ful
└─────────────────────────┘ └───────────────────────┘ └──────────────────────┘
```
-Note that a bootstrap consistency level of `majority` is the default value, but can be modified by changing the value of the key `m3db.client.bootstrap-consistency-level` in [etcd](https://coreos.com/etcd/) to one of: `none`, `one`, `unstrict_majority` (attempt to read from majority, but settle for less if any errors occur), `majority` (strict majority), and `all`. For example, if an entire cluster with a replication factor of 3 was restarted simultaneously, all the nodes would get stuck in an infinite loop trying to peer bootstrap from each other and not achieving majority until an operator modified this value. Note that this can happen even if all the shards were in the `Available` state because M3DB nodes will reject all read requests for a shard until they have bootstrapped that shard (which has to happen everytime the node is restarted).
+Note that a bootstrap consistency level of `majority` is the default value, but can be modified by changing the value of the key `m3db.client.bootstrap-consistency-level` in [etcd](https://etcd.io/) to one of: `none`, `one`, `unstrict_majority` (attempt to read from majority, but settle for less if any errors occur), `majority` (strict majority), and `all`. For example, if an entire cluster with a replication factor of 3 was restarted simultaneously, all the nodes would get stuck in an infinite loop trying to peer bootstrap from each other and not achieving majority until an operator modified this value. Note that this can happen even if all the shards were in the `Available` state because M3DB nodes will reject all read requests for a shard until they have bootstrapped that shard (which has to happen every time the node is restarted).
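+
+As a hedged sketch only: recent coordinators expose a key/value API that can
+update this key without talking to `etcd` directly. The endpoint path and
+payload shape below are assumptions; verify against your version before use:
+```bash
+# Relax the bootstrap consistency level at runtime via the coordinator.
+curl -X POST http://localhost:7201/api/v1/kvstore -d '{
+  "key": "m3db.client.bootstrap-consistency-level",
+  "value": {"value": "unstrict_majority"},
+  "commit": true
+}'
+```
+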
**Note**: Any bootstrappers configuration that does not include the `peers` bootstrapper will be unable to handle dynamic placement changes of any kind.
### Uninitialized Topology Bootstrapper
-The purpose of the `uninitialized_topology` bootstrapper is to succeed bootstraps for all time ranges for shards that have never been completely bootstrapped (at a cluster level). This allows us to run the default bootstrapper configuration of: `filesystem,commitlog,peers,topology_uninitialized` such that the `filesystem` and `commitlog` bootstrappers are used by default in node restarts, the `peers` bootstrapper is used for node adds/removes/replaces, and bootstraps still succeed for brand new placement where both the `commitlog` and `peers` bootstrappers will be unable to succeed any bootstraps. In other words, the `uninitialized_topology` bootstrapper allows us to place the `commitlog` bootstrapper *before* the `peers` bootstrapper and still succeed bootstraps with brand new placements without resorting to using the noop-all bootstrapper which suceeds bootstraps for all shard/time-ranges regardless of the status of the placement.
+The purpose of the `uninitialized_topology` bootstrapper is to succeed bootstraps for all time ranges for shards that have never been completely bootstrapped (at a cluster level). This allows us to run the default bootstrapper configuration of: `filesystem,commitlog,peers,uninitialized_topology` such that the `filesystem` and `commitlog` bootstrappers are used by default in node restarts, the `peers` bootstrapper is used for node adds/removes/replaces, and bootstraps still succeed for a brand new placement where both the `commitlog` and `peers` bootstrappers will be unable to succeed any bootstraps. In other words, the `uninitialized_topology` bootstrapper allows us to place the `commitlog` bootstrapper *before* the `peers` bootstrapper and still succeed bootstraps with brand new placements without resorting to using the noop-all bootstrapper which succeeds bootstraps for all shard/time-ranges regardless of the status of the placement.
The `uninitialized_topology` bootstrapper determines whether a placement is "new" for a given shard by counting the number of nodes in the `Initializing` and `Leaving` states; if there are more `Initializing` than `Leaving`, it succeeds the bootstrap because that means the placement has never reached a state where all nodes are `Available`.
@@ -97,7 +97,7 @@ In the general case, the node will use only the `filesystem` and `commitlog` boo
Additionally, if it is a brand new placement where even the `peers` bootstrapper cannot fulfill the bootstrap, this will be detected by the `uninitialized_topology` bootstrapper which will succeed the bootstrap.
-### filesystem,peers,uninitialized_topology (default)
+### filesystem,peers,uninitialized_topology
Every time a node is restarted it will attempt to stream in all of the data for any blocks that it has never flushed, which is generally the currently active block and possibly the previous block as well. This mode can be useful if you want to improve performance or save disk space by operating nodes without a commitlog, or want to force a repair of any unflushed blocks. This mode can lead to violations of M3DB's consistency guarantees due to the fact that commit logs are being ignored. In addition, if you lose a replication factor's worth or more of hosts at the same time, the node will not be able to bootstrap unless an operator modifies the bootstrap consistency level configuration in etcd (see `peers` bootstrap section above). Finally, this mode adds additional network and resource pressure on other nodes in the cluster while one node is peer bootstrapping from them, which can be problematic in catastrophic scenarios where all the nodes are trying to stream data from each other.
diff --git a/docs/operational_guide/etcd.md b/docs/operational_guide/etcd.md
index f348cc7f03..0d6ee98c70 100644
--- a/docs/operational_guide/etcd.md
+++ b/docs/operational_guide/etcd.md
@@ -16,7 +16,7 @@ Both `M3` and `etcd` are complex distributed systems, and trying to operate both
Instead, we recommend running an external `etcd` cluster that is isolated from the `M3` stack so that performing operations like node adds, removes, and replaces are easier.
-While M3 relies on `etcd` to provide strong consistency, the perations we use it for are all low-throughput so you should be able to operate a very low maintenance `etcd` cluster. [A 3-node setup for high availability](https://github.com/etcd-io/etcd/blob/v3.3.11/Documentation/faq.md#what-is-failure-tolerance) should be more than sufficient for most workloads.
+While M3 relies on `etcd` to provide strong consistency, the operations we use it for are all low-throughput so you should be able to operate a very low maintenance `etcd` cluster. [A 3-node setup for high availability](https://github.com/etcd-io/etcd/blob/v3.3.11/Documentation/faq.md#what-is-failure-tolerance) should be more than sufficient for most workloads.
## Configuring an External etcd Cluster
diff --git a/docs/operational_guide/index.md b/docs/operational_guide/index.md
index 4f7cd26458..1f1db09877 100644
--- a/docs/operational_guide/index.md
+++ b/docs/operational_guide/index.md
@@ -1 +1,3 @@
# Operational Guides
+
+These operational guides provide documentation for operating M3.
diff --git a/docs/operational_guide/mapping_rollup.md b/docs/operational_guide/mapping_rollup.md
index 2c32f99ae3..d6cfbf0a82 100644
--- a/docs/operational_guide/mapping_rollup.md
+++ b/docs/operational_guide/mapping_rollup.md
@@ -72,4 +72,81 @@ both the `1m:48h` and `30s:24h` namespaces.
## Rollup Rules
-Coming soon!
+Rollup rules are used to roll up metrics and aggregate them in different ways
+across arbitrary dimensions before they are stored.
+
+Here's an example of creating a new monotonic counter called
+`http_request_rollup_no_pod_bucket` from a set of histogram metrics originally
+called `http_request_bucket`:
+
+```yaml
+downsample:
+ rules:
+ rollupRules:
+ - name: "http_request latency by route and git_sha without pod"
+ filter: "__name__:http_request_bucket k8s_pod:* le:* git_sha:* route:*"
+ transforms:
+ - transform:
+ type: "Increase"
+ - rollup:
+ metricName: "http_request_rollup_no_pod_bucket"
+ groupBy: ["le", "git_sha", "route", "status_code", "region"]
+ aggregations: ["Sum"]
+ - transform:
+ type: "Add"
+ storagePolicies:
+ - resolution: 30s
+ retention: 720h
+```
+
+**Note:** only metrics that contain all of the `groupBy` tags will be rolled up.
+For example, in the above config, only `http_request_bucket` metrics that
+have all of the `groupBy` labels present will be rolled up into the new
+metric `http_request_rollup_no_pod_bucket`.
+
+While the above example can be used to create a new rolled up metric,
+oftentimes the goal of rollup rules is to eliminate the underlying,
+raw metrics. In order to do this, a `mappingRule` needs to be
+added with `drop` set to `true`, as in the following example (using the
+metric above). Additionally, if **all** of the underlying metrics are
+being dropped, there is no need to change the metric name (e.g. in the
+`rollupRule`, the `metricName` field can be equal to the existing metric) --
+see below for an example.
+
+```yaml
+downsample:
+ rules:
+ mappingRules:
+ - name: "http_request latency by route and git_sha drop raw"
+ filter: "__name__:http_request_bucket k8s_pod:* le:* git_sha:* route:*"
+ drop: true
+ rollupRules:
+ - name: "http_request latency by route and git_sha without pod"
+ filter: "__name__:http_request_bucket k8s_pod:* le:* git_sha:* route:*"
+ transforms:
+ - transform:
+ type: "Increase"
+ - rollup:
+ metricName: "http_request_bucket" # metric name doesn't change
+ groupBy: ["le", "git_sha", "route", "status_code", "region"]
+ aggregations: ["Sum"]
+ - transform:
+ type: "Add"
+ storagePolicies:
+ - resolution: 30s
+ retention: 720h
+```
+
+**Note:** In order to store rolled up metrics in an `unaggregated` namespace,
+a matching `aggregated` namespace must be added to the coordinator config. For
+example, if the `720h` namespace under `storagePolicies` in the above rule
+is `unaggregated`, the following will need to be added to the coordinator config:
+
+```yaml
+- namespace: default
+ resolution: 30s
+ retention: 720h
+ type: aggregated
+ downsample:
+ all: false
+```
diff --git a/docs/operational_guide/monitoring.md b/docs/operational_guide/monitoring.md
index 2827785b50..5ac49f8021 100644
--- a/docs/operational_guide/monitoring.md
+++ b/docs/operational_guide/monitoring.md
@@ -1,10 +1,10 @@
## Metrics
-TODO: document how to retrieve metrics for M3DB components.
+It is best to use Prometheus to monitor M3DB, M3 Coordinator and M3 Query using the [Grafana dashboards](https://github.com/m3db/m3/blob/master/integrations/grafana/).
## Logs
-TODO: document how to retrieve logs for M3DB components.
+Logs are printed to the process output in JSON by default, to support semi-structured log processing.
## Tracing
@@ -65,8 +65,8 @@ If you'd like additional backends, we'd love to support them!
File an issue against M3 and we can work with you on how best to add
the backend. The first time's going to be a little rough--opentracing
unfortunately doesn't support Go plugins (yet--see
-https://github.com/opentracing/opentracing-go/issues/133), and `glide`'s
-update model means that adding dependencies directly will update
+https://github.com/opentracing/opentracing-go/issues/133), and Go's dependency
+model means that adding dependencies directly will update
*everything*, which isn't ideal for an isolated dependency change.
These problems are all solvable though,
and we'll work with you to make it happen!
diff --git a/docs/operational_guide/multiple_m3db_clusters.md b/docs/operational_guide/multiple_m3db_clusters.md
new file mode 100644
index 0000000000..1d9d1e0eca
--- /dev/null
+++ b/docs/operational_guide/multiple_m3db_clusters.md
@@ -0,0 +1,93 @@
+## Write to multiple M3DB clusters via m3coordinator
+
+### Overview:
+
+By default, the M3 architecture has the m3coordinator writing to and aggregating metrics from a single M3DB cluster. To map a single coordinator to more than one M3DB cluster, follow the instructions below.
+
+Use case(s):
+- Sending metrics to different namespaces for different retention periods, etc.
+
+### Instructions:
+
+1. Add `clusterManagement` to the config file to add multiple M3DB clusters to m3coordinator:
+
+Example config file with `clusterManagement` (see end of the config):
+
+```yaml
+clusters:
+ # Should match the namespace(s) set up in the DB nodes
+ - namespaces:
+ - namespace: 21d
+ retention: 504h
+ type: unaggregated
+ client:
+ config:
+ service:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /data/m3kv_default
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ -
+ -
+ -
+ writeRetry:
+ initialBackoff: 500ms
+ backoffFactor: 3
+ maxRetries: 2
+ jitter: true
+ fetchRetry:
+ initialBackoff: 500ms
+ backoffFactor: 2
+ maxRetries: 3
+ jitter: true
+ - namespaces:
+ - namespace: 90d
+ retention: 2160h
+ type: aggregated
+ resolution: 10m
+ - namespace: 500d
+ retention: 12000h
+ type: aggregated
+ resolution: 1h
+ client:
+ config:
+ service:
+ env: lts_env
+ zone: embedded
+ service: m3db
+ cacheDir: /data/m3kv_lts
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ -
+ -
+ -
+ writeRetry:
+ initialBackoff: 500ms
+ backoffFactor: 3
+ maxRetries: 2
+ jitter: true
+ fetchRetry:
+ initialBackoff: 500ms
+ backoffFactor: 2
+ maxRetries: 3
+ jitter: true
+tagOptions:
+ idScheme: quoted
+clusterManagement:
+ etcd:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /data/m3kv_default
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ -
+ -
+ -
+```
+2. Use the `Cluster-Environment-Name` header for any API requests to the m3coordinator.
\ No newline at end of file
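+
+For example, a hedged sketch of directing a query to the long-term cluster defined above, assuming the coordinator's default port (the endpoint, query, and environment name are illustrative):
+
+```bash
+# Route the request to the cluster whose environment is "lts_env".
+curl -H "Cluster-Environment-Name: lts_env" \
+  "http://localhost:7201/api/v1/query?query=up"
+```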
diff --git a/docs/operational_guide/placement.md b/docs/operational_guide/placement.md
index 1c5f25ff37..ff36241ffe 100644
--- a/docs/operational_guide/placement.md
+++ b/docs/operational_guide/placement.md
@@ -6,7 +6,7 @@
An M3DB cluster has exactly one Placement. That placement maps the cluster's shard replicas to nodes. A cluster also has 0 or more namespaces (analogous to tables in other databases), and each node serves every namespace for the shards it owns. In other words, if the cluster topology states that node A owns shards 1, 2, and 3, then node A will own shards 1, 2, 3 for all configured namespaces in the cluster.
-M3DB stores its placement (mapping of which NODES are responsible for which shards) in [etcd](https://coreos.com/etcd/). There are three possible states that each node/shard pair can be in:
+M3DB stores its placement (mapping of which NODES are responsible for which shards) in [etcd](https://etcd.io/). There are three possible states that each node/shard pair can be in:
1. `Initializing`
2. `Available`
diff --git a/docs/operational_guide/placement_configuration.md b/docs/operational_guide/placement_configuration.md
index 4bb980a7e2..389c46dda7 100644
--- a/docs/operational_guide/placement_configuration.md
+++ b/docs/operational_guide/placement_configuration.md
@@ -118,7 +118,7 @@ curl -X POST localhost:7201/api/v1/services/m3db/placement/init -d '{
"endpoint": ":",
"hostname": "",
"port":
- },
+ }
]
}'
```
diff --git a/docs/operational_guide/repairs.md b/docs/operational_guide/repairs.md
index 2794f334e7..a191bd1b08 100644
--- a/docs/operational_guide/repairs.md
+++ b/docs/operational_guide/repairs.md
@@ -1,8 +1,10 @@
# Background Repairs (beta)
+**Note:** This feature is in beta and is only available for use with M3DB when run with the inverted index off. It can be run with the inverted index on; however, metrics will not be re-indexed if they are repaired, so they will be invisible to queries on that node.
+
## Overview
-Background repairs enable M3DB to eventually reach a consistent state such that all nodes have identical view
+Background repairs enable M3DB to eventually reach a consistent state such that all nodes have an identical view of the data.
An M3DB cluster can be configured to repair itself in the background. If background repairs are enabled, M3DB nodes will continuously scan the metadata of other nodes. If a mismatch is detected, affected nodes will perform a repair such that each node in the cluster eventually settles on a consistent view of the data.
A repair is performed individually by each node when it detects a mismatch between its metadata and the metadata of its peers. Each node will stream the data for the relevant series, merge the data from its peers with its own, and then write out the resulting merged dataset to disk to make the repair durable. In other words, there is no coordination between individual nodes during the repair process, each node is detecting mismatches on its own and performing a "best effort" repair by merging all available data from all peers into a new stream.
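
For reference, a minimal sketch of enabling this in the M3DB configuration, assuming a `repair` stanza with an `enabled` flag (field names may differ between versions):

```yaml
# Sketch: turn on background repairs on each M3DB node.
db:
  repair:
    enabled: true
```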
diff --git a/docs/operational_guide/resource_limits.md b/docs/operational_guide/resource_limits.md
new file mode 100644
index 0000000000..15924ee0bf
--- /dev/null
+++ b/docs/operational_guide/resource_limits.md
@@ -0,0 +1,133 @@
+# Resource Limits and Preventing Abusive Reads/Writes
+
+This operational guide provides an overview of how to set resource limits on
+M3 components to prevent abusive reads/writes from impacting the availability
+or performance of M3 in a production environment.
+
+## M3DB
+
+### Configuring limits
+
+The best way to get started protecting M3DB nodes is to set a few limits on the
+top level `limits` config stanza for M3DB.
+
+When using M3DB for metrics workloads, queries arrive as a set of matchers
+that select time series based on certain dimensions. The primary mechanism to
+protect against these matchers matching huge amounts of data in an unbounded
+way is to set a maximum limit on the number of time series blocks allowed to
+be matched, and consequently read, in a given time window. This can be done
+using `maxRecentlyQueriedSeriesBlocks` to set a maximum value and a lookback
+time window that determines the duration over which the limit is enforced.
+
+You can use the Prometheus query `rate(query_stats_total_docs_per_block[1m])`
+to see how many time series blocks your cluster queries per second today,
+which helps establish a sane value for this limit. Make sure to multiply
+that number by the `lookback` period to get your desired max value. For
+instance, if the query shows that you frequently query 10,000 time series blocks
+per second safely with your deployment and you want to use the default lookback
+of `5s`, then you would multiply 10,000 by 5 to get 50,000 as a max value with
+a 5s lookback.
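+
+Concretely, a minimal sketch of the resulting stanza for that worked example (10,000 blocks per second multiplied by the 5s lookback):
+
+```yaml
+limits:
+  maxRecentlyQueriedSeriesBlocks:
+    value: 50000  # 10,000 blocks/s * 5s lookback
+    lookback: 5s
+```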
+
+### Annotated configuration
+
+```yaml
+limits:
+ # If set, will enforce a maximum cap on time series blocks matched for
+ # queries searching time series by dimensions.
+ maxRecentlyQueriedSeriesBlocks:
+    # Value sets the maximum time series blocks matched; use your block
+    # settings to understand how many datapoints that may actually translate
+    # to (e.g. 2 hour blocks for unaggregated data with a 30s scrape interval
+    # will translate to 240 datapoints per single time series block matched).
+    value: 0
+ # Lookback sets the time window that this limit is enforced over, every
+ # lookback period the global count is reset to zero and when the limit
+ # is reached it will reject any further time series blocks being matched
+ # and read until the lookback period resets.
+ lookback: 5s
+
+  # If set, limits the number of parallel write batch requests to the
+  # database, returning errors once the limit is hit.
+  maxOutstandingWriteRequests: 0
+
+  # If set, limits the number of parallel read requests to the
+  # database, returning errors once the limit is hit.
+  # Note: since reads vary widely in how expensive they are, this config
+  # is not always very useful for preventing resource exhaustion
+  # caused by reads.
+  maxOutstandingReadRequests: 0
+```
+
+## M3 Query and M3 Coordinator
+
+### Deployment
+
+Protecting the ingestion of metrics from the impact of queries can first
+and foremost be done by deploying M3 Query and M3 Coordinator
+independently. That is, for writes to M3 use a dedicated deployment of
+M3 Coordinator instances, and for queries to M3 use a dedicated deployment
+of M3 Query instances.
+
+This ensures that when M3 Query instances become busy and are starved of
+resources while serving an unexpected query load, they will not interrupt
+the flow of metrics being ingested into M3.
+
+### Configuring limits
+
+To protect against individual queries using too many resources, you can specify some
+sane limits in the M3 Query (and consequently M3 Coordinator) configuration
+file under the top level `limits` config stanza.
+
+There are two types of limits:
+
+- Per query time series limit
+- Per query time series * blocks limit (docs limit)
+
+When either of these limits is hit, you can define the behavior you would
+like: either return an error, or return a partial result
+with the response header `M3-Results-Limited` detailing the limit that was hit
+and a warning included in the response body.
+
+### Annotated configuration
+
+```yaml
+limits:
+ # If set will override default limits set per query.
+ perQuery:
+ # If set limits the number of time series returned for any given
+ # individual storage node per query, before returning result to query
+ # service.
+ maxFetchedSeries: 0
+
+ # If set limits the number of index documents matched for any given
+ # individual storage node per query, before returning result to query
+ # service.
+ # This equates to the number of time series * number of blocks, so for
+ # 100 time series matching 4 hours of data for a namespace using a 2 hour
+ # block size, that would result in matching 200 index documents.
+ maxFetchedDocs: 0
+
+    # If true, causes a query error if the query exceeds the series
+    # or docs limit for any given individual storage node per query.
+    requireExhaustive: false
+
+ # If set this limits the max number of datapoints allowed to be used by a
+ # given query. This is applied at the query service after the result has
+ # been returned by a storage node.
+ maxFetchedDatapoints: 0
+
+ # If set will override default limits set globally.
+ global:
+ # If set this limits the max number of datapoints allowed to be used by all
+ # queries at any point in time, this is applied at the query service after
+ # the result has been returned by a storage node.
+ maxFetchedDatapoints: 0
+```
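+
+As a concrete illustration, a hedged sketch that caps each query at 10,000 series and 100,000 docs per storage node and returns an error instead of a partial result (the values are arbitrary):
+
+```yaml
+limits:
+  perQuery:
+    maxFetchedSeries: 10000
+    maxFetchedDocs: 100000
+    requireExhaustive: true
+```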
+
+### Headers
+
+The following headers can also be used to override configured limits on a per-request basis (to allow different limits depending on the caller):
+
+--8<--
+docs/common/headers_optional_read_limits.md
+--8<--
diff --git a/docs/operational_guide/upgrading_m3.md b/docs/operational_guide/upgrading_m3.md
index 6a4f22b045..323670bc6f 100644
--- a/docs/operational_guide/upgrading_m3.md
+++ b/docs/operational_guide/upgrading_m3.md
@@ -14,9 +14,45 @@ This includes upgrading:
### Graphs to monitor
-While upgrading M3DB nodes, it's important to monitor the status of bootstrapping the individual nodes. This can be monitored using the [M3DB Node Details](https://grafana.com/grafana/dashboards/8126) graph.
+While upgrading M3DB nodes, it's important to monitor the bootstrapping status of the individual nodes. This can be done using the [M3DB Node Details](https://github.com/m3db/m3/blob/master/integrations/grafana/m3db_dashboard.json) dashboard.
Typically, the `Bootstrapped` graph under `Background Tasks` and the graphs within the `CPU and Memory Utilization` give a good understanding of how well bootstrapping is going.
+### Kubernetes
+
+If running `M3DB` on Kubernetes, upgrade by completing the following steps.
+
+1. Identify the version of m3dbnode to upgrade to [on Quay](https://quay.io/repository/m3db/m3dbnode?tab=tags).
+
+2. Replace the Docker image in the `StatefulSet` manifest (or `m3db-operator` manifest) with the new version of m3dbnode.
+
+```yaml
+spec:
+ image: quay.io/m3db/m3dbnode:$VERSION
+```
+
+3. Once updated, apply the updated manifest and a rolling restart will be performed. You must wait until the `StatefulSet` is entirely upgraded and bootstrapped (as per the M3DB Node Details dashboard) before proceeding to the next `StatefulSet`; otherwise, multiple replicas will be unavailable at once.
+
+```bash
+kubectl apply -f
+```
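+
+To watch the rolling restart and bootstrap progress from the command line, something like the following can be used (the `StatefulSet` name is illustrative):
+
+```bash
+# Blocks until the rolling restart of the StatefulSet completes.
+kubectl rollout status statefulset/m3dbnode-rep0
+```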
+
+### Downgrading
+
+The `upgrading` steps above can also be used to downgrade M3DB. However, it is important to refer to the release notes to make sure that versions are
+backwards compatible.
+
+## m3coordinator
+
+`m3coordinator` can be upgraded using steps similar to those for `m3dbnode`; however, the images can be [found here](https://quay.io/repository/m3db/m3coordinator) instead.
+
+## m3query
+
+`m3query` can be upgraded using steps similar to those for `m3dbnode`; however, the images can be [found here](https://quay.io/repository/m3db/m3query) instead.
+
+## m3aggregator
+
+`m3aggregator` can be upgraded using steps similar to those for `m3dbnode`; however, the images can be [found here](https://quay.io/repository/m3db/m3aggregator) instead.
+
### Non-Kubernetes
It is very important that for each replica set, only one node gets upgraded at a time. However, multiple nodes can be upgraded across replica sets.
@@ -63,39 +99,3 @@ pkill m3dbnode
```
4) Repeat steps 2 and 3 until all nodes have been upgraded.
-
-### Kubernetes
-
-If running `M3DB` on Kubernetes, upgrade by completing the following steps.
-
-1. Identify the version of m3dbnode to upgrade to [on Quay](https://quay.io/repository/m3db/m3dbnode?tab=tags).
-
-2. Replace the Docker image in the `StatefulSet` manifest (or `m3db-operator` manifest) to be the new version of m3dbnode.
-
-```yaml
-spec:
- image: quay.io/m3db/m3dbnode:$VERSION
-```
-
-3. Once updated, apply the updated manifest and a rolling restart will be performed.
-
-```bash
-kubectl apply -f
-```
-
-### Downgrading
-
-The `upgrading` steps above can also be used to downgrade M3DB. However, it is important to refer to the release notes to make sure that versions are
-backwards compatible.
-
-## m3coordinator
-
-`m3coordinator` can be upgraded using similar steps as `m3dbnode`, however, the images can be [found here](https://quay.io/repository/m3db/m3coordinator) instead.
-
-## m3query
-
-`m3query` can be upgraded using similar steps as `m3dbnode`, however, the images can be [found here](https://quay.io/repository/m3db/m3query) instead.
-
-## m3aggregator
-
-`m3aggregator` can be upgraded using similar steps as `m3dbnode`, however, the images can be [found here](https://quay.io/repository/m3db/m3aggregator) instead.
diff --git a/docs/overview/components.md b/docs/overview/components.md
index e734e29a65..fe952f1586 100644
--- a/docs/overview/components.md
+++ b/docs/overview/components.md
@@ -10,7 +10,7 @@ M3DB is a distributed time series database that provides scalable storage and a
## M3 Query
-M3 Query is a service that houses a distributed query engine for querying both realtime and historical metrics, supporting several different query languages. It is designed to support both low latency realtime queries and queries that can take longer to execute, aggregating over much larger datasets, for analytical use cases. For more details, see the [query engine documentation](../query_engine/).
+M3 Query is a service that houses a distributed query engine for querying both realtime and historical metrics, supporting several different query languages. It is designed to support both low latency realtime queries and queries that can take longer to execute, aggregating over much larger datasets, for analytical use cases. For more details, see the [query engine documentation](../m3query/).
## M3 Aggregator
diff --git a/docs/overview/media.md b/docs/overview/media.md
index 6b6461b4f1..b7d474b6b5 100644
--- a/docs/overview/media.md
+++ b/docs/overview/media.md
@@ -6,8 +6,20 @@
- [Building a Query Engine for High Cardinality Time Series Data](https://eng.uber.com/billion-data-point-challenge) By Nikunj Aggarwal and Ben Raskin - Dec 10, 2018.
+## M3 Community Meetups
+
+Recordings of all past meetups can be found in the [Vimeo M3 Community Meetings folder](https://vimeo.com/user/120001164/folder/2290331).
+
+- [June 2020 Meetup](https://vimeo.com/440390957).
+
+- [July 2020 Meetup and LinkedIn presentation](https://vimeo.com/440449118).
+
+- [August 2020 Meetup and Walmart presentation](https://vimeo.com/449883279).
+
## Recorded Talks
+- [CNCF Webinar: Maximizing M3 – Pushing performance boundaries in a distributed metrics engine](https://www.cncf.io/webinars/maximizing-m3-pushing-performance-boundaries-in-a-distributed-metrics-engine-at-global-scale/) By Ryan Allen - Aug 6, 2020.
+
- [OSCON 2019: Large-Scale Automated Storage on Kubernetes](https://youtu.be/N9A7xSE9n-c) By Matt Schallert - Jul 18, 2019. [Slides](https://schallert.io/OSCON%20Large-Scale%20Automated%20Storage%20on%20Kubernetes.pdf)
- [How to get the 30,000 ft view, 1 ft view and everything in between without breaking the bank](https://vimeo.com/341146220) By Martin Mao - June 5, 2019. [Slides](https://www.slideshare.net/MartinMao/monitorama-2019-pdx-martin-mao)
@@ -31,3 +43,4 @@
- [PromCon 2018 Panel Discussion: Prometheus Long-Term Storage Approaches](https://youtube.com/watch?v=3pTG_N8yGSU) including highlights of the M3 stack by Nikunj Aggarwal - Aug 9, 2018.
- [Putting billions of time series to work at Uber with autonomous monitoring](https://vimeo.com/274821002) By Prateek Rungta - Jun 6, 2018. [Slides](http://bit.ly/m3db-monitorama2018)
+
diff --git a/docs/overview/roadmap.md b/docs/overview/roadmap.md
new file mode 100644
index 0000000000..c38e3e8d4e
--- /dev/null
+++ b/docs/overview/roadmap.md
@@ -0,0 +1,13 @@
+# Roadmap
+
+This roadmap is open for suggestions and is currently just a small snapshot of what is coming up.
+
+Short:
+- Add diagrams of what using M3 looks like (broken down by use case)
+- Improve operational guides for the aggregator
+- Add tutorials for a variety of use cases
+- Add design documentation of reverse index
+- Add design documentation of aggregator
+
+Medium:
+- Plan what a v1.0 release looks like
diff --git a/docs/performance/index.md b/docs/performance/index.md
deleted file mode 100644
index 8767efd545..0000000000
--- a/docs/performance/index.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Performance configurations and settings for M3DB and m3query
-
-**Please note:** This documentation is a work in progress and more detail is required.
diff --git a/docs/performance/m3db/index.md b/docs/performance/m3db/index.md
deleted file mode 100644
index 5133851822..0000000000
--- a/docs/performance/m3db/index.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Performance configurations
-
-
diff --git a/docs/performance/m3query/index.md b/docs/performance/m3query/index.md
deleted file mode 100644
index aec029afbc..0000000000
--- a/docs/performance/m3query/index.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Performance configurations
-
-Below are some common configurations related to performance for the query engine.
diff --git a/docs/query_engine/architecture/index.md b/docs/query_engine/architecture/index.md
deleted file mode 100644
index 5fa51edfc2..0000000000
--- a/docs/query_engine/architecture/index.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Architecture
-
-**Please note:** This documentation is a work in progress and more detail is required.
-
-## Overview
-
-M3 Query and M3 Coordinator are written entirely in Go, M3 Query is as a query engine for [M3DB](https://m3db.github.io/m3/) and M3 Coordinator is a remote read/write endpoint for Prometheus and M3DB. To learn more about Prometheus's remote endpoints and storage, [see here](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
diff --git a/docs/query_engine/index.md b/docs/query_engine/index.md
deleted file mode 100644
index 108332327f..0000000000
--- a/docs/query_engine/index.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# M3 Query, a distributed query engine for M3DB and Prometheus
-
-**Please note:** This documentation is a work in progress and more detail is required.
diff --git a/examples/dbnode/proto_client/README.md b/examples/dbnode/proto_client/README.md
index 85cecf3bdc..9d3fa1db4e 100644
--- a/examples/dbnode/proto_client/README.md
+++ b/examples/dbnode/proto_client/README.md
@@ -1,5 +1,5 @@
# Protobuf Client Example
-1. Setup an M3DB container as described in the [using M3DB as a general purpose time series database guide](https://m3db.github.io/m3/how_to/use_as_tsdb).
+1. Setup an M3DB container as described in the [using M3DB as a general purpose time series database guide](https://docs.m3db.io/how_to/use_as_tsdb).
2. Modify `config.yaml` with any changes you've made to the default configuration. Also if you make any changes to M3DB's configuration make sure to do so before restarting the container as M3DB does not reload YAML configuration dynamically.
3. Execute `go run main.go -f config.yaml`
\ No newline at end of file
diff --git a/glide.lock b/glide.lock
deleted file mode 100644
index d2cd3f346a..0000000000
--- a/glide.lock
+++ /dev/null
@@ -1,669 +0,0 @@
-hash: baed617ba8848f4267525e942246b21bc23c660894dee1eebc40b3910467e067
-updated: 2020-02-10T15:18:46.492669-05:00
-imports:
-- name: github.com/alecthomas/units
- version: f65c72e2690dc4b403c8bd637baf4611cd4c069b
-- name: github.com/apache/thrift
- version: 05b5a2227fe44056ce829fe59583126fd6478a58
- repo: https://github.com/m3db/thrift
- vcs: git
- subpackages:
- - lib/go/thrift
-- name: github.com/beorn7/perks
- version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
- subpackages:
- - quantile
-- name: github.com/BurntSushi/toml
- version: 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005
-- name: github.com/c2h5oh/datasize
- version: 4eba002a5eaea69cf8d235a388fc6b65ae68d2dd
-- name: github.com/cespare/xxhash
- version: 48099fad606eafc26e3a569fad19ff510fff4df6
-- name: github.com/cockroachdb/cmux
- version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92
-- name: github.com/coreos/bbolt
- version: 32c383e75ce054674c53b5a07e55de85332aee14
-- name: go.etcd.io/etcd
- version: 3cf2f69b5738fb702ba1a935590f36b52b18979b
- subpackages:
- - alarm
- - auth
- - auth/authpb
- - client
- - clientv3
- - clientv3/concurrency
- - clientv3/namespace
- - clientv3/naming
- - compactor
- - discovery
- - embed
- - error
- - etcdserver
- - etcdserver/api
- - etcdserver/api/etcdhttp
- - etcdserver/api/v2http
- - etcdserver/api/v2http/httptypes
- - etcdserver/api/v3client
- - etcdserver/api/v3election
- - etcdserver/api/v3election/v3electionpb
- - etcdserver/api/v3election/v3electionpb/gw
- - etcdserver/api/v3lock
- - etcdserver/api/v3lock/v3lockpb
- - etcdserver/api/v3lock/v3lockpb/gw
- - etcdserver/api/v3rpc
- - etcdserver/api/v3rpc/rpctypes
- - etcdserver/auth
- - etcdserver/etcdserverpb
- - etcdserver/etcdserverpb/gw
- - etcdserver/membership
- - etcdserver/stats
- - integration
- - lease
- - lease/leasehttp
- - lease/leasepb
- - mvcc
- - mvcc/backend
- - mvcc/mvccpb
- - pkg/adt
- - pkg/contention
- - pkg/cors
- - pkg/cpuutil
- - pkg/crc
- - pkg/debugutil
- - pkg/fileutil
- - pkg/httputil
- - pkg/idutil
- - pkg/ioutil
- - pkg/logutil
- - pkg/monotime
- - pkg/netutil
- - pkg/pathutil
- - pkg/pbutil
- - pkg/runtime
- - pkg/schedule
- - pkg/srv
- - pkg/testutil
- - pkg/tlsutil
- - pkg/transport
- - pkg/types
- - pkg/wait
- - proxy/grpcproxy
- - proxy/grpcproxy/adapter
- - proxy/grpcproxy/cache
- - raft
- - raft/raftpb
- - rafthttp
- - snap
- - snap/snappb
- - store
- - version
- - wal
- - wal/walpb
-- name: github.com/coreos/go-semver
- version: 8ab6407b697782a06568d4b7f1db25550ec2e4c6
- subpackages:
- - semver
-- name: github.com/coreos/go-systemd
- version: 48702e0da86bd25e76cfef347e2adeb434a0d0a6
- subpackages:
- - daemon
- - journal
- - util
-- name: github.com/coreos/pkg
- version: 97fdf19511ea361ae1c100dd393cc47f8dcfa1e1
- subpackages:
- - capnslog
-- name: github.com/couchbase/vellum
- version: 41f2deade2cfab59facd263e918d7c05f656c2e9
- subpackages:
- - utf8
-- name: github.com/davecgh/go-spew
- version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
- subpackages:
- - spew
-- name: github.com/dgrijalva/jwt-go
- version: d2709f9f1f31ebcda9651b03077758c1f3a0018c
-- name: github.com/edsrzf/mmap-go
- version: 0bce6a6887123b67a60366d2c9fe2dfb74289d2e
-- name: github.com/fortytw2/leaktest
- version: b433bbd6d743c1854040b39062a3916ed5f78fe8
-- name: github.com/fsnotify/fsnotify
- version: 1485a34d5d5723fea214f5710708e19a831720e4
-- name: github.com/ghodss/yaml
- version: 0ca9ea5df5451ffdf184b4428c902747c2c11cd7
-- name: github.com/go-kit/kit
- version: dc489b75b9cdbf29c739534c2aa777cabb034954
- subpackages:
- - log
- - log/level
-- name: github.com/go-logfmt/logfmt
- version: 432dd90af23366a89a611c020003fc8ba281ae5d
-- name: github.com/go-playground/locales
- version: 630ebbb602847eba93e75ae38bbc7bb7abcf1ff3
- subpackages:
- - currency
-- name: github.com/go-playground/universal-translator
- version: 71201497bace774495daed26a3874fd339e0b538
-- name: github.com/gogo/protobuf
- version: 5628607bb4c51c3157aacc3a50f0ab707582b805
- subpackages:
- - gogoproto
- - jsonpb
- - proto
- - protoc-gen-gogo/descriptor
- - sortkeys
- - types
-- name: github.com/golang/groupcache
- version: 02826c3e79038b59d737d3b1c0a1d937f71a4433
- subpackages:
- - lru
-- name: github.com/golang/mock
- version: 9fa652df1129bef0e734c9cf9bf6dbae9ef3b9fa
- subpackages:
- - gomock
-- name: github.com/golang/protobuf
- version: 6c65a5562fc06764971b7c5d05c76c75e84bdbf7
- subpackages:
- - jsonpb
- - proto
- - protoc-gen-go/descriptor
- - protoc-gen-go/plugin
- - ptypes
- - ptypes/any
- - ptypes/duration
- - ptypes/empty
- - ptypes/struct
- - ptypes/timestamp
- - ptypes/wrappers
-- name: github.com/golang/snappy
- version: 553a641470496b2327abcac10b36396bd98e45c9
-- name: github.com/google/btree
- version: 925471ac9e2131377a91e1595defec898166fe49
-- name: github.com/google/go-cmp
- version: 6f77996f0c42f7b84e5a2b252227263f93432e9b
- subpackages:
- - cmp
- - cmp/cmpopts
- - cmp/internal/diff
- - cmp/internal/flags
- - cmp/internal/function
- - cmp/internal/value
-- name: github.com/google/uuid
- version: c2e93f3ae59f2904160ceaab466009f965df46d6
-- name: github.com/gorilla/mux
- version: 00bdffe0f3c77e27d2cf6f5c70232a2d3e4d9c15
-- name: github.com/grpc-ecosystem/go-grpc-prometheus
- version: 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
-- name: github.com/grpc-ecosystem/grpc-gateway
- version: 8cc3a55af3bcf171a1c23a90c4df9cf591706104
- subpackages:
- - runtime
- - runtime/internal
- - utilities
-- name: github.com/hashicorp/hcl
- version: cf7d376da96d9cecec7c7483cec2735efe54a410
- subpackages:
- - hcl/ast
- - hcl/parser
- - hcl/printer
- - hcl/scanner
- - hcl/strconv
- - hcl/token
- - json/parser
- - json/scanner
- - json/token
-- name: github.com/hydrogen18/stalecucumber
- version: 9b38526d4bdf8e197c31344777fc28f7f48d250d
-- name: github.com/inconshreveable/mousetrap
- version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-- name: github.com/jhump/protoreflect
- version: e0795ed1d1ada047d01e90243863def21db467fc
- subpackages:
- - desc
- - desc/builder
- - desc/internal
- - desc/protoparse
- - dynamic
- - internal
-- name: github.com/jonboulle/clockwork
- version: 2eee05ed794112d45db504eb05aa693efd2b8b09
-- name: github.com/kr/logfmt
- version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0
-- name: github.com/leanovate/gopter
- version: e2604588f4db2d2e5eb78ae75d615516f55873e3
- subpackages:
- - commands
- - gen
- - prop
-- name: github.com/lightstep/lightstep-tracer-common
- version: 082dbbf1b51a87f008f51a6203c269395ac3fe54
- subpackages:
- - golang/gogo/collectorpb
- - golang/gogo/lightsteppb
-- name: github.com/lightstep/lightstep-tracer-go
- version: 95891af43e111d60f0ae6adaa39c2abe8caad401
- subpackages:
- - lightstep/rand
-- name: github.com/m3db/bitset
- version: 07973db6b78acb62ac207d0538055e874b49d90d
-- name: github.com/m3db/bloom
- version: 47fe1193cdb900de7193d1f3d26ea9b2cbf6fb31
-- name: github.com/m3db/pilosa
- version: ac8920c6e1abe06e2b0a3deba79a9910c39700e6
- subpackages:
- - roaring
-- name: github.com/m3db/prometheus_client_golang
- version: 8ae269d24972b8695572fa6b2e3718b5ea82d6b4
- subpackages:
- - prometheus
- - prometheus/promhttp
-- name: github.com/m3db/prometheus_client_model
- version: 8b2299a4bf7d7fc10835527021716d4b4a6e8700
- subpackages:
- - go
-- name: github.com/m3db/prometheus_common
- version: 25aaa3dff79bb48116615ebe1dea6a494b74ce77
- subpackages:
- - expfmt
- - internal/bitbucket.org/ww/goautoneg
- - model
-- name: github.com/m3db/prometheus_procfs
- version: 1878d9fbb537119d24b21ca07effd591627cd160
-- name: github.com/m3db/stackadler32
- version: bfebcd73ef6ffe0ee30489227f0330c39064b674
-- name: github.com/m3db/stackmurmur3
- version: 744c0229c12ed0e4f8cb9d081a2692b3300bf705
-- name: github.com/m3db/vellum
- version: e766292d14de216c324bb60b17320af72dee59c6
- subpackages:
- - regexp
-- name: github.com/magiconair/properties
- version: de8848e004dd33dc07a2947b3d76f618a7fc7ef1
-- name: github.com/matttproud/golang_protobuf_extensions
- version: c12348ce28de40eed0136aa2b644d0ee0650e56c
- subpackages:
- - pbutil
-- name: github.com/mauricelam/genny
- version: eb2c5232c885956af3565a20ecf48555cab2b9bc
- subpackages:
- - generic
-- name: github.com/MichaelTJones/pcg
- version: df440c6ed7ed8897ac98a408365e5e89c7becf1a
-- name: github.com/mitchellh/mapstructure
- version: 3536a929edddb9a5b34bd6861dc4a9647cb459fe
-- name: github.com/oklog/ulid
- version: e51a56f2a4c1bf73c967ca6d45d5366bade31943
-- name: github.com/opentracing-contrib/go-stdlib
- version: cf7a6c988dc994e945d2715565026f3cc8718689
- subpackages:
- - nethttp
-- name: github.com/opentracing/opentracing-go
- version: 659c90643e714681897ec2521c60567dd21da733
- subpackages:
- - ext
- - log
- - mocktracer
-- name: github.com/pborman/getopt
- version: ec82d864f599c39673eef89f91b93fa5576567a1
-- name: github.com/pborman/uuid
- version: adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1
-- name: github.com/pelletier/go-toml
- version: 8fe62057ea2d46ce44254c98e84e810044dbe197
-- name: github.com/pilosa/pilosa
- version: bc9747cc0f19702d9753de7ea9375d8311dfc706
- subpackages:
- - logger
- - stats
-- name: github.com/pkg/errors
- version: ba968bfe8b2f7e042a574c888954fccecfa385b4
-- name: github.com/pkg/profile
- version: 5b67d428864e92711fcbd2f8629456121a56d91f
-- name: github.com/pmezard/go-difflib
- version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
- subpackages:
- - difflib
-- name: github.com/prometheus/client_golang
- version: c5b7fccd204277076155f10851dad72b76a49317
- subpackages:
- - prometheus
-- name: github.com/prometheus/client_model
- version: 6f3806018612930941127f2a7c6c453ba2c527d2
- subpackages:
- - go
-- name: github.com/prometheus/common
- version: 287d3e634a1e550c9e463dd7e5a75a422c614505
- subpackages:
- - expfmt
- - internal/bitbucket.org/ww/goautoneg
- - model
-- name: github.com/prometheus/procfs
- version: a1dba9ce8baed984a2495b658c82687f8157b98f
- subpackages:
- - xfs
-- name: github.com/prometheus/prometheus
- version: 43acd0e2e93f9f70c49b2267efa0124f1e759e86
- subpackages:
- - pkg/gate
- - pkg/labels
- - pkg/textparse
- - pkg/timestamp
- - pkg/value
- - promql
- - storage
- - storage/tsdb
- - tsdb
- - tsdb/chunkenc
- - tsdb/chunks
- - tsdb/encoding
- - tsdb/errors
- - tsdb/fileutil
- - tsdb/goversion
- - tsdb/index
- - tsdb/labels
- - tsdb/wal
- - util/httputil
- - util/stats
- - util/strutil
- - util/teststorage
- - util/testutil
-- name: github.com/rakyll/statik
- version: 3bac566d30cdbeddef402a80f3d6305860e59f12
- subpackages:
- - fs
-- name: github.com/RoaringBitmap/roaring
- version: 4676818d7478f72f5041418f5afbb15a5080dbb7
-- name: github.com/russross/blackfriday
- version: d3b5b032dc8e8927d31a5071b56e14c89f045135
-- name: github.com/satori/go.uuid
- version: f58768cc1a7a7e77a3bd49e98cdd21419399b6a3
-- name: github.com/sergi/go-diff
- version: feef008d51ad2b3778f85d387ccf91735543008d
- subpackages:
- - diffmatchpatch
-- name: github.com/shurcooL/sanitized_anchor_name
- version: 7bfe4c7ecddb3666a94b053b422cdd8f5aaa3615
-- name: github.com/spaolacci/murmur3
- version: 9f5d223c60793748f04a9d5b4b4eacddfc1f755d
-- name: github.com/spf13/afero
- version: 588a75ec4f32903aa5e39a2619ba6a4631e28424
- subpackages:
- - mem
-- name: github.com/spf13/cast
- version: f31dc0aaab5a2feeca5c41783abbc347731fd08e
-- name: github.com/spf13/cobra
- version: 7c674d9e72017ed25f6d2b5e497a1368086b6a6f
- subpackages:
- - cobra
-- name: github.com/spf13/jwalterweatherman
- version: 94f6ae3ed3bceceafa716478c5fbf8d29ca601a1
-- name: github.com/spf13/pflag
- version: 4f9190456aed1c2113ca51ea9b89219747458dc1
-- name: github.com/spf13/viper
- version: eabbc68a3ecd5cf8c11a2f84dbda5e7a38493b2f
-- name: github.com/stretchr/objx
- version: cbeaeb16a013161a98496fad62933b1d21786672
-- name: github.com/stretchr/testify
- version: 6fe211e493929a8aac0469b93f28b1d0688a9a3a
- subpackages:
- - assert
- - mock
- - require
- - suite
-- name: github.com/subosito/gotenv
- version: de67a6614a4de71ad5e380b6946e56ab957d58c5
-- name: github.com/twotwotwo/sorts
- version: bf5c1f2b8553dec28372aa6ac5a8bf53a20a4c5b
-- name: github.com/uber-go/atomic
- version: 40ae6a40a970ef4cdbffa7b24b280e316db8accc
-- name: github.com/uber-go/tally
- version: 4292fb62574242de9fb0be278fc17d45defa5d33
- subpackages:
- - m3
- - m3/customtransports
- - m3/thrift
- - m3/thriftudp
- - multi
- - prometheus
- - thirdparty/github.com/apache/thrift/lib/go/thrift
-- name: github.com/uber/jaeger-client-go
- version: 2f47546e3facd43297739439600bcf43f44cce5d
- subpackages:
- - config
- - internal/baggage
- - internal/baggage/remote
- - internal/spanlog
- - internal/throttler
- - internal/throttler/remote
- - log
- - log/zap
- - rpcmetrics
- - thrift
- - thrift-gen/agent
- - thrift-gen/baggage
- - thrift-gen/jaeger
- - thrift-gen/sampling
- - thrift-gen/zipkincore
- - transport
- - utils
-- name: github.com/uber/jaeger-lib
- version: a87ae9d84fb038a8d79266298970720be7c80fcd
- subpackages:
- - metrics
- - metrics/tally
-- name: github.com/uber/tchannel-go
- version: 162ecb0dc97845a0c42aae3899651dba111085e5
- subpackages:
- - internal/argreader
- - relay
- - thrift
- - thrift/gen-go/meta
- - tnet
- - tos
- - trand
- - typed
-- name: github.com/ugorji/go
- version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74
- subpackages:
- - codec
-- name: github.com/willf/bitset
- version: e553b05586428962bf7058d1044519d87ca72d74
-- name: github.com/xiang90/probing
- version: 07dd2e8dfe18522e9c447ba95f2fe95262f63bb2
-- name: go.uber.org/atomic
- version: 40ae6a40a970ef4cdbffa7b24b280e316db8accc
-- name: go.uber.org/config
- version: c917157c9ba905e1c93f74a2611bc4356abe94d0
- subpackages:
- - internal/merge
- - internal/unreachable
-- name: go.uber.org/multierr
- version: 824d08f79702fe5f54aca8400aa0d754318786e7
-- name: go.uber.org/tools
- version: 2cfd321de3ee5d5f8a5fda2521d1703478334d98
- subpackages:
- - update-license
-- name: go.uber.org/zap
- version: f85c78b1dd998214c5f2138155b320a4a43fbe36
- subpackages:
- - buffer
- - internal/bufferpool
- - internal/color
- - internal/exit
- - zapcore
-- name: golang.org/x/crypto
- version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3
- subpackages:
- - bcrypt
- - blowfish
-- name: golang.org/x/lint
- version: fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448
- subpackages:
- - golint
-- name: golang.org/x/net
- version: ab5485076ff3407ad2d02db054635913f017b0ed
- repo: https://github.com/golang/net
- vcs: git
- subpackages:
- - bpf
- - context
- - http2
- - http2/hpack
- - idna
- - internal/iana
- - internal/socket
- - internal/timeseries
- - ipv4
- - ipv6
- - lex/httplex
- - trace
-- name: golang.org/x/sync
- version: 112230192c580c3556b8cee6403af37a4fc5f28c
- subpackages:
- - errgroup
-- name: golang.org/x/sys
- version: c178f38b412c7b426e4e97be2e75d11ff7b8d4d4
- subpackages:
- - unix
-- name: golang.org/x/text
- version: 4ee4af566555f5fbe026368b75596286a312663a
- subpackages:
- - secure/bidirule
- - transform
- - unicode/bidi
- - unicode/norm
-- name: golang.org/x/time
- version: c06e80d9300e4443158a03817b8a8cb37d230320
- subpackages:
- - rate
-- name: golang.org/x/tools
- version: 2aa90c603ae35ab89ce30e45e49d870b7e5e7698
- subpackages:
- - go/analysis
- - go/analysis/passes/inspect
- - go/ast/astutil
- - go/ast/inspector
- - go/buildutil
- - go/gcexportdata
- - go/internal/cgo
- - go/internal/gcimporter
- - go/internal/packagesdriver
- - go/loader
- - go/packages
- - go/types/objectpath
- - go/types/typeutil
- - internal/fastwalk
- - internal/gopathwalk
- - internal/semver
-- name: google.golang.org/appengine
- version: 2e4a801b39fc199db615bfca7d0b9f8cd9580599
- subpackages:
- - datastore
- - internal
- - internal/app_identity
- - internal/base
- - internal/datastore
- - internal/log
- - internal/modules
- - internal/remote_api
-- name: google.golang.org/genproto
- version: 09f6ed296fc66555a25fe4ce95173148778dfa85
- subpackages:
- - googleapis/api/annotations
- - googleapis/rpc/status
- - protobuf/api
- - protobuf/field_mask
- - protobuf/ptype
- - protobuf/source_context
-- name: google.golang.org/grpc
- version: 5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e
- subpackages:
- - balancer
- - codes
- - connectivity
- - credentials
- - grpclb/grpc_lb_v1/messages
- - grpclog
- - health/grpc_health_v1
- - internal
- - keepalive
- - metadata
- - naming
- - peer
- - reflection
- - reflection/grpc_reflection_v1alpha
- - resolver
- - stats
- - status
- - tap
- - transport
-- name: gopkg.in/alecthomas/kingpin.v2
- version: 947dcec5ba9c011838740e680966fd7087a71d0d
- repo: https://github.com/alecthomas/kingpin.git
- vcs: git
-- name: gopkg.in/go-playground/validator.v9
- version: a021b2ec9a8a8bb970f3f15bc42617cb520e8a64
- repo: https://github.com/go-playground/validator.git
- vcs: git
-- name: gopkg.in/ini.v1
- version: 94291fffe2b14f4632ec0e67c1bfecfc1287a168
-- name: gopkg.in/validator.v2
- version: 3e4f037f12a1221a0864cf0dd2e81c452ab22448
- repo: https://github.com/go-validator/validator.git
- vcs: git
-- name: gopkg.in/vmihailenco/msgpack.v2
- version: a1382b1ce0c749733b814157c245e02cc1f41076
- repo: https://github.com/vmihailenco/msgpack.git
- vcs: git
- subpackages:
- - codes
-- name: gopkg.in/yaml.v2
- version: 5420a8b6744d3b0345ab293f6fcba19c978f1183
- repo: https://github.com/go-yaml/yaml.git
- vcs: git
-- name: honnef.co/go/tools
- version: 717cd7a0595327ea87bc8016753ce6e0ac546200
- subpackages:
- - arg
- - cmd/staticcheck
- - code
- - config
- - deprecated
- - edit
- - facts
- - functions
- - go/types/typeutil
- - internal/cache
- - internal/passes/buildir
- - internal/renameio
- - internal/robustio
- - internal/sharedcheck
- - ir
- - ir/irutil
- - lint
- - lint/lintdsl
- - lint/lintutil
- - lint/lintutil/format
- - loader
- - pattern
- - printf
- - report
- - simple
- - staticcheck
- - stylecheck
- - unused
- - version
-- name: github.com/influxdata/influxdb
- version: 01c8dd416270f424ab0c40f9291e269ac6921964
- subpackages:
- - models
-testImports:
-- name: github.com/glycerine/go-unsnap-stream
- version: 98d31706395aaac22e29676617f2ee37bee55b5a
-- name: github.com/mschoch/smat
- version: 90eadee771aeab36e8bf796039b8c261bebebe4f
-- name: github.com/philhofer/fwd
- version: bb6d471dc95d4fe11e432687f8b70ff496cf3136
-- name: github.com/tinylib/msgp
- version: efe20429c9b7b2a358aba9ba2d40ad56d966ce00
- subpackages:
- - msgp
diff --git a/glide.yaml b/glide.yaml
deleted file mode 100644
index c04db5e56c..0000000000
--- a/glide.yaml
+++ /dev/null
@@ -1,265 +0,0 @@
-package: github.com/m3db/m3
-import:
- - package: github.com/influxdata/influxdb
- version: 01c8dd416270f424ab0c40f9291e269ac6921964
- subpackages:
- - models
-
- - package: github.com/m3db/bitset
- version: 07973db6b78acb62ac207d0538055e874b49d90d
-
- - package: github.com/m3db/bloom
- version: 47fe1193cdb900de7193d1f3d26ea9b2cbf6fb31
-
- - package: github.com/m3db/stackmurmur3
- version: 744c0229c12ed0e4f8cb9d081a2692b3300bf705
-
- - package: github.com/m3db/stackadler32
- version: bfebcd73ef6ffe0ee30489227f0330c39064b674
-
- - package: github.com/MichaelTJones/pcg
- version: df440c6ed7ed8897ac98a408365e5e89c7becf1a
-
- - package: github.com/willf/bitset
- version: e553b05586428962bf7058d1044519d87ca72d74
-
- - package: github.com/cespare/xxhash
- version: 48099fad606eafc26e3a569fad19ff510fff4df6
-
- - package: go.etcd.io/etcd
- version: 3.4.3
-
- - package: github.com/pkg/errors
- version: ^0.8
-
- - package: github.com/apache/thrift
- version: 0.9.3-pool-read-binary-3
- subpackages:
- - lib/go/thrift
- repo: https://github.com/m3db/thrift
- vcs: git
-
- - package: github.com/golang/mock
- version: ^1
- subpackages:
- - gomock
-
- - package: github.com/golang/protobuf
- version: ^1.1.0
- subpackages:
- - proto
- - ptypes/timestamp
- - jsonpb
-
- - package: github.com/gogo/protobuf
- version: ^1
-
- - package: github.com/jhump/protoreflect
- version: e0795ed1d1ada047d01e90243863def21db467fc
-
- - package: go.uber.org/zap
- version: f85c78b1dd998214c5f2138155b320a4a43fbe36
-
- - package: github.com/spaolacci/murmur3
- version: 9f5d223c60793748f04a9d5b4b4eacddfc1f755d
-
- - package: github.com/uber/tchannel-go
- version: v1.12.0
- subpackages:
- - thrift
-
- - package: gopkg.in/vmihailenco/msgpack.v2
- version: a1382b1ce0c749733b814157c245e02cc1f41076
- repo: https://github.com/vmihailenco/msgpack.git
- vcs: git
-
- - package: github.com/uber-go/tally
- version: ^3.3.10
-
- - package: golang.org/x/net
- version: ab5485076ff3407ad2d02db054635913f017b0ed
- repo: https://github.com/golang/net
- vcs: git
-
- - package: google.golang.org/appengine/datastore
- version: 2e4a801b39fc199db615bfca7d0b9f8cd9580599
-
- - package: github.com/pborman/getopt
- version: ec82d864f599c39673eef89f91b93fa5576567a1
-
- - package: github.com/spf13/cobra
- version: 7c674d9e72017ed25f6d2b5e497a1368086b6a6f
- subpackages:
- - cobra
-
- - package: github.com/spf13/pflag
- version: 4f9190456aed1c2113ca51ea9b89219747458dc1
-
- - package: github.com/spf13/viper
- version: ^1.0.0
-
- - package: github.com/RoaringBitmap/roaring
- version: ^0.4
-
- - package: github.com/uber-go/atomic
- version: ^1.2.0
-
- - package: github.com/satori/go.uuid
- version: ^1.2.0
-
- # NB(r): make sure to use the master commit for vellum
- # once all upstream changes are complete in github.com/m3db/vellum.
- - package: github.com/m3db/vellum
- version: e766292d14de216c324bb60b17320af72dee59c6
-
- - package: github.com/edsrzf/mmap-go # un-used but required for a compile time dep from vellum
- version: 0bce6a6887123b67a60366d2c9fe2dfb74289d2e
-
- # NB(r): make sure to use the master commit for pilosa
- # once all upstream changes are complete in github.com/pilosa/pilosa.
- - package: github.com/m3db/pilosa/roaring
- version: ac8920c6e1abe06e2b0a3deba79a9910c39700e6
-
- # NB(prateek): ideally, the following dependencies would be under testImport, but
- # Glide doesn't like that. https://github.com/Masterminds/glide/issues/564
- - package: github.com/stretchr/testify
- version: 6fe211e493929a8aac0469b93f28b1d0688a9a3a
- subpackages:
- - require
-
- - package: github.com/fortytw2/leaktest
- version: b433bbd6d743c1854040b39062a3916ed5f78fe8
-
- - package: github.com/sergi/go-diff
- version: feef008d51ad2b3778f85d387ccf91735543008d
-
- - package: github.com/golang/snappy
- version: 553a641470496b2327abcac10b36396bd98e45c9
-
- - package: github.com/gorilla/mux
- version: ^1.6.0
-
- - package: github.com/pborman/uuid
- version: ^1.1.0
-
- - package: gopkg.in/alecthomas/kingpin.v2
- version: ^2.2.6
- repo: https://github.com/alecthomas/kingpin.git
- vcs: git
-
- - package: github.com/pkg/profile
- version: 5b67d428864e92711fcbd2f8629456121a56d91f
-
- - package: golang.org/x/sync
- subpackages:
- - errgroup
-
- - package: github.com/google/go-cmp
- version: 0.3
- subpackages:
- - cmp
-
- - package: github.com/hydrogen18/stalecucumber
- version: 9b38526d4bdf8e197c31344777fc28f7f48d250d
-
- - package: github.com/c2h5oh/datasize
- version: 4eba002a5eaea69cf8d235a388fc6b65ae68d2dd
-
- # START_PROMETHEUS_DEPS
- - package: github.com/prometheus/prometheus
- version: ~2.12.0
-
- # To avoid prometheus/prometheus dependencies from breaking,
- # pin the transitive dependencies
- - package: github.com/prometheus/common
- version: ~0.7.0
- # END_PROMETHEUS_DEPS
-
- # START_TALLY_PROMETHEUS_DEPS
- - package: github.com/m3db/prometheus_client_golang
- version: 8ae269d24972b8695572fa6b2e3718b5ea82d6b4
-
- - package: github.com/m3db/prometheus_client_model
- version: 8b2299a4bf7d7fc10835527021716d4b4a6e8700
-
- - package: github.com/m3db/prometheus_common
- version: 25aaa3dff79bb48116615ebe1dea6a494b74ce77
-
- - package: github.com/m3db/prometheus_procfs
- version: 1878d9fbb537119d24b21ca07effd591627cd160
- # END_PROMETHEUS_DEPS
-
- - package: github.com/coreos/pkg
- version: 4
- subpackages:
- - capnslog
-
- # START_TRACING_DEPS
- - package: github.com/opentracing/opentracing-go
- version: ^1.1.0
-
- - package: github.com/uber/jaeger-lib
- version: ^2.0.0
-
- - package: github.com/uber/jaeger-client-go
- version: ~2.16.0
-
- - package: github.com/lightstep/lightstep-tracer-go
- version: ~v0.18.0
-
- - package: github.com/lightstep/lightstep-tracer-common
- version: ~1.0.3
-
- - package: github.com/opentracing-contrib/go-stdlib
- # Pin this on recommendation of the repo (no stable release yet). Still arguably better than rewriting
- # the same code.
- version: cf7a6c988dc994e945d2715565026f3cc8718689
-
- # END_TRACING_DEPS
-
- # To avoid conflicting packages not resolving the latest GRPC
- - package: google.golang.org/grpc
- version: 1.7.5
- subpackages:
- - codes
-
- - package: gopkg.in/validator.v2
- version: 3e4f037f12a1221a0864cf0dd2e81c452ab22448
- repo: https://github.com/go-validator/validator.git
- vcs: git
-
- - package: gopkg.in/go-playground/validator.v9
- version: a021b2ec9a8a8bb970f3f15bc42617cb520e8a64
- repo: https://github.com/go-playground/validator.git
- vcs: git
-
- - package: github.com/go-playground/universal-translator
- version: 71201497bace774495daed26a3874fd339e0b538
-
- - package: gopkg.in/yaml.v2
- version: 5420a8b6744d3b0345ab293f6fcba19c978f1183
- repo: https://github.com/go-yaml/yaml.git
- vcs: git
-
- - package: github.com/russross/blackfriday
- version: ^2.0.1
-
- - package: github.com/mauricelam/genny
- version: eb2c5232c885956af3565a20ecf48555cab2b9bc
-
- - package: github.com/leanovate/gopter
- version: e2604588f4db2d2e5eb78ae75d615516f55873e3
-
- - package: github.com/rakyll/statik
- version: ^0.1.6
-
- - package: golang.org/x/sys
- subpackages:
- - unix
- version: c178f38b412c7b426e4e97be2e75d11ff7b8d4d4
-
- - package: go.uber.org/config
- version: ^1.3.1
-
- - package: github.com/twotwotwo/sorts
- version: bf5c1f2b8553dec28372aa6ac5a8bf53a20a4c5b
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000000..6ef61528ee
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,164 @@
+module github.com/m3db/m3
+
+go 1.13
+
+require (
+ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
+ github.com/CAFxX/gcnotifier v0.0.0-20190112062741-224a280d589d // indirect
+ github.com/DataDog/datadog-go v3.7.1+incompatible // indirect
+ github.com/Masterminds/semver v1.5.0 // indirect
+ github.com/MichaelTJones/pcg v0.0.0-20180122055547-df440c6ed7ed
+ github.com/Microsoft/go-winio v0.4.14 // indirect
+ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
+ github.com/RoaringBitmap/roaring v0.4.21
+ github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
+ github.com/apache/thrift v0.13.0
+ github.com/apex/log v1.3.0 // indirect
+ github.com/bmatcuk/doublestar v1.3.1 // indirect
+ github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b // indirect
+ github.com/briandowns/spinner v1.11.1 // indirect
+ github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae
+ github.com/cespare/xxhash/v2 v2.1.1
+ github.com/cheekybits/genny v1.0.0 // indirect
+ github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb // indirect
+ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
+ github.com/davecgh/go-spew v1.1.1
+ github.com/docker/go-connections v0.4.0 // indirect
+ github.com/fortytw2/leaktest v1.2.1-0.20180901000122-b433bbd6d743
+ github.com/fossas/fossa-cli v1.0.30
+ github.com/garethr/kubeval v0.0.0-20180821130434-c44f5193dc94
+ github.com/ghodss/yaml v1.0.0
+ github.com/gnewton/jargo v0.0.0-20150417131352-41f5f186a805 // indirect
+ github.com/go-kit/kit v0.10.0
+ github.com/go-ole/go-ole v1.2.4 // indirect
+ github.com/go-playground/universal-translator v0.17.0 // indirect
+ github.com/gogo/protobuf v1.3.1
+ github.com/golang/mock v1.4.3
+ github.com/golang/protobuf v1.3.3
+ github.com/golang/snappy v0.0.1
+ github.com/google/go-cmp v0.4.0
+ github.com/google/go-jsonnet v0.16.0
+ github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f // indirect
+ github.com/gorilla/handlers v1.4.2 // indirect
+ github.com/gorilla/mux v1.7.3
+ github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
+ github.com/hashicorp/hcl v1.0.1-0.20190611123218-cf7d376da96d // indirect
+ github.com/hydrogen18/stalecucumber v0.0.0-20151102144322-9b38526d4bdf
+ github.com/influxdata/influxdb v1.7.7
+ github.com/jhump/protoreflect v1.6.1
+ github.com/json-iterator/go v1.1.9
+ github.com/leanovate/gopter v0.2.8
+ github.com/lib/pq v1.6.0 // indirect
+ github.com/lightstep/lightstep-tracer-go v0.18.1
+ github.com/m3db/bitset v2.0.0+incompatible
+ github.com/m3db/bloom/v4 v4.0.0-20200901140942-52efb8544fe9
+ github.com/m3db/build-tools v0.0.0-20181013000606-edd1bdd1df8a
+ github.com/m3db/m3x v0.0.0-20190408051622-ebf3c7b94afd // indirect
+ github.com/m3db/prometheus_client_golang v0.8.1
+ github.com/m3db/prometheus_client_model v0.0.0-20180517145114-8b2299a4bf7d
+ github.com/m3db/prometheus_common v0.0.0-20180517030744-25aaa3dff79b
+ github.com/m3db/prometheus_procfs v0.8.1
+ github.com/m3db/stackadler32 v0.0.0-20180104200216-bfebcd73ef6f
+ github.com/m3db/stackmurmur3/v2 v2.0.2
+ github.com/m3db/tools v0.0.0-20181008195521-c6ded3f34878
+ github.com/m3dbx/pilosa v1.4.1
+ github.com/m3dbx/vellum v0.0.0-20200826162549-f94c029903de
+ github.com/mauricelam/genny v0.0.0-20180903214747-eb2c5232c885
+ github.com/mjibson/esc v0.1.0
+ github.com/opencontainers/image-spec v1.0.1 // indirect
+ github.com/opencontainers/runc v0.1.1 // indirect
+ github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9
+ github.com/opentracing/opentracing-go v1.2.0
+ github.com/ory/dockertest v3.3.5+incompatible
+ github.com/pborman/getopt v0.0.0-20160216163137-ec82d864f599
+ github.com/pborman/uuid v1.2.0
+ github.com/pelletier/go-toml v1.5.0 // indirect
+ github.com/pkg/errors v0.9.1
+ github.com/pkg/profile v1.2.1
+ github.com/pointlander/compress v1.1.0 // indirect
+ github.com/pointlander/jetset v1.0.0 // indirect
+ github.com/pointlander/peg v1.0.0
+ github.com/prashantv/protectmem v0.0.0-20171002184600-e20412882b3a // indirect
+ github.com/prometheus/client_golang v1.5.1
+ github.com/prometheus/common v0.9.1
+ github.com/prometheus/prometheus v1.8.2-0.20200420081721-18254838fbe2
+ github.com/rakyll/statik v0.1.6
+ github.com/remeh/sizedwaitgroup v1.0.0 // indirect
+ github.com/rhysd/go-github-selfupdate v1.2.2 // indirect
+ github.com/robskillington/gorename v0.0.0-20180424020013-52c7307cddd2
+ github.com/rveen/ogdl v0.0.0-20200522080342-eeeda1a978e7 // indirect
+ github.com/satori/go.uuid v1.2.0
+ github.com/sergi/go-diff v1.1.0
+ github.com/shirou/gopsutil v2.20.5+incompatible // indirect
+ github.com/spf13/cast v1.3.1-0.20190531151931-f31dc0aaab5a // indirect
+ github.com/spf13/cobra v0.0.5
+ github.com/spf13/jwalterweatherman v1.1.0 // indirect
+ github.com/spf13/pflag v1.0.5
+ github.com/spf13/viper v1.7.1
+ github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 // indirect
+ github.com/stretchr/testify v1.6.1
+ github.com/subosito/gotenv v1.2.1-0.20190917103637-de67a6614a4d // indirect
+ github.com/twotwotwo/sorts v0.0.0-20160814051341-bf5c1f2b8553
+ github.com/uber-go/atomic v0.0.0-00010101000000-000000000000 // indirect
+ github.com/uber-go/tally v3.3.13+incompatible
+ github.com/uber/jaeger-client-go v2.25.0+incompatible
+ github.com/uber/jaeger-lib v2.2.0+incompatible
+ github.com/uber/tchannel-go v1.12.0
+ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a
+ github.com/willf/bitset v1.1.10
+ github.com/xeipuuv/gojsonschema v1.2.0 // indirect
+
+ // This is 3.4.13. Note: we need to specify the version this way due to the issue
+ // described in https://github.com/etcd-io/etcd/issues/11154.
+ // The version string was obtained by the method described in
+ // https://github.com/etcd-io/etcd/issues/11154#issuecomment-568587798
+ // (a short sketch decoding this pseudo-version follows the require block below).
+ go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b
+ go.uber.org/atomic v1.6.0
+ go.uber.org/config v1.4.0
+ go.uber.org/zap v1.13.0
+ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect
+ golang.org/x/net v0.0.0-20200822124328-c89045814202
+ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
+ golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a
+ golang.org/x/tools v0.0.0-20200601175630-2caf76543d99 // indirect
+ google.golang.org/grpc v1.27.1
+ gopkg.in/go-ini/ini.v1 v1.57.0 // indirect
+ gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
+ gopkg.in/go-playground/validator.v9 v9.7.0
+ gopkg.in/ini.v1 v1.51.1 // indirect
+ gopkg.in/russross/blackfriday.v2 v2.0.0
+ gopkg.in/src-d/go-git.v4 v4.13.1 // indirect
+ gopkg.in/validator.v2 v2.0.0-20160201165114-3e4f037f12a1
+ gopkg.in/vmihailenco/msgpack.v2 v2.8.3
+ gopkg.in/yaml.v2 v2.2.8
+ gotest.tools v2.2.0+incompatible
+)
+
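For context on the etcd entry above: that version string is a standard Go pseudo-version, encoding a base tag (v0.5.0-alpha.5), a UTC commit timestamp, and a 12-character commit-hash prefix. A minimal, illustrative sketch of decoding it, assuming the golang.org/x/mod/module helpers (x/mod is not a dependency of this module; purely for illustration):

    package main

    import (
    	"fmt"

    	"golang.org/x/mod/module" // assumed helper package, not part of this go.mod
    )

    func main() {
    	v := "v0.5.0-alpha.5.0.20200824191128-ae9734ed278b"
    	fmt.Println(module.IsPseudoVersion(v)) // true: base tag + timestamp + commit prefix
    	rev, _ := module.PseudoVersionRev(v)   // "ae9734ed278b": the pinned etcd commit
    	t, _ := module.PseudoVersionTime(v)    // commit time: 2020-08-24 19:11:28 UTC
    	fmt.Println(rev, t.UTC())
    }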
+// Pinned to the m3db/thrift fork's branch 0.9.3-pool-read-binary-3.
+replace github.com/apache/thrift => github.com/m3db/thrift v0.0.0-20190820191926-05b5a2227fe4
+
+// NB(nate): upgrading to the latest msgpack is not backwards compatible, as msgpack no longer attempts to automatically
+// write an integer into the smallest number of bytes it fits in. We rely on this behavior: helper methods
+// in at least two encoders (see below) take int64s and expect msgpack to size them down accordingly. We'll have
+// to make integer sizing explicit before attempting to upgrade.
+//
+// Encoders:
+// src/metrics/encoding/msgpack/base_encoder.go
+// src/dbnode/persist/fs/msgpack/encoder.go
+replace gopkg.in/vmihailenco/msgpack.v2 => github.com/vmihailenco/msgpack v2.8.3+incompatible
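To make the integer-sizing behavior described above concrete, here is a minimal sketch against the pinned msgpack v2 (assuming v2's variadic Marshal helper; the byte counts follow the msgpack wire format, where a small integer fits in a one-byte fixint while a large one needs a one-byte header plus eight payload bytes):

    package main

    import (
    	"fmt"

    	msgpack "gopkg.in/vmihailenco/msgpack.v2"
    )

    func main() {
    	// v2 sizes integers down: a small int64 encodes as a single positive-fixint byte.
    	small, _ := msgpack.Marshal(int64(5))
    	// A value past the 32-bit range needs a full 64-bit encoding: 1 header + 8 bytes.
    	large, _ := msgpack.Marshal(int64(1) << 40)
    	fmt.Println(len(small), len(large)) // expected: 1 9
    }

Under a newer msgpack, the first length would grow to the full 64-bit width, which is exactly the incompatibility the note above warns about.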
+
+replace github.com/stretchr/testify => github.com/stretchr/testify v1.1.4-0.20160305165446-6fe211e49392
+
+replace github.com/prometheus/common => github.com/prometheus/common v0.9.1
+
+// Fix legacy import path - https://github.com/uber-go/atomic/pull/60
+replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
+
+// Pull in https://github.com/etcd-io/bbolt/pull/220, required for go 1.14 compatibility.
+//
+// etcd 3.4.13 depends on bbolt v1.3.3, but everything before v1.3.5 misuses unsafe and fails hard on go 1.14.
+// TODO: remove once etcd pulls the change into a new release on the 3.4 branch.
+replace go.etcd.io/bbolt => go.etcd.io/bbolt v1.3.5
+
+// https://github.com/ory/dockertest/issues/212
+replace golang.org/x/sys => golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000000..997b87a67d
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,1144 @@
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-sdk-for-go v40.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
+github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/CAFxX/gcnotifier v0.0.0-20170518020117-39b0596a2da3/go.mod h1:Rn2zM2MnHze07LwkneP48TWt6UiZhzQTwCvw6djVGfE=
+github.com/CAFxX/gcnotifier v0.0.0-20190112062741-224a280d589d h1:n0G4ckjMEj7bWuGYUX0i8YlBeBBJuZ+HEHvHfyBDZtI=
+github.com/CAFxX/gcnotifier v0.0.0-20190112062741-224a280d589d/go.mod h1:Rn2zM2MnHze07LwkneP48TWt6UiZhzQTwCvw6djVGfE=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/datadog-go v3.7.1+incompatible h1:HmA9qHVrHIAqpSvoCYJ+c6qst0lgqEhNW6/KwfkHbS8=
+github.com/DataDog/datadog-go v3.7.1+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/MichaelTJones/pcg v0.0.0-20180122055547-df440c6ed7ed h1:hQC4FSwvsLH6rOLJTndsHnANARF9RwW4PbrDTjks/0A=
+github.com/MichaelTJones/pcg v0.0.0-20180122055547-df440c6ed7ed/go.mod h1:NQ4UMHqyfXyYVmZopcfwPRWJa0rw2aH16eDIltReVUo=
+github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
+github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/RoaringBitmap/roaring v0.4.21 h1:WJ/zIlNX4wQZ9x8Ey33O1UaD9TCTakYsdLFSBcTwH+8=
+github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
+github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alexbrainman/sspi v0.0.0-20180613141037-e580b900e9f5 h1:P5U+E4x5OkVEKQDklVPmzs71WM56RTTRqV4OrDC//Y4=
+github.com/alexbrainman/sspi v0.0.0-20180613141037-e580b900e9f5/go.mod h1:976q2ETgjT2snVCf2ZaBnyBbVoPERGjUz+0sofzEfro=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
+github.com/apex/log v1.3.0 h1:1fyfbPvUwD10nMoh3hY6MXzvZShJQn9/ck7ATgAt5pA=
+github.com/apex/log v1.3.0/go.mod h1:jd8Vpsr46WAe3EZSQ/IUMs2qQD/GOycT5rPWCO1yGcs=
+github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
+github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE=
+github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.4 h1:Xqf+7f2Vhl9tsqDYmXhnXInUdcrtgpRNpIA15/uldSc=
+github.com/armon/go-metrics v0.3.4/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
+github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
+github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.29.18/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
+github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmatcuk/doublestar v1.3.1 h1:rT8rxDPsavp9G+4ZULzqhhUSaI/OPsTZNG88Z3i0xvY=
+github.com/bmatcuk/doublestar v1.3.1/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
+github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b h1:AP/Y7sqYicnjGDfD5VcY4CIfh1hRXBUavxrvELjTiOE=
+github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q=
+github.com/briandowns/spinner v1.11.1 h1:OixPqDEcX3juo5AjQZAnFPbeUA0jvkp2qzB5gOZJ/L0=
+github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
+github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae h1:2Zmk+8cNvAGuY8AyvZuWpUdpQUAXwfom4ReVMe/CTIo=
+github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
+github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
+github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
+github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb h1:nXPkFq8X1a9ycY3GYQpFNxHh3j2JgY7zDZfq2EXMIzk=
+github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
+github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/fortytw2/leaktest v1.2.1-0.20180901000122-b433bbd6d743 h1:QDM8xNoGxemDHdExynv+HzqkTPsFFZ8EyZdMwGElpGg=
+github.com/fortytw2/leaktest v1.2.1-0.20180901000122-b433bbd6d743/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/fossas/fossa-cli v1.0.30 h1:QVD8zvBjkwUnL4qui63TkoxDlvbp6vX4kBV/Tr7rGZE=
+github.com/fossas/fossa-cli v1.0.30/go.mod h1:5K4/qTj0P2qaT1G3SccFidhmazoJ9dm/OexAAYT8lOI=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/garethr/kubeval v0.0.0-20180821130434-c44f5193dc94 h1:NMtO+FvLt7roVanhHmJUsIRq9sEbEytH/PWNE+zR8vw=
+github.com/garethr/kubeval v0.0.0-20180821130434-c44f5193dc94/go.mod h1:L8VwozDBY4bGI25r29I6FURZus8xlVo/B7lNOSfre2g=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
+github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4=
+github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
+github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8=
+github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/gnewton/jargo v0.0.0-20150417131352-41f5f186a805 h1:rLZXvVgFIon3lI+v9IL8t1AmG9/yLMSRB5LQ0frn+6Q=
+github.com/gnewton/jargo v0.0.0-20150417131352-41f5f186a805/go.mod h1:x+HLDnZexLq1FmhrdgFf4c3EWGbqhU3ITvISBFyzvRo=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
+github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
+github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
+github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
+github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
+github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
+github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
+github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
+github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
+github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
+github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-github/v30 v30.1.0 h1:VLDx+UolQICEOKu2m4uAoMti1SxuEBAl7RSEG16L+Oo=
+github.com/google/go-github/v30 v30.1.0/go.mod h1:n8jBpHl45a/rlBUtRJMOG4GhNADUQFEufcolZ95JfU8=
+github.com/google/go-jsonnet v0.16.0 h1:Nb4EEOp+rdeGGyB1rQ5eisgSAqrTnhf9ip+X6lzZbY0=
+github.com/google/go-jsonnet v0.16.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw=
+github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f h1:XXzyYlFbxK3kWfcmu3Wc+Tv8/QQl/VqwsWuSYF1Rj0s=
+github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
+github.com/gophercloud/gophercloud v0.8.0/go.mod h1:Kc/QKr9thLKruO/dG0szY8kRIYS+iENz0ziI0hJf76A=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw=
+github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/handlers v1.3.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=
+github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
+github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
+github.com/gorilla/sessions v1.2.0 h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ=
+github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
+github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.14.1 h1:YuM9SXYy583fxvSOkzCDyBPCtY+/IMSHEG1dKFMLZsA=
+github.com/grpc-ecosystem/grpc-gateway v1.14.1/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8=
+github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
+github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/hcl v1.0.1-0.20190611123218-cf7d376da96d h1:r4iSf+UX1tNxFJZ64FsUoOfysT7TePSbRNz4/mYGUIE=
+github.com/hashicorp/hcl v1.0.1-0.20190611123218-cf7d376da96d/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.1.4 h1:gkyML/r71w3FL8gUi74Vk76avkj/9lYAY9lvg0OcoGs=
+github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/hydrogen18/stalecucumber v0.0.0-20151102144322-9b38526d4bdf h1:d8mEMzY9ktqK+eVFDLIsYOcM19/yYvZlmR0kcb4MrSQ=
+github.com/hydrogen18/stalecucumber v0.0.0-20151102144322-9b38526d4bdf/go.mod h1:KE5xQoh/IqNckSFoQXL5o5nEkrBiUDxatgac7TSMQ8Y=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf h1:WfD7VjIE6z8dIvMsI4/s+1qr5EL+zoIGev1BQj1eoJ8=
+github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf/go.mod h1:hyb9oH7vZsitZCiBt0ZvifOrB+qc8PS5IiilCIb87rg=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/influxdb v1.7.7 h1:UvNzAPfBrKMENVbQ4mr4ccA9sW+W1Ihl0Yh1s0BiVAg=
+github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
+github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
+github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
+github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
+github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
+github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
+github.com/jcmturner/gokrb5/v8 v8.2.0 h1:lzPl/30ZLkTveYsYZPKMcgXc8MbnE6RsTd4F9KgiLtk=
+github.com/jcmturner/gokrb5/v8 v8.2.0/go.mod h1:T1hnNppQsBtxW0tCHMHTkAt8n/sABdzZgZdoFrZaZNM=
+github.com/jcmturner/rpc/v2 v2.0.2 h1:gMB4IwRXYsWw4Bc6o/az2HJgFUA1ffSh90i26ZJ6Xl0=
+github.com/jcmturner/rpc/v2 v2.0.2/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jhump/protoreflect v1.6.1 h1:4/2yi5LyDPP7nN+Hiird1SAJ6YoxUm13/oxHGRnbPd8=
+github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=
+github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/leanovate/gopter v0.2.8 h1:eFPtJ3aa5zLfbxGROSNY75T9Dume60CWBAqoWQ3h/ig=
+github.com/leanovate/gopter v0.2.8/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8=
+github.com/lib/pq v1.6.0 h1:I5DPxhYJChW9KYc66se+oKFFQX6VuQrKiprsX6ivRZc=
+github.com/lib/pq v1.6.0/go.mod h1:4vXEAYvW1fRQ2/FhZ78H73A60MHw1geSm145z2mdY1g=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743 h1:143Bb8f8DuGWck/xpNUOckBVYfFbBTnLevfRZ1aVVqo=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1 h1:vi1F1IQ8N7hNWytK9DpJsUfQhGuNSc19z330K6vl4zk=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/m3db/bitset v2.0.0+incompatible h1:wMgri1Z2QSwJ8K/7ZuV7vE4feLOT7EofVC8RakIOybI=
+github.com/m3db/bitset v2.0.0+incompatible/go.mod h1:X8CCqZmZxs2O6d4qHhiqtAKCin4G5mScPhiwX9rsc5c=
+github.com/m3db/bloom v3.0.1+incompatible h1:EILDlnoiPKJAGtg3RCm7Zf5VIpwh+hgcHokoZNzeJy8=
+github.com/m3db/bloom v3.0.1+incompatible/go.mod h1:W6XzpFw4t+CIYq+NGyp5c2394YsUc1P1+W/KAWty2lU=
+github.com/m3db/bloom/v4 v4.0.0-20200901140942-52efb8544fe9 h1:H5Iznc9FI44Sekos8STE+Hj2cPohzaYfWTUHf77q6RY=
+github.com/m3db/bloom/v4 v4.0.0-20200901140942-52efb8544fe9/go.mod h1:JDmGHlO6ygyY1V9eOHtXiNl3+axznDTrBqwWEeWALlQ=
+github.com/m3db/build-tools v0.0.0-20181013000606-edd1bdd1df8a h1:CwsSHIJLeCESKdZ844jXg/3rQD3yA5azuVlJBp5w8U8=
+github.com/m3db/build-tools v0.0.0-20181013000606-edd1bdd1df8a/go.mod h1:Pk9AtZeKuCO2xcAth0gxwzRNFv4lV26GPSx4I6A7DQ8=
+github.com/m3db/m3x v0.0.0-20190408051622-ebf3c7b94afd h1:wzLBtXzxZM9b6IXwLSRE5crynocLTCuRDpGDaOJzyuI=
+github.com/m3db/m3x v0.0.0-20190408051622-ebf3c7b94afd/go.mod h1:zLbcVb352e3Jsg62A6zzEhZ1gumeFsiamTqDs9ZmZrs=
+github.com/m3db/prometheus_client_golang v0.8.1 h1:t7w/tcFws81JL1j5sqmpqcOyQOpH4RDOmIe3A3fdN3w=
+github.com/m3db/prometheus_client_golang v0.8.1/go.mod h1:8R/f1xYhXWq59KD/mbRqoBulXejss7vYtYzWmruNUwI=
+github.com/m3db/prometheus_client_model v0.0.0-20180517145114-8b2299a4bf7d h1:BtNPRz2kmh42OAVYjsOBjty2wE0FaIh7aPVtFNPcF48=
+github.com/m3db/prometheus_client_model v0.0.0-20180517145114-8b2299a4bf7d/go.mod h1:Qfsxn+LypxzF+lNhak7cF7k0zxK7uB/ynGYoj80zcD4=
+github.com/m3db/prometheus_common v0.0.0-20180517030744-25aaa3dff79b h1:DEtcqizQ9PnY4xudqHyze5F89jijy33fnPYYSCCYPXY=
+github.com/m3db/prometheus_common v0.0.0-20180517030744-25aaa3dff79b/go.mod h1:EBmDQaMAy4B8i+qsg1wMXAelLNVbp49i/JOeVszQ/rs=
+github.com/m3db/prometheus_procfs v0.8.1 h1:LsxWzVELhDU9sLsZTaFLCeAwCn7bC7qecZcK4zobs/g=
+github.com/m3db/prometheus_procfs v0.8.1/go.mod h1:N8lv8fLh3U3koZx1Bnisj60GYUMDpWb09x1R+dmMOJo=
+github.com/m3db/stackadler32 v0.0.0-20180104200216-bfebcd73ef6f h1:+FWCOZjB96lBc0L7lvz6/TW1gF5fEVd6JQNQG59CpZE=
+github.com/m3db/stackadler32 v0.0.0-20180104200216-bfebcd73ef6f/go.mod h1:VJQWii2dj/AE1cRKup4MXL4TKbbs5J4wZnNe5I1CZaw=
+github.com/m3db/stackmurmur3 v1.0.1 h1:ASTdJ6Bd2m34dgsnOBZHrwWdKGcpaavP3MokiKL/ERs=
+github.com/m3db/stackmurmur3 v1.0.1/go.mod h1:hkR/F91aDUfPxDv5Jw55ifIXV6ZW1jzNHW0d8GP02rw=
+github.com/m3db/stackmurmur3/v2 v2.0.2 h1:q/kOlC12PwOib9XziHUTM/jsCQULHnRxBz/H4hT/Aro=
+github.com/m3db/stackmurmur3/v2 v2.0.2/go.mod h1:HnguA9wfEbTqYdTT/5klUlhxSDPRMGaWyvLzugxW+MA=
+github.com/m3db/thrift v0.0.0-20190820191926-05b5a2227fe4 h1:1x3mMuURd3wqKJ2qVjhRYOAmL9g4EA9JTagWB/y/3xo=
+github.com/m3db/thrift v0.0.0-20190820191926-05b5a2227fe4/go.mod h1:xVfRinGzD3cYDRvMjy6RkIwM+iNL2KHNLZjT0VpVZT8=
+github.com/m3db/tools v0.0.0-20181008195521-c6ded3f34878 h1:kww0LtVVfGrXR7Ofpbi/9bvc2EGYMQC0LCH/gQXoolE=
+github.com/m3db/tools v0.0.0-20181008195521-c6ded3f34878/go.mod h1:TxroQUZzb1wzOsq+4+TfVtT7z89YTz3v2UJAYfLNfLE=
+github.com/m3dbx/pilosa v1.4.1 h1:/Cpp1XAHSd6orpjceXGiKpCoDdYBP5BD/6NoqGG9eVg=
+github.com/m3dbx/pilosa v1.4.1/go.mod h1:Jt0+w9O08sa7qWDeRC58VBjb4OeOTDMOhfvVmyeVCO8=
+github.com/m3dbx/vellum v0.0.0-20200826162549-f94c029903de h1:C4DpCfTNzJf5RhJqxOtfWAnD2d6ls7KDnK1boBGUnVg=
+github.com/m3dbx/vellum v0.0.0-20200826162549-f94c029903de/go.mod h1:DOTAUfV4bzK6Nrb0dboT/oCG0DnQuX+/n0jfZPh6xxI=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.2 h1:znVR8Q4g7/WlcvsxLBRWvo+vtFJUAbDn3w+Yak2xVMI=
+github.com/magiconair/properties v1.8.2/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mauricelam/genny v0.0.0-20180903214747-eb2c5232c885 h1:nCU/HIvsORu8nlebFTTkEpxao5zA/yt5Y4yQccm34bM=
+github.com/mauricelam/genny v0.0.0-20180903214747-eb2c5232c885/go.mod h1:wRyVMWiOZeVj+MieWS5tIBBtJ3RtqqMbPsA5Z+t5b5U=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM=
+github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mjibson/esc v0.1.0 h1:5ch+murgrcwDFLOE2hwj0f7kE4xJfJhkSCAjSLY182o=
+github.com/mjibson/esc v0.1.0/go.mod h1:9Hw9gxxfHulMF5OJKCyhYD7PzlSdhzXyaGEBRPH1OPs=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
+github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5 h1:58+kh9C6jJVXYjt8IE48G2eWl6BjwU5Gj0gqY84fy78=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9 h1:QsgXACQhd9QJhEmRumbsMQQvBtmdS0mafoVEBplWXEg=
+github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA=
+github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pborman/getopt v0.0.0-20160216163137-ec82d864f599 h1:kpwMY/v/NNm+lnaTP5L9WVK8YEb6T3fu+XBAy+7M0kw=
+github.com/pborman/getopt v0.0.0-20160216163137-ec82d864f599/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
+github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.5.0 h1:5BakdOZdtKJ1FFk6QdL8iSGrMWsXgchNJcrnarjbmJQ=
+github.com/pelletier/go-toml v1.5.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
+github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pointlander/compress v1.1.0 h1:5fUcQV2qEHvk0OpILH6eltwluN5VnwiYrkc1wjGUHnU=
+github.com/pointlander/compress v1.1.0/go.mod h1:q5NXNGzqj5uPnVuhGkZfmgHqNUhf15VLi6L9kW0VEc0=
+github.com/pointlander/jetset v1.0.0 h1:bNlaNAX7cDPID9SlcogmXlDWq0KcRJSpKwHXaAM3bGQ=
+github.com/pointlander/jetset v1.0.0/go.mod h1:zY6+WHRPB10uzTajloHtybSicLW1bf6Rz0eSaU9Deng=
+github.com/pointlander/peg v1.0.0 h1:rtCtA6Fu6xJpILX8WJfU+cvrcKmXgTfG/v+bkLP8NYY=
+github.com/pointlander/peg v1.0.0/go.mod h1:WJTMcgeWYr6fZz4CwHnY1oWZCXew8GWCF93FaAxPrh4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/prashantv/protectmem v0.0.0-20171002184600-e20412882b3a h1:AA9vgIBDjMHPC2McaGPojgV2dcI78ZC0TLNhYCXEKH8=
+github.com/prashantv/protectmem v0.0.0-20171002184600-e20412882b3a/go.mod h1:lzZQ3Noex5pfAy7mkAeCjcBDteYU85uWWnJ/y6gKU8k=
+github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
+github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/prometheus v1.8.2-0.20200420081721-18254838fbe2 h1:JtWnHSHMC1h8mb6K5GsFzmhY/WMILsxQ4slsJu+lyg8=
+github.com/prometheus/prometheus v1.8.2-0.20200420081721-18254838fbe2/go.mod h1:ZnfuiMn3LNsry2q7ECmRe4WcscxmJSd2dIFpOi4w3lM=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rakyll/statik v0.1.6 h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs=
+github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/remeh/sizedwaitgroup v1.0.0 h1:VNGGFwNo/R5+MJBf6yrsr110p0m4/OX4S3DCy7Kyl5E=
+github.com/remeh/sizedwaitgroup v1.0.0/go.mod h1:3j2R4OIe/SeS6YDhICBy22RWjJC5eNCJ1V+9+NVNYlo=
+github.com/rhysd/go-github-selfupdate v1.2.2 h1:G+mNzkc1wEtpmM6sFS/Ghkeq+ad4Yp6EZEHyp//wGEo=
+github.com/rhysd/go-github-selfupdate v1.2.2/go.mod h1:khesvSyKcXDUxeySCedFh621iawCks0dS/QnHPcpCws=
+github.com/robskillington/gorename v0.0.0-20180424020013-52c7307cddd2 h1:t+C9QFlvAI+evRn96lz7eKyzo1CgDx3YVx3N/GJIetk=
+github.com/robskillington/gorename v0.0.0-20180424020013-52c7307cddd2/go.mod h1:CVTJ4xwzb/4H98jrd7NFgNoTAiL63scr2Pl7kqOcQAQ=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/rveen/ogdl v0.0.0-20200522080342-eeeda1a978e7 h1:Lftq+hHvm0kPWM1sDNqx1jkXAo1zw2YceoFo1hdyj7I=
+github.com/rveen/ogdl v0.0.0-20200522080342-eeeda1a978e7/go.mod h1:9fqUB54wJS9u5TSXJZhRfTdh1lXVxTytDjed7t2cNdw=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
+github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/shirou/gopsutil v2.17.13-0.20180801053943-8048a2e9c577+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/gopsutil v2.20.5+incompatible h1:tYH07UPoQt0OCQdgWWMgYHy3/a9bcxNpBIysykNIP7I=
+github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
+github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8=
+github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
+github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
+github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs=
+github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1-0.20190531151931-f31dc0aaab5a h1:o6gDpunpOQeRPLojT1Zo6gkzwgGJWZjjtuXTZEwo6AM=
+github.com/spf13/cast v1.3.1-0.20190531151931-f31dc0aaab5a/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
+github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
+github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
+github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 h1:7z3LSn867ex6VSaahyKadf4WtSsJIgne6A1WLOAGM8A=
+github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v1.1.4-0.20160305165446-6fe211e49392 h1:7ubzBW6wJ46nWdWvZQlDjtGTnupA4Z1dyHY9Xbhq3us=
+github.com/stretchr/testify v1.1.4-0.20160305165446-6fe211e49392/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/subosito/gotenv v1.2.1-0.20190917103637-de67a6614a4d h1:YN4gX82mT31qsizy2jRheOCrGLCs15VF9SV5XPuBvkQ=
+github.com/subosito/gotenv v1.2.1-0.20190917103637-de67a6614a4d/go.mod h1:GVSeM7r0P1RI1gOKYyN9IuNkhMmQwKGsjVf3ulDrdzo=
+github.com/tcnksm/go-gitconfig v0.1.2 h1:iiDhRitByXAEyjgBqsKi9QU4o2TNtv9kPP3RgPgXBPw=
+github.com/tcnksm/go-gitconfig v0.1.2/go.mod h1:/8EhP4H7oJZdIPyT+/UIsG87kTzrzM4UsLGSItWYCpE=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=
+github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tj/assert v0.0.0-20171129193455-018094318fb0 h1:Rw8kxzWo1mr6FSaYXjQELRe88y2KdfynXdnK72rdjtA=
+github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
+github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
+github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
+github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/twmb/murmur3 v1.1.4 h1:NnlAxelwOgdQDmYuV0T/K+tpDQ/8wdsDVOGmvUqBOCw=
+github.com/twmb/murmur3 v1.1.4/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
+github.com/twotwotwo/sorts v0.0.0-20160814051341-bf5c1f2b8553 h1:DRC1ubdb3ZmyyIeCSTxjZIQAnpLPfKVgYrLETQuOPjo=
+github.com/twotwotwo/sorts v0.0.0-20160814051341-bf5c1f2b8553/go.mod h1:Rj7Csq/tZ/egz+Ltc2IVpsA5309AmSMEswjkTZmq2Xc=
+github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=
+github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
+github.com/uber-go/tally v3.3.13+incompatible h1:5ic2UsDwjcWsw9jvEdWEE2XsmGCLMTt5Ukg4d74fed4=
+github.com/uber-go/tally v3.3.13+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU=
+github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
+github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
+github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/uber/tchannel-go v1.12.0 h1:xqfXzViTPQxoA/KXBfnkXlwBn7JVpa7MVRUzWjVHMbI=
+github.com/uber/tchannel-go v1.12.0/go.mod h1:Rrgz1eL8kMjW/nEzZos0t+Heq0O4LhnUJVA32OvWKHo=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
+github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=
+github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
+github.com/vmihailenco/msgpack v2.8.3+incompatible h1:76LCLwxS08gKHRpGA10PBxfWk72JfUH6mgzp2+URwYM=
+github.com/vmihailenco/msgpack v2.8.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
+github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
+github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
+github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b h1:3kC4J3eQF6p1UEfQTkC67eEeb3rTk+shQqdX6tFyq9Q=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/config v1.4.0 h1:upnMPpMm6WlbZtXoasNkK4f0FhxwS+W4Iqz5oNznehQ=
+go.uber.org/config v1.4.0/go.mod h1:aCyrMHmUAc/s2h9sv1koP84M9ZF/4K+g2oleyESO/Ig=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E=
+go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4 h1:QmwruyY+bKbDDL0BaglrbZABEali68eoMFhTZpCjYVA=
+golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 h1:DvY3Zkh7KabQE/kfzMvYvKirSiguP9Q/veMtkYyf0o8=
+golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191104232314-dc038396d1f0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200305205014-bc073721adb6/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200601175630-2caf76543d99 h1:deddXmhOJb/bvD/4M/j2AUMrhHeh6GkqykJSCWyTNVk=
+golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171 h1:xes2Q2k+d/+YNXVw0FpZkIDJiaux4OVrRKXRAzH6A0U=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE=
+gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/go-ini/ini.v1 v1.57.0 h1:e9cP3G5H9zMIwvQnUDbnD7k5SFuSbrPWdSAV8VHKxX8=
+gopkg.in/go-ini/ini.v1 v1.57.0/go.mod h1:M74/hG4RTwbkZyTEZ9iQwM4v6dFD4u6QBjoqT/pM8Kg=
+gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
+gopkg.in/go-playground/validator.v9 v9.7.0 h1:FjlsYAKvSv6gNHagVRD7pgOLNgDX/nWcMAmX9XjTK8I=
+gopkg.in/go-playground/validator.v9 v9.7.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.51.1 h1:GyboHr4UqMiLUybYjd22ZjQIKEJEpgtLXtuGbR21Oho=
+gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
+gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
+gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
+gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
+gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/russross/blackfriday.v2 v2.0.0 h1:+FlnIV8DSQnT7NZ43hcVKcdJdzZoeCmJj4Ql8gq5keA=
+gopkg.in/russross/blackfriday.v2 v2.0.0/go.mod h1:6sSBNz/GtOm/pJTuh5UmBK2ZHfmnxGbl2NZg1UliSOI=
+gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg=
+gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
+gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=
+gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
+gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE=
+gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/validator.v2 v2.0.0-20160201165114-3e4f037f12a1 h1:1IZMbdoz1SZAQ4HMRwAP0FPSyXt7ywsiJ4q7OPTEu4A=
+gopkg.in/validator.v2 v2.0.0-20160201165114-3e4f037f12a1/go.mod h1:o4V0GXN9/CAmCsvJ0oXYZvrZOe7syiDZSN1GWGZTGzc=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0=
+k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g=
+k8s.io/client-go v0.17.3/go.mod h1:cLXlTMtWHkuK4tD360KpWz2gG2KtdWEr/OT02i3emRQ=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
+k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
+sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/integrations/grafana/m3aggregator_dashboard.json b/integrations/grafana/m3aggregator_dashboard.json
index 256b9af617..0e3b67b73a 100644
--- a/integrations/grafana/m3aggregator_dashboard.json
+++ b/integrations/grafana/m3aggregator_dashboard.json
@@ -1,4 +1,30 @@
{
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "5.2.4"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": "5.0.0"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "5.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "singlestat",
+ "name": "Singlestat",
+ "version": "5.0.0"
+ }
+ ],
"annotations": {
"list": [
{
@@ -15,11 +41,12 @@
"editable": true,
"gnetId": null,
"graphTooltip": 1,
- "iteration": 1575560854098,
+ "iteration": 1582905705847,
"links": [],
"panels": [
{
"collapsed": false,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
@@ -37,6 +64,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -48,6 +76,7 @@
"x": 0,
"y": 1
},
+ "hiddenSeries": false,
"id": 3,
"legend": {
"avg": false,
@@ -140,6 +169,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -151,6 +181,7 @@
"x": 8,
"y": 1
},
+ "hiddenSeries": false,
"id": 4,
"legend": {
"avg": false,
@@ -243,6 +274,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -254,6 +286,7 @@
"x": 16,
"y": 1
},
+ "hiddenSeries": false,
"id": 12,
"legend": {
"avg": false,
@@ -363,6 +396,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -374,6 +408,7 @@
"x": 0,
"y": 8
},
+ "hiddenSeries": false,
"id": 93,
"legend": {
"avg": false,
@@ -477,6 +512,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -488,6 +524,7 @@
"x": 8,
"y": 8
},
+ "hiddenSeries": false,
"id": 92,
"legend": {
"avg": false,
@@ -563,6 +600,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -574,6 +612,7 @@
"x": 16,
"y": 8
},
+ "hiddenSeries": false,
"id": 47,
"legend": {
"avg": false,
@@ -674,6 +713,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
@@ -682,6 +722,7 @@
"x": 0,
"y": 15
},
+ "hiddenSeries": false,
"id": 170,
"legend": {
"avg": false,
@@ -760,6 +801,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
@@ -768,6 +810,7 @@
"x": 8,
"y": 15
},
+ "hiddenSeries": false,
"id": 171,
"legend": {
"avg": false,
@@ -846,6 +889,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -857,6 +901,7 @@
"x": 16,
"y": 15
},
+ "hiddenSeries": false,
"id": 195,
"legend": {
"avg": false,
@@ -932,11 +977,12 @@
},
{
"collapsed": false,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 30
+ "y": 22
},
"id": 190,
"panels": [],
@@ -949,14 +995,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 0,
- "y": 31
+ "y": 23
},
+ "hiddenSeries": false,
"id": 122,
"legend": {
"avg": false,
@@ -1035,14 +1083,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 6,
- "y": 31
+ "y": 23
},
+ "hiddenSeries": false,
"id": 132,
"legend": {
"avg": false,
@@ -1122,14 +1172,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 12,
- "y": 31
+ "y": 23
},
+ "hiddenSeries": false,
"id": 141,
"legend": {
"avg": false,
@@ -1207,14 +1259,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 18,
- "y": 31
+ "y": 23
},
+ "hiddenSeries": false,
"id": 133,
"legend": {
"avg": false,
@@ -1290,11 +1344,12 @@
},
{
"collapsed": false,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 38
+ "y": 30
},
"id": 194,
"panels": [],
@@ -1306,14 +1361,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 0,
- "y": 39
+ "y": 31
},
+ "hiddenSeries": false,
"id": 134,
"legend": {
"avg": false,
@@ -1391,14 +1448,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 6,
- "y": 39
+ "y": 31
},
+ "hiddenSeries": false,
"id": 123,
"legend": {
"avg": false,
@@ -1477,14 +1536,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 12,
- "y": 39
+ "y": 31
},
+ "hiddenSeries": false,
"id": 140,
"legend": {
"avg": false,
@@ -1563,14 +1624,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 18,
- "y": 39
+ "y": 31
},
+ "hiddenSeries": false,
"id": 130,
"legend": {
"avg": false,
@@ -1654,14 +1717,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 0,
- "y": 46
+ "y": 38
},
+ "hiddenSeries": false,
"id": 129,
"legend": {
"avg": false,
@@ -1765,14 +1830,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 6,
- "y": 46
+ "y": 38
},
+ "hiddenSeries": false,
"id": 128,
"legend": {
"avg": false,
@@ -1862,14 +1929,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 12,
- "y": 46
+ "y": 38
},
+ "hiddenSeries": false,
"id": 125,
"legend": {
"avg": false,
@@ -1948,14 +2017,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 18,
- "y": 46
+ "y": 38
},
+ "hiddenSeries": false,
"id": 139,
"legend": {
"avg": false,
@@ -2031,11 +2102,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 53
+ "y": 45
},
"id": 173,
"panels": [
@@ -2044,6 +2116,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -2141,6 +2214,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -2234,6 +2308,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -2321,6 +2396,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -2409,6 +2485,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -2502,6 +2579,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -2595,11 +2673,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 54
+ "y": 46
},
"id": 174,
"panels": [
@@ -2608,6 +2687,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -2703,6 +2783,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -2792,6 +2873,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -2881,6 +2963,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -2970,6 +3053,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -3059,6 +3143,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -3150,11 +3235,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 55
+ "y": 47
},
"id": 175,
"panels": [
@@ -3163,6 +3249,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -3252,6 +3339,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -3341,6 +3429,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -3430,6 +3519,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -3521,11 +3611,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 56
+ "y": 48
},
"id": 176,
"panels": [
@@ -3534,6 +3625,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -3623,6 +3715,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -3712,6 +3805,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -3801,6 +3895,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -3890,6 +3985,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -3979,6 +4075,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -4070,11 +4167,12 @@
},
{
"collapsed": false,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 57
+ "y": 49
},
"id": 177,
"panels": [],
@@ -4087,6 +4185,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -4096,8 +4195,9 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 58
+ "y": 50
},
+ "hiddenSeries": false,
"id": 42,
"legend": {
"avg": false,
@@ -4173,6 +4273,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -4182,8 +4283,9 @@
"h": 7,
"w": 8,
"x": 8,
- "y": 58
+ "y": 50
},
+ "hiddenSeries": false,
"id": 50,
"legend": {
"avg": false,
@@ -4261,6 +4363,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -4270,8 +4373,9 @@
"h": 7,
"w": 8,
"x": 16,
- "y": 58
+ "y": 50
},
+ "hiddenSeries": false,
"id": 43,
"legend": {
"avg": false,
@@ -4354,6 +4458,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -4363,8 +4468,9 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 65
+ "y": 57
},
+ "hiddenSeries": false,
"id": 45,
"legend": {
"avg": false,
@@ -4447,6 +4553,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -4456,8 +4563,9 @@
"h": 7,
"w": 8,
"x": 8,
- "y": 65
+ "y": 57
},
+ "hiddenSeries": false,
"id": 142,
"legend": {
"avg": false,
@@ -4535,6 +4643,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -4544,8 +4653,9 @@
"h": 7,
"w": 8,
"x": 16,
- "y": 65
+ "y": 57
},
+ "hiddenSeries": false,
"id": 143,
"legend": {
"avg": false,
@@ -4623,6 +4733,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -4632,8 +4743,9 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 72
+ "y": 64
},
+ "hiddenSeries": false,
"id": 88,
"legend": {
"avg": false,
@@ -4716,6 +4828,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -4725,8 +4838,9 @@
"h": 7,
"w": 8,
"x": 8,
- "y": 72
+ "y": 64
},
+ "hiddenSeries": false,
"id": 44,
"legend": {
"avg": false,
@@ -4804,6 +4918,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -4813,8 +4928,9 @@
"h": 7,
"w": 8,
"x": 16,
- "y": 72
+ "y": 64
},
+ "hiddenSeries": false,
"id": 78,
"legend": {
"avg": false,
@@ -4892,6 +5008,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -4901,8 +5018,9 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 79
+ "y": 71
},
+ "hiddenSeries": false,
"id": 79,
"legend": {
"avg": false,
@@ -4980,6 +5098,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -4989,8 +5108,9 @@
"h": 7,
"w": 8,
"x": 8,
- "y": 79
+ "y": 71
},
+ "hiddenSeries": false,
"id": 84,
"legend": {
"avg": false,
@@ -5065,11 +5185,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 86
+ "y": 78
},
"id": 178,
"panels": [
@@ -5078,6 +5199,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -5166,6 +5288,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -5254,6 +5377,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -5342,6 +5466,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -5435,6 +5560,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -5523,6 +5649,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -5634,11 +5761,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 87
+ "y": 79
},
"id": 179,
"panels": [
@@ -5647,6 +5775,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -5740,6 +5869,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -5828,6 +5958,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -5916,6 +6047,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -6010,6 +6142,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -6098,6 +6231,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -6186,6 +6320,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -6276,11 +6411,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 88
+ "y": 80
},
"id": 180,
"panels": [
@@ -6289,6 +6425,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -6375,6 +6512,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -6461,6 +6599,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -6547,6 +6686,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -6633,6 +6773,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -6719,6 +6860,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -6805,6 +6947,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -6891,6 +7034,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -6977,6 +7121,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -7063,6 +7208,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -7154,6 +7300,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -7240,6 +7387,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -7328,11 +7476,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 89
+ "y": 81
},
"id": 181,
"panels": [
@@ -7341,6 +7490,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -7444,6 +7594,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -7547,6 +7698,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -7660,6 +7812,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -7749,6 +7902,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -7838,6 +7992,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -7927,6 +8082,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -8015,6 +8171,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -8103,6 +8260,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -8191,6 +8349,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -8279,6 +8438,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -8367,6 +8527,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -8455,6 +8616,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -8543,6 +8705,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -8633,11 +8796,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 90
+ "y": 82
},
"id": 182,
"panels": [
@@ -8646,6 +8810,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -8735,6 +8900,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -8824,6 +8990,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -8913,6 +9080,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -9002,6 +9170,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -9093,11 +9262,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 91
+ "y": 83
},
"id": 183,
"panels": [
@@ -9106,6 +9276,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -9195,6 +9366,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -9286,11 +9458,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 92
+ "y": 84
},
"id": 184,
"panels": [
@@ -9299,6 +9472,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -9392,6 +9566,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -9485,6 +9660,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -9573,6 +9749,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -9661,6 +9838,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -9749,6 +9927,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -9839,11 +10018,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 93
+ "y": 85
},
"id": 185,
"panels": [
@@ -9852,6 +10032,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -9940,6 +10121,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -10028,6 +10210,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -10116,6 +10299,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -10204,6 +10388,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -10292,6 +10477,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -10382,11 +10568,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 94
+ "y": 86
},
"id": 186,
"panels": [
@@ -10395,6 +10582,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -10484,6 +10672,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -10573,6 +10762,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -10664,11 +10854,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 95
+ "y": 87
},
"id": 188,
"panels": [
@@ -10677,6 +10868,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 1,
@@ -10785,6 +10977,7 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
@@ -10798,6 +10991,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -10889,11 +11083,12 @@
},
{
"collapsed": true,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 97
+ "y": 89
},
"id": 191,
"panels": [
@@ -10902,6 +11097,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
@@ -10988,6 +11184,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
@@ -11074,6 +11271,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -11160,6 +11358,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -11246,6 +11445,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -11332,6 +11532,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -11418,6 +11619,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -11511,13 +11713,28 @@
}
],
"refresh": false,
- "schemaVersion": 19,
+ "schemaVersion": 22,
"style": "dark",
"tags": [
"disable-sync"
],
"templating": {
"list": [
+ {
+ "current": {
+ "text": "M3Query - Prometheus",
+ "value": "M3Query - Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
{
"allFormat": "glob",
"allValue": ".*",
@@ -11527,7 +11744,7 @@
"$__all"
]
},
- "datasource": null,
+ "datasource": "$datasource",
"definition": "{__name__=~\"m3aggregator_aggregator_flush_handler.*\"}",
"hide": 0,
"includeAll": true,
@@ -11700,7 +11917,7 @@
"$__all"
]
},
- "datasource": "M3DB",
+ "datasource": "$datasource",
"definition": "m3aggregator_aggregator_flush_handler_placement_update{backend=\"m3msg\"}",
"hide": 0,
"includeAll": true,
@@ -11730,7 +11947,7 @@
"$__all"
]
},
- "datasource": "M3DB",
+ "datasource": "$datasource",
"definition": "m3aggregator_aggregator_flush_handler_placement_update{backend=\"m3msg\"}",
"hide": 0,
"includeAll": true,
@@ -11824,5 +12041,5 @@
"timezone": "browser",
"title": "M3: Aggregator Details",
"uid": "000000317",
- "version": 61
-}
\ No newline at end of file
+ "version": 62
+}
diff --git a/integrations/grafana/m3aggregator_end_to_end_details.json b/integrations/grafana/m3aggregator_end_to_end_details.json
index 4c0df4f9ee..f605cc9b4e 100644
--- a/integrations/grafana/m3aggregator_end_to_end_details.json
+++ b/integrations/grafana/m3aggregator_end_to_end_details.json
@@ -1,4 +1,30 @@
{
+ "__requires": [
+ {
+ "id": "grafana",
+ "name": "Grafana",
+ "type": "grafana",
+ "version": "5.2.4"
+ },
+ {
+ "id": "graph",
+ "name": "Graph",
+ "type": "panel",
+ "version": "5.0.0"
+ },
+ {
+ "id": "prometheus",
+ "name": "Prometheus",
+ "type": "datasource",
+ "version": "5.0.0"
+ },
+ {
+ "id": "singlestat",
+ "name": "Singlestat",
+ "type": "panel",
+ "version": "5.0.0"
+ }
+ ],
"annotations": {
"list": [
{
@@ -16,11 +42,12 @@
"gnetId": null,
"graphTooltip": 1,
"id": 32,
- "iteration": 1582150012033,
+ "iteration": 1585680698382,
"links": [],
"panels": [
{
"collapsed": false,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
@@ -37,32 +64,471 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "fill": 1,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 0,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 198,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(m3aggregator_aggregator_client_writeForwarded_success[$step]))",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Write Timed/Forwarded Success",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 6,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 199,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(m3aggregator_aggregator_client_writeForwarded_errors[$step]))",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Write Timed/Forwarded Errors",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 12,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 200,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(m3aggregator_aggregator_client_writer_manager_writer_queue_successes[$step]))",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Write Queue Success",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 18,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 201,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(m3aggregator_aggregator_client_writer_manager_writer_queue_dropped[$step]))",
+ "refId": "A"
+ },
+ {
+ "expr": "sum(rate(m3aggregator_aggregator_client_writer_manager_writer_queue_errors[$step]))",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Write Queue Dropped/Errors",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 9
+ },
+ "hiddenSeries": false,
+ "id": 212,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(coordinator_downsampler_remote_aggregator_client_message_buffered{m3_cluster=~\"$cluster\"}) by (instance)",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Total Messages Buffered",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 0,
"fillGradient": 0,
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 6,
- "x": 0,
- "y": 1
+ "x": 6,
+ "y": 9
},
- "id": 198,
+ "hiddenSeries": false,
+ "id": 217,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
- "show": true,
+ "show": false,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
- "nullPointMode": "null",
+ "links": [],
+ "nullPointMode": "connected",
"options": {
"dataLinks": []
},
"percentage": false,
- "pointradius": 2,
+ "pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
@@ -71,7 +537,8 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(coordinator_downsampler_remote_aggregator_client_writeForwarded_success[$step]))",
+ "expr": "sum(coordinator_downsampler_remote_aggregator_client_queue_size{m3_cluster=~\"$cluster\"}) by (instance)",
+ "legendFormat": "{{instance}}",
"refId": "A"
}
],
@@ -79,9 +546,9 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Write Timed/Forwarded Success",
+ "title": "Total Messages Queue Length",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
"value_type": "individual"
},
@@ -121,33 +588,36 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 6,
- "x": 6,
- "y": 1
+ "x": 12,
+ "y": 9
},
- "id": 199,
+ "hiddenSeries": false,
+ "id": 213,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
- "show": true,
+ "show": false,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
+ "links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
- "pointradius": 2,
- "points": false,
+ "pointradius": 5,
+ "points": true,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
@@ -155,7 +625,8 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(coordinator_downsampler_remote_aggregator_client_writeForwarded_errors[$step]))",
+ "expr": "rate(coordinator_downsampler_remote_aggregator_client_message_dropped{m3_cluster=~\"$cluster\"}[1m])",
+ "legendFormat": "{{instance}} {{__name__}}",
"refId": "A"
}
],
@@ -163,7 +634,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Write Timed/Forwarded Errors",
+ "title": "Message Dropped",
"tooltip": {
"shared": true,
"sort": 0,
@@ -205,33 +676,36 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 6,
- "x": 12,
- "y": 1
+ "x": 18,
+ "y": 9
},
- "id": 200,
+ "hiddenSeries": false,
+ "id": 214,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
- "show": true,
+ "show": false,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
+ "links": [],
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
- "pointradius": 2,
- "points": false,
+ "pointradius": 5,
+ "points": true,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
@@ -239,15 +713,21 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(coordinator_downsampler_remote_aggregator_client_writer_manager_writer_queue_successes[$step]))",
+ "expr": "rate(coordinator_downsampler_remote_aggregator_client_drop_oldest_async{m3_cluster=~\"$cluster\"}[1m])",
+ "legendFormat": "{{instance}} drop_async",
"refId": "A"
+ },
+ {
+ "expr": "rate(coordinator_downsampler_remote_aggregator_client_drop_oldest_sync{m3_cluster=~\"$cluster\"}[1m])",
+ "legendFormat": "{{instance}} drop_sync",
+ "refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Write Queue Success",
+ "title": "Drop types",
"tooltip": {
"shared": true,
"sort": 0,
@@ -289,32 +769,123 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "fill": 1,
+ "datasource": "$datasource",
+ "fill": 0,
"fillGradient": 0,
"gridPos": {
- "h": 8,
+ "h": 7,
"w": 6,
- "x": 18,
- "y": 1
+ "x": 0,
+ "y": 16
},
- "id": 201,
+ "hiddenSeries": false,
+ "id": 218,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "coordinator_downsampler_remote_aggregator_client_message_write_delay{m3_cluster=\"$cluster\"}",
+ "legendFormat": "p{{quantile}} {{instance}} ",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Messages Write Delay",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
"show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 215,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
- "nullPointMode": "null",
+ "links": [],
+ "nullPointMode": "connected",
"options": {
"dataLinks": []
},
"percentage": false,
- "pointradius": 2,
+ "pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
@@ -323,19 +894,104 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(coordinator_downsampler_remote_aggregator_client_writer_manager_writer_queue_dropped[$step]))",
+ "expr": "sum(coordinator_downsampler_remote_aggregator_client_byte_buffered{m3_cluster=~\"$cluster\"}) by (instance)",
+ "legendFormat": "{{instance}}",
"refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Total Bytes buffered",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
{
- "expr": "sum(rate(coordinator_downsampler_remote_aggregator_client_writer_manager_writer_queue_errors[$step]))",
- "refId": "B"
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 12,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 216,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": true,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(coordinator_downsampler_remote_aggregator_client_buffer_byte_dropped{m3_cluster=~\"$cluster\"}[1m])",
+ "legendFormat": "{{instance}}",
+ "refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Write Queue Dropped/Errors",
+ "title": "Bytes Dropped",
"tooltip": {
"shared": true,
"sort": 0,
@@ -351,7 +1007,7 @@
},
"yaxes": [
{
- "format": "short",
+ "format": "bytes",
"label": null,
"logBase": 1,
"max": null,
@@ -374,11 +1030,12 @@
},
{
"collapsed": false,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 9
+ "y": 23
},
"id": 194,
"panels": [],
@@ -390,6 +1047,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -399,8 +1057,9 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 10
+ "y": 24
},
+ "hiddenSeries": false,
"id": 4,
"legend": {
"avg": false,
@@ -493,6 +1152,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"editable": true,
"error": false,
"fill": 0,
@@ -502,8 +1162,9 @@
"h": 7,
"w": 8,
"x": 8,
- "y": 10
+ "y": 24
},
+ "hiddenSeries": false,
"id": 12,
"legend": {
"avg": false,
@@ -613,14 +1274,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
- "y": 10
+ "y": 24
},
+ "hiddenSeries": false,
"id": 171,
"legend": {
"avg": false,
@@ -696,11 +1359,12 @@
},
{
"collapsed": false,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 17
+ "y": 31
},
"id": 191,
"panels": [],
@@ -713,14 +1377,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 0,
- "y": 18
+ "y": 32
},
+ "hiddenSeries": false,
"id": 135,
"legend": {
"avg": false,
@@ -799,14 +1465,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 6,
- "y": 18
+ "y": 32
},
+ "hiddenSeries": false,
"id": 126,
"legend": {
"avg": false,
@@ -885,14 +1553,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 12,
- "y": 18
+ "y": 32
},
+ "hiddenSeries": false,
"id": 192,
"legend": {
"avg": false,
@@ -976,14 +1646,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 0,
- "y": 25
+ "y": 39
},
+ "hiddenSeries": false,
"id": 124,
"legend": {
"avg": false,
@@ -1062,14 +1734,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 6,
- "y": 25
+ "y": 39
},
+ "hiddenSeries": false,
"id": 127,
"legend": {
"avg": false,
@@ -1145,11 +1819,12 @@
},
{
"collapsed": false,
+ "datasource": "$datasource",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 32
+ "y": 46
},
"id": 203,
"panels": [],
@@ -1161,14 +1836,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 0,
- "y": 33
+ "y": 47
},
+ "hiddenSeries": false,
"id": 205,
"legend": {
"avg": false,
@@ -1245,14 +1922,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 6,
- "y": 33
+ "y": 47
},
+ "hiddenSeries": false,
"id": 206,
"legend": {
"avg": false,
@@ -1349,14 +2028,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 12,
- "y": 33
+ "y": 47
},
+ "hiddenSeries": false,
"id": 207,
"legend": {
"avg": false,
@@ -1433,14 +2114,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 18,
- "y": 33
+ "y": 47
},
+ "hiddenSeries": false,
"id": 208,
"legend": {
"avg": false,
@@ -1517,14 +2200,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 0,
- "y": 40
+ "y": 54
},
+ "hiddenSeries": false,
"id": 210,
"legend": {
"avg": false,
@@ -1601,14 +2286,16 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "$datasource",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 6,
"x": 6,
- "y": 40
+ "y": 54
},
+ "hiddenSeries": false,
"id": 211,
"legend": {
"avg": false,
@@ -1681,12 +2368,27 @@
}
}
],
- "refresh": false,
- "schemaVersion": 19,
+ "refresh": "5s",
+ "schemaVersion": 21,
"style": "dark",
"tags": [],
"templating": {
"list": [
+ {
+ "current": {
+ "text": "M3Query - Prometheus",
+ "value": "M3Query - Prometheus"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
{
"allValue": null,
"current": {
@@ -1706,9 +2408,9 @@
"regex": "",
"skipUrlSync": false,
"sort": 1,
- "tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
+ "tagValuesQuery": "",
"type": "query",
"useTags": false
},
@@ -1716,9 +2418,11 @@
"allFormat": "glob",
"allValue": ".*",
"current": {
+ "selected": false,
"text": "All",
"value": "$__all"
},
+ "datasource": "$datasource",
"definition": "{__name__=~\"m3aggregator_aggregator_flush_handler.*\"}",
"hide": 0,
"includeAll": true,
@@ -1884,9 +2588,11 @@
"allFormat": "glob",
"allValue": ".*",
"current": {
+ "selected": false,
"text": "All",
"value": "$__all"
},
+ "datasource": "$datasource",
"definition": "m3aggregator_aggregator_flush_handler_placement_update{m3_cluster=~\"$cluster\",backend=\"m3msg\"}",
"hide": 0,
"includeAll": true,
@@ -1911,9 +2617,11 @@
"allFormat": "glob",
"allValue": ".*",
"current": {
+ "selected": false,
"text": "All",
"value": "$__all"
},
+ "datasource": "$datasource",
"definition": "m3aggregator_aggregator_flush_handler_placement_update{m3_cluster=~\"$cluster\",backend=\"m3msg\"}",
"hide": 0,
"includeAll": true,
@@ -2007,5 +2715,5 @@
"timezone": "browser",
"title": "M3: Aggregator End-to-End Details",
"uid": "sqQiAbQZz",
- "version": 9
+ "version": 16
}
\ No newline at end of file
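The new "Bytes Dropped" panel above graphs `rate(coordinator_downsampler_remote_aggregator_client_buffer_byte_dropped{...}[1m])`. For quick verification outside Grafana, the same expression can be issued against Prometheus's standard `/api/v1/query` endpoint; a minimal sketch (the address is a placeholder):

```go
// Sketch: checking the new "Bytes Dropped" panel's expression directly against
// the Prometheus HTTP API. /api/v1/query is the standard Prometheus endpoint;
// the address is a placeholder.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	expr := `rate(coordinator_downsampler_remote_aggregator_client_buffer_byte_dropped[1m])`
	resp, err := http.Get("http://0.0.0.0:9090/api/v1/query?query=" + url.QueryEscape(expr))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```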
diff --git a/integrations/grafana/m3db_dashboard.json b/integrations/grafana/m3db_dashboard.json
index c9ee4a025d..6bf240ce25 100644
--- a/integrations/grafana/m3db_dashboard.json
+++ b/integrations/grafana/m3db_dashboard.json
@@ -41,8 +41,7 @@
"editable": true,
"gnetId": null,
"graphTooltip": 1,
- "id": null,
- "iteration": 1538144350249,
+ "iteration": 1592797277408,
"links": [],
"panels": [
{
@@ -104,6 +103,7 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
+ "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -121,7 +121,7 @@
"lineColor": "rgb(31, 120, 193)",
"show": true
},
- "tableColumn": "Value",
+ "tableColumn": "",
"targets": [
{
"expr": "sum(database_bootstrapped{instance=~\"$instance\"} == bool 1)",
@@ -146,7 +146,10 @@
}
],
"valueName": "current",
- "y_formats": ["short", "short"]
+ "y_formats": [
+ "short",
+ "short"
+ ]
},
{
"cacheTimeout": null,
@@ -193,6 +196,7 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
+ "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -210,7 +214,7 @@
"lineColor": "rgb(31, 120, 193)",
"show": true
},
- "tableColumn": "Value",
+ "tableColumn": "",
"targets": [
{
"expr": "sum(database_bootstrapped{instance=~\"$instance\"} == bool 0)",
@@ -234,7 +238,10 @@
}
],
"valueName": "current",
- "y_formats": ["short", "short"]
+ "y_formats": [
+ "short",
+ "short"
+ ]
},
{
"cacheTimeout": null,
@@ -281,6 +288,7 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
+ "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -298,7 +306,7 @@
"lineColor": "rgb(31, 120, 193)",
"show": true
},
- "tableColumn": "revision",
+ "tableColumn": "",
"targets": [
{
"expr": "count(build_information{instance=~\"$instance\"}) by (revision)",
@@ -322,7 +330,10 @@
}
],
"valueName": "current",
- "y_formats": ["short", "short"]
+ "y_formats": [
+ "short",
+ "short"
+ ]
},
{
"cacheTimeout": null,
@@ -369,6 +380,7 @@
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
+ "options": {},
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
@@ -386,7 +398,7 @@
"lineColor": "rgb(31, 120, 193)",
"show": true
},
- "tableColumn": "go_version",
+ "tableColumn": "",
"targets": [
{
"expr": "count(build_information{instance=~\"$instance\"}) by (go_version)",
@@ -410,7 +422,10 @@
}
],
"valueName": "current",
- "y_formats": ["short", "short"]
+ "y_formats": [
+ "short",
+ "short"
+ ]
},
{
"collapsed": false,
@@ -435,6 +450,7 @@
"editable": true,
"error": false,
"fill": 0,
+ "fillGradient": 0,
"grid": {},
"gridPos": {
"h": 7,
@@ -442,6 +458,7 @@
"x": 0,
"y": 5
},
+ "hiddenSeries": false,
"id": 1,
"legend": {
"avg": false,
@@ -456,6 +473,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -483,6 +503,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Commit Log Queue Length",
"tooltip": {
@@ -529,6 +550,7 @@
"editable": true,
"error": false,
"fill": 0,
+ "fillGradient": 0,
"grid": {},
"gridPos": {
"h": 7,
@@ -536,6 +558,7 @@
"x": 8,
"y": 5
},
+ "hiddenSeries": false,
"id": 2,
"legend": {
"avg": false,
@@ -550,6 +573,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -578,6 +604,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Commit Log Writes / Second (Includes replication)",
"tooltip": {
@@ -624,6 +651,7 @@
"editable": true,
"error": false,
"fill": 0,
+ "fillGradient": 0,
"grid": {},
"gridPos": {
"h": 7,
@@ -631,6 +659,7 @@
"x": 16,
"y": 5
},
+ "hiddenSeries": false,
"id": 45,
"legend": {
"avg": false,
@@ -645,6 +674,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -667,6 +699,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "New Series Inserts / Second",
"tooltip": {
@@ -1034,6 +1067,7 @@
"editable": true,
"error": false,
"fill": 0,
+ "fillGradient": 0,
"grid": {},
"gridPos": {
"h": 7,
@@ -1041,6 +1075,7 @@
"x": 0,
"y": 14
},
+ "hiddenSeries": false,
"id": 81,
"legend": {
"avg": false,
@@ -1055,6 +1090,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -1114,6 +1152,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Service Reads / Writes / Second",
"tooltip": {
@@ -1160,6 +1199,7 @@
"editable": true,
"error": false,
"fill": 0,
+ "fillGradient": 0,
"grid": {},
"gridPos": {
"h": 7,
@@ -1167,6 +1207,7 @@
"x": 8,
"y": 14
},
+ "hiddenSeries": false,
"id": 82,
"legend": {
"avg": false,
@@ -1181,6 +1222,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -1200,6 +1244,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Server Side Write Latency (p99)",
"tooltip": {
@@ -1246,6 +1291,7 @@
"editable": true,
"error": false,
"fill": 0,
+ "fillGradient": 0,
"grid": {},
"gridPos": {
"h": 7,
@@ -1253,6 +1299,7 @@
"x": 16,
"y": 14
},
+ "hiddenSeries": false,
"id": 83,
"legend": {
"avg": false,
@@ -1267,6 +1314,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -1287,6 +1337,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Server Side Fetch Latency (p99)",
"tooltip": {
@@ -1347,6 +1398,7 @@
"editable": true,
"error": false,
"fill": 0,
+ "fillGradient": 0,
"grid": {},
"gridPos": {
"h": 7,
@@ -1354,6 +1406,7 @@
"x": 0,
"y": 22
},
+ "hiddenSeries": false,
"id": 13,
"legend": {
"avg": false,
@@ -1368,6 +1421,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -1389,6 +1445,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Process CPU Seconds",
"tooltip": {
@@ -1434,6 +1491,7 @@
"editable": true,
"error": false,
"fill": 0,
+ "fillGradient": 0,
"grid": {},
"gridPos": {
"h": 7,
@@ -1441,6 +1499,7 @@
"x": 8,
"y": 22
},
+ "hiddenSeries": false,
"id": 14,
"legend": {
"avg": false,
@@ -1455,6 +1514,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -1487,6 +1549,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Memory Utilization - Resident",
"tooltip": {
@@ -1532,6 +1595,7 @@
"editable": true,
"error": false,
"fill": 0,
+ "fillGradient": 0,
"grid": {},
"gridPos": {
"h": 7,
@@ -1539,6 +1603,7 @@
"x": 16,
"y": 22
},
+ "hiddenSeries": false,
"id": 110,
"legend": {
"avg": false,
@@ -1553,6 +1618,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -1575,6 +1643,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Memory - Mmaps (Anon and File)",
"tooltip": {
@@ -1620,6 +1689,7 @@
"editable": true,
"error": false,
"fill": 0,
+ "fillGradient": 0,
"grid": {},
"gridPos": {
"h": 7,
@@ -1627,6 +1697,7 @@
"x": 0,
"y": 29
},
+ "hiddenSeries": false,
"id": 61,
"legend": {
"avg": false,
@@ -1641,6 +1712,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -1661,6 +1735,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Num File Descriptors",
"tooltip": {
@@ -1706,6 +1781,7 @@
"editable": true,
"error": false,
"fill": 0,
+ "fillGradient": 0,
"grid": {},
"gridPos": {
"h": 7,
@@ -1713,6 +1789,7 @@
"x": 8,
"y": 29
},
+ "hiddenSeries": false,
"id": 49,
"legend": {
"avg": false,
@@ -1727,6 +1804,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -1747,6 +1827,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Num Go Routines",
"tooltip": {
@@ -1792,6 +1873,7 @@
"editable": true,
"error": false,
"fill": 0,
+ "fillGradient": 0,
"grid": {},
"gridPos": {
"h": 7,
@@ -1799,6 +1881,7 @@
"x": 16,
"y": 29
},
+ "hiddenSeries": false,
"id": 39,
"legend": {
"avg": false,
@@ -1813,6 +1896,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -1832,6 +1918,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Disk Free Space - Not Implemented",
"tooltip": {
@@ -5194,12 +5281,14 @@
"dashes": false,
"datasource": "$datasource",
"fill": 1,
+ "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 46
},
+ "hiddenSeries": false,
"id": 69,
"legend": {
"avg": false,
@@ -5214,6 +5303,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -5244,6 +5336,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Index Queue",
"tooltip": {
@@ -5288,13 +5381,211 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
- "fill": 0,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 8,
+ "y": 46
+ },
+ "hiddenSeries": false,
+ "id": 111,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "name:index-error error_type:async-insert | sum",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(dbindex_index_error{instance=~\"$instance\"}[$step])",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Indexing Errors",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 46
},
+ "hiddenSeries": false,
+ "id": 112,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "name:index-error error_type:async-insert | sum",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(dbshard_insert_async_errors{instance=~\"$instance\"}[$step])",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 1,
+ "refId": "A"
+ },
+ {
+ "expr": "rate(dbshard_insert_queue_inserts_batch_errors{instance=~\"$instance\"}[$step])",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Shard Insert Errors",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 0,
+ "y": 53
+ },
+ "hiddenSeries": false,
"id": 79,
"legend": {
"avg": false,
@@ -5309,6 +5600,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -5323,10 +5617,15 @@
"format": "time_series",
"intervalFactor": 1,
"refId": "A"
+ },
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(dbindex_insert_end_to_end_latency_bucket{instance=~\"$instance\"}[$step])) by (le, instance)) ",
+ "refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Indexing End-to-End Latency - P99",
"tooltip": {
@@ -5364,22 +5663,7 @@
"align": false,
"alignLevel": null
}
- }
- ],
- "repeat": null,
- "title": "Index Queue",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 46
- },
- "id": 108,
- "panels": [
+ },
{
"aliasColors": {},
"bars": false,
@@ -5387,13 +5671,15 @@
"dashes": false,
"datasource": "$datasource",
"fill": 0,
+ "fillGradient": 0,
"gridPos": {
"h": 7,
- "w": 12,
- "x": 0,
- "y": 47
+ "w": 8,
+ "x": 8,
+ "y": 53
},
- "id": 85,
+ "hiddenSeries": false,
+ "id": 113,
"legend": {
"avg": false,
"current": false,
@@ -5407,6 +5693,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -5417,18 +5706,20 @@
"steppedLine": false,
"targets": [
{
- "expr": "database_tick_index_num_docs{instance=~\"$instance\"}",
+ "expr": "index_block_compaction_task_run_latency{instance=~\"$instance\",quantile=\"0.99\",compaction_type=\"foreground\"}",
"format": "time_series",
"intervalFactor": 1,
+ "legendFormat": "{{compaction_type}} {{instance}} p{{quantile}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
- "title": "Num Docs",
+ "title": "Indexing foreground compactions",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
"value_type": "individual"
},
@@ -5442,7 +5733,7 @@
},
"yaxes": [
{
- "format": "short",
+ "format": "s",
"label": null,
"logBase": 1,
"max": null,
@@ -5470,13 +5761,15 @@
"dashes": false,
"datasource": "$datasource",
"fill": 0,
+ "fillGradient": 0,
"gridPos": {
"h": 7,
- "w": 12,
- "x": 12,
- "y": 47
+ "w": 8,
+ "x": 16,
+ "y": 53
},
- "id": 86,
+ "hiddenSeries": false,
+ "id": 114,
"legend": {
"avg": false,
"current": false,
@@ -5490,6 +5783,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -5500,18 +5796,20 @@
"steppedLine": false,
"targets": [
{
- "expr": "database_tick_index_num_segments{instance=~\"$instance\"}",
+ "expr": "index_block_compaction_task_run_latency{instance=~\"$instance\",quantile=\"0.99\",compaction_type=\"background\"}",
"format": "time_series",
"intervalFactor": 1,
+ "legendFormat": "{{compaction_type}} {{instance}} p{{quantile}}",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
- "title": "Num Segments",
+ "title": "Indexing background compactions",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
"value_type": "individual"
},
@@ -5525,7 +5823,7 @@
},
"yaxes": [
{
- "format": "short",
+ "format": "s",
"label": null,
"logBase": 1,
"max": null,
@@ -5553,12 +5851,14 @@
"dashes": false,
"datasource": "$datasource",
"fill": 0,
+ "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
- "y": 54
+ "y": 60
},
+ "hiddenSeries": false,
"id": 87,
"legend": {
"avg": false,
@@ -5573,6 +5873,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -5583,9 +5886,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "dbindex_num_active_compactions{instance=~\"$instance\"}",
+ "expr": "rate(index_block_compaction_plan_run_latency_count{instance=~\"$instance\"}[$step])",
"format": "time_series",
"intervalFactor": 1,
+ "legendFormat": "",
"refId": "A"
},
{
@@ -5598,8 +5902,113 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Num Compactions",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "repeat": null,
+ "title": "Index Queue",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 46
+ },
+ "id": 108,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 68
+ },
+ "hiddenSeries": false,
+ "id": 85,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "database_tick_index_num_docs{instance=~\"$instance\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Num Docs",
"tooltip": {
"shared": true,
"sort": 0,
@@ -5643,13 +6052,15 @@
"dashes": false,
"datasource": "$datasource",
"fill": 0,
+ "fillGradient": 0,
"gridPos": {
"h": 7,
- "w": 8,
- "x": 16,
- "y": 54
+ "w": 12,
+ "x": 12,
+ "y": 68
},
- "id": 88,
+ "hiddenSeries": false,
+ "id": 86,
"legend": {
"avg": false,
"current": false,
@@ -5663,6 +6074,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": false,
@@ -5673,7 +6087,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "dbindex_compaction_latency{instance=~\"$instance\"}",
+ "expr": "database_tick_index_num_segments{instance=~\"$instance\"}",
"format": "time_series",
"intervalFactor": 1,
"refId": "A"
@@ -5681,8 +6095,9 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
- "title": "Compaction Latency",
+ "title": "Num Segments",
"tooltip": {
"shared": true,
"sort": 0,
@@ -5698,7 +6113,7 @@
},
"yaxes": [
{
- "format": "ms",
+ "format": "short",
"label": null,
"logBase": 1,
"max": null,
@@ -5726,12 +6141,14 @@
"dashes": false,
"datasource": "$datasource",
"fill": 0,
+ "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 61
+ "y": 75
},
+ "hiddenSeries": false,
"id": 74,
"legend": {
"avg": false,
@@ -5746,6 +6163,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "dataLinks": []
+ },
"percentage": false,
"pointradius": 5,
"points": true,
@@ -5770,6 +6190,7 @@
],
"thresholds": [],
"timeFrom": null,
+ "timeRegions": [],
"timeShift": null,
"title": "Evicted/Sealed blocks",
"tooltip": {
@@ -5815,11 +6236,29 @@
}
],
"refresh": false,
- "schemaVersion": 16,
+ "schemaVersion": 22,
"style": "dark",
- "tags": ["disable-sync"],
+ "tags": [],
"templating": {
"list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
{
"allFormat": "glob",
"allValue": null,
@@ -5923,6 +6362,7 @@
],
"query": "series_pool,block_pool,encoder_pool,context_pool,iterator_pool,multi_iterator_pool,segment_reader_pool,bytes_pool,fetch_block_metadata_results_pool,fetch_blocks_metadata_results_pool,block_metadata_pool,block_metadata_slice_pool,blocks_metadata_pool,blocks_metadata_slice_pool,host_block_metadata_slice_pool,identifier_pool",
"refresh": 0,
+ "skipUrlSync": false,
"type": "custom"
},
{
@@ -5960,12 +6400,19 @@
}
],
"query": "30s,1m,5m,10m",
+ "skipUrlSync": false,
"type": "custom"
},
{
"allValue": null,
- "current": {},
+ "current": {
+ "text": "All",
+ "value": [
+ "$__all"
+ ]
+ },
"datasource": "$datasource",
+ "definition": "",
"hide": 0,
"includeAll": true,
"label": "instance",
@@ -5975,27 +6422,13 @@
"query": "label_values(commitlog_writes_queued,instance)",
"refresh": 2,
"regex": "",
+ "skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
- },
- {
- "current": {
- "text": "M3Query - Prometheus",
- "value": "M3Query - Prometheus"
- },
- "hide": 0,
- "label": null,
- "name": "datasource",
- "options": [],
- "query": "prometheus",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "type": "datasource"
}
]
},
@@ -6017,10 +6450,20 @@
"2h",
"1d"
],
- "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"]
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
},
"timezone": "browser",
"title": "M3DB Node Details",
"uid": "99SFck0iza",
- "version": 4
-}
+ "version": 1
+}
\ No newline at end of file
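Several of the new panels (for example the second target added to "Indexing End-to-End Latency - P99") rely on `histogram_quantile` over cumulative `_bucket` series. A simplified sketch of the estimate it computes, assuming linear interpolation within the target bucket and ignoring the `+Inf` edge cases Prometheus handles:

```go
// Simplified sketch of how histogram_quantile estimates a quantile from
// cumulative buckets: find the bucket containing the target rank, then
// interpolate linearly within it. Not the full Prometheus implementation.
package main

import "fmt"

type bucket struct {
	le    float64 // upper bound of the bucket ("le" label)
	count float64 // cumulative observation count
}

func quantile(q float64, buckets []bucket) float64 {
	total := buckets[len(buckets)-1].count
	rank := q * total
	lower, lowerCount := 0.0, 0.0
	for _, b := range buckets {
		if b.count >= rank {
			return lower + (b.le-lower)*(rank-lowerCount)/(b.count-lowerCount)
		}
		lower, lowerCount = b.le, b.count
	}
	return buckets[len(buckets)-1].le
}

func main() {
	bs := []bucket{{0.01, 50}, {0.05, 90}, {0.1, 99}, {0.5, 100}}
	fmt.Printf("p99 ≈ %.3fs\n", quantile(0.99, bs))
}
```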
diff --git a/integrations/systemd/m3dbnode.service b/integrations/systemd/m3dbnode.service
index e67412ec70..51d00a2e49 100644
--- a/integrations/systemd/m3dbnode.service
+++ b/integrations/systemd/m3dbnode.service
@@ -1,6 +1,6 @@
[Unit]
Description="M3DB Timeseries Database"
-Documentation=http://m3db.github.io/m3/
+Documentation=https://docs.m3db.io/
After=network.target
[Service]
diff --git a/kube/README.md b/kube/README.md
index 188def6d6f..50c5ad8049 100644
--- a/kube/README.md
+++ b/kube/README.md
@@ -1,7 +1,7 @@
# M3DB on Kubernetes
This doc is aimed at developers building M3DB on Kubernetes. End users should see our
-[how-to](https://m3db.github.io/m3/how_to/kubernetes) guide for more info.
+[how-to](https://docs.m3db.io/how_to/kubernetes) guide for more info.
## Bundling
diff --git a/kube/storage-fast-aws.yaml b/kube/storage-fast-aws.yaml
index f57e306981..18400a6a36 100644
--- a/kube/storage-fast-aws.yaml
+++ b/kube/storage-fast-aws.yaml
@@ -6,3 +6,4 @@ provisioner: kubernetes.io/aws-ebs
parameters:
type: io1
iopsPerGB: "30"
+volumeBindingMode: WaitForFirstConsumer
diff --git a/kube/storage-fast-azure.yaml b/kube/storage-fast-azure.yaml
index 8c310b2d72..d816d13e0d 100644
--- a/kube/storage-fast-azure.yaml
+++ b/kube/storage-fast-azure.yaml
@@ -5,3 +5,4 @@ metadata:
provisioner: kubernetes.io/azure-disk
parameters:
skuName: Premium_LRS
+volumeBindingMode: WaitForFirstConsumer
diff --git a/kube/storage-fast-gcp.yaml b/kube/storage-fast-gcp.yaml
index 690db8c953..19c1d72e1a 100644
--- a/kube/storage-fast-gcp.yaml
+++ b/kube/storage-fast-gcp.yaml
@@ -5,3 +5,4 @@ metadata:
provisioner: kubernetes.io/gce-pd
parameters:
type: pd-ssd
+volumeBindingMode: WaitForFirstConsumer
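All three storage classes now set `volumeBindingMode: WaitForFirstConsumer`, which delays volume provisioning until a pod using the claim is scheduled, so the disk is created in the zone where that pod lands. A sketch of the equivalent object built with the Kubernetes Go types, assuming `k8s.io/api` and `k8s.io/apimachinery` are available (the `fast` name is a guess based on the file names and may differ):

```go
// Sketch only: the GCP storage class above expressed with Kubernetes API types.
package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// WaitForFirstConsumer delays provisioning until a pod using the claim is
	// scheduled, so the volume is created in that pod's zone.
	mode := storagev1.VolumeBindingWaitForFirstConsumer
	sc := storagev1.StorageClass{
		ObjectMeta:        metav1.ObjectMeta{Name: "fast"}, // hypothetical name
		Provisioner:       "kubernetes.io/gce-pd",
		Parameters:        map[string]string{"type": "pd-ssd"},
		VolumeBindingMode: &mode,
	}
	fmt.Println(sc.Name, *sc.VolumeBindingMode)
}
```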
diff --git a/m3db.io/_redirects b/m3db.io/_redirects
index 70b31da994..9553b4d189 100644
--- a/m3db.io/_redirects
+++ b/m3db.io/_redirects
@@ -1,3 +1,3 @@
https://m3metrics.io/* https://m3db.io/:splat 301!
https://m3db.netlify.com/* https://m3db.io/:splat 301!
-/talks https://m3db.github.io/m3/overview/media/ 301!
+/talks https://docs.m3db.io/overview/media/ 301!
diff --git a/m3db.io/index.html b/m3db.io/index.html
index 6d6832a6ad..c409fb8f92 100644
--- a/m3db.io/index.html
+++ b/m3db.io/index.html
@@ -31,9 +31,9 @@
@@ -45,8 +45,8 @@
diff --git a/mkdocs.yml b/mkdocs.yml
index 074d33a499..352ed75b0e 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -39,10 +39,11 @@ markdown_extensions:
- pymdownx.betterem:
smart_enable: all
- pymdownx.caret
- - pymdownx.critic
- pymdownx.details
- pymdownx.emoji:
emoji_generator: !!python/name:pymdownx.emoji.to_svg
+ - pymdownx.highlight:
+ use_pygments: true
- pymdownx.inlinehilite
- pymdownx.magiclink
- pymdownx.mark
@@ -53,6 +54,7 @@ markdown_extensions:
- pymdownx.tasklist:
custom_checkbox: true
- pymdownx.tilde
+ - admonition
pages:
- "Introduction": "index.md"
@@ -60,6 +62,7 @@ pages:
- "Components": "overview/components.md"
- "Motivation": "overview/motivation.md"
- "Media": "overview/media.md"
+ - "Roadmap": "overview/roadmap.md"
- "M3DB":
- "Introduction": "m3db/index.md"
- "Architecture":
@@ -71,32 +74,35 @@ pages:
- "Commit Logs": "m3db/architecture/commitlogs.md"
- "Peer Streaming": "m3db/architecture/peer_streaming.md"
- "Caching": "m3db/architecture/caching.md"
- - "Coordinator":
- - "Introduction": "coordinator/index.md"
+ - "M3 Coordinator":
+ - "Introduction": "m3coordinator/index.md"
- "API":
- - "Prometheus Remote Write/Read": "coordinator/api/remote.md"
- - "Query Engine":
- - "Introduction": "query_engine/index.md"
+ - "Prometheus Remote Write/Read": "m3coordinator/api/remote.md"
+ - "M3 Query":
+ - "Introduction": "m3query/index.md"
+ - "Configuration":
+ - "Query Engine": "m3query/config/index.md"
+ - "Annotated Config File": "m3query/config/annotated_config.md"
- "API":
- - "Query": "query_engine/api/index.md"
+ - "Query": "m3query/api/index.md"
- "Architecture":
- - "Overview": "query_engine/architecture/index.md"
- - "Blocks": "query_engine/architecture/blocks.md"
- - "Query Fanout": "query_engine/architecture/fanout.md"
- - "Function Processing": "query_engine/architecture/functions.md"
- - "Configuration":
- - "Annotated Config File": "query_engine/config/annotated_config.md"
+ - "Overview": "m3query/architecture/index.md"
+ - "Blocks": "m3query/architecture/blocks.md"
+ - "Query Fanout": "m3query/architecture/fanout.md"
+ - "Function Processing": "m3query/architecture/functions.md"
- "How-To's":
- "M3DB Single Node Deployment": "how_to/single_node.md"
- "M3DB Cluster Deployment, Manually": "how_to/cluster_hard_way.md"
- "M3DB on Kubernetes": "how_to/kubernetes.md"
- "M3Query": "how_to/query.md"
+ - "M3Aggregator": "how_to/aggregator.md"
- "Use M3DB as a general purpose time series database": "how_to/use_as_tsdb.md"
- "Operational Guides":
- "Overview": "operational_guide/index.md"
- "Replication and Deployment in Zones": "operational_guide/replication_and_deployment_in_zones.md"
- - "Replication Between Clusters": "operational_guide/replication_between_clusters.md"
- - "Repairs": "operational_guide/repairs.md"
+ - "Monitoring": "operational_guide/monitoring.md"
+ - "Upgrading M3": "operational_guide/upgrading_m3.md"
+ - "Resource Limits and Preventing Abusive Reads/Writes": "operational_guide/resource_limits.md"
- "Tuning Availability, Consistency, and Durability": "operational_guide/availability_consistency_durability.md"
- "Placement/Topology": "operational_guide/placement.md"
- "Placement/Topology Configuration": "operational_guide/placement_configuration.md"
@@ -104,19 +110,15 @@ pages:
- "Bootstrapping & Crash Recovery": "operational_guide/bootstrapping_crash_recovery.md"
- "Docker & Kernel Configuration": "operational_guide/kernel_configuration.md"
- "etcd": "operational_guide/etcd.md"
- - "Monitoring": "operational_guide/monitoring.md"
- "Configuring Mapping & Rollup Rules": "operational_guide/mapping_rollup.md"
- "Upgrading M3": "operational_guide/upgrading_m3.md"
+ - "Repairs": "operational_guide/repairs.md"
+ - "Replication Between Clusters": "operational_guide/replication_between_clusters.md"
- "Integrations":
- "Prometheus": "integrations/prometheus.md"
- "Graphite": "integrations/graphite.md"
- "Grafana": "integrations/grafana.md"
- - "Performance":
- - "Introduction": "performance/index.md"
- - "M3DB":
- - "M3DB Performance": "performance/m3db/index.md"
- - "m3query":
- - "m3query Performance": "performance/m3query/index.md"
+ - "InfluxDB": "integrations/influxdb.md"
- "Troubleshooting": "troubleshooting/index.md"
- "FAQs": "faqs/index.md"
- "Glossary": "glossary/index.md"
diff --git a/scripts/comparator/queries.json b/scripts/comparator/basic_queries/queries.json
similarity index 72%
rename from scripts/comparator/queries.json
rename to scripts/comparator/basic_queries/queries.json
index 4c329bfb3c..beb3d78215 100644
--- a/scripts/comparator/queries.json
+++ b/scripts/comparator/basic_queries/queries.json
@@ -1,4 +1,14 @@
[
+ {
+ "queryGroup":"scalar",
+ "queries":[
+ "42",
+ "time()"
+ ],
+ "steps" : [
+ "1m"
+ ]
+ },
{
"queryGroup":"fetch",
"queries":[
@@ -7,23 +17,24 @@
],
"steps" : [
"15s",
- "30s",
+ "30s",
"1m"
]
},
{
"queryGroup":"temporal",
"queries":[
- "rate(quail[1m])",
- "irate(quail[5m])",
- "delta(quail[123s])",
- "idelta(quail[1m] offset 5m)",
- "deriv(quail[5m])"
+ "rate(multi_1[1m])",
+ "irate(multi_1[5m])",
+ "delta(multi_1[123s])",
+ "idelta(multi_1[1m] offset 1h)",
+ "deriv(multi_1[3m])"
],
"steps" : [
"15s",
"30s",
- "1m"
+ "1m",
+ "5m"
]
},
{
@@ -80,5 +91,17 @@
"30s",
"1m"
]
+ },
+ {
+ "queryGroup":"topk",
+ "reruns": 5,
+ "queries":[
+ "topk(2, quack)",
+ "topk(2, avg_over_time(quack[30s]))",
+ "topk(2, avg(avg_over_time(quack[30s])) by (name))"
+ ],
+ "steps" : [
+ "1m"
+ ]
}
]
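Each entry in this file is a query group the comparator runs against both Prometheus and M3 Query. A sketch of the decoded shape, inferred from the JSON keys and from the fields compare.go reads (`QueryGroup`, `Queries`, `Reruns`); the real struct lives in the comparator's utils package, so the names here are assumptions:

```go
// Sketch of the query-group shape the comparator decodes; field names beyond
// those visible in compare.go are assumptions inferred from the JSON keys.
package main

import (
	"encoding/json"
	"fmt"
)

type QueryGroup struct {
	QueryGroup string   `json:"queryGroup"`
	Queries    []string `json:"queries"`
	Steps      []string `json:"steps"`
	Reruns     int      `json:"reruns"` // optional; groups run once when <= 1
}

func main() {
	raw := `[{"queryGroup":"topk","reruns":5,"queries":["topk(2, quack)"],"steps":["1m"]}]`
	var groups []QueryGroup
	if err := json.Unmarshal([]byte(raw), &groups); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", groups[0])
}
```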
diff --git a/scripts/comparator/compare.go b/scripts/comparator/compare.go
index 9a96c6bc35..eee6ce1cfc 100644
--- a/scripts/comparator/compare.go
+++ b/scripts/comparator/compare.go
@@ -21,7 +21,9 @@
package main
import (
+ "bytes"
"encoding/json"
+ "errors"
"flag"
"fmt"
"io/ioutil"
@@ -51,7 +53,10 @@ func main() {
pQueryFile = flag.String("input", "", "the query file")
pPromAddress = flag.String("promAdress", "0.0.0.0:9090", "prom address")
- pQueryAddress = flag.String("queryAddress", "0.0.0.0:7201", "query address")
+ pQueryAddress = flag.String("queryAddress", "0.0.0.0:7201/m3query", "M3 query address")
+
+ pComparatorAddress = flag.String("comparator", "", "comparator address")
+ pRegressionDir = flag.String("regressionDir", "", "optional directory for regression tests")
pStart = flag.Int64("s", now.Add(time.Hour*-3).Unix(), "start time")
pEnd = flag.Int64("e", now.Unix(), "end time")
@@ -63,10 +68,15 @@ func main() {
promAddress = *pPromAddress
queryAddress = *pQueryAddress
+ regressionDir = *pRegressionDir
+ comparatorAddress = *pComparatorAddress
+
start = *pStart
end = *pEnd
)
if len(queryFile) == 0 {
paramError("No query found", log)
os.Exit(1)
@@ -95,20 +105,141 @@ func main() {
var multiErr xerrors.MultiError
for _, queryGroup := range queries {
- if err := runQueryGroup(
- queryGroup,
- promAddress,
- queryAddress,
- log,
- ); err != nil {
- multiErr = multiErr.Add(err)
+ runs := 1
+ if queryGroup.Reruns > 1 {
+ runs = queryGroup.Reruns
+ }
+
+ for i := 0; i < runs; i++ {
+ log.Info("running query group",
+ zap.String("group", queryGroup.QueryGroup),
+ zap.Int("run", i+1))
+ if err := runQueryGroup(
+ queryGroup,
+ promAddress,
+ queryAddress,
+ log,
+ ); err != nil {
+ multiErr = multiErr.Add(err)
+ log.Error("query group encountered failure",
+ zap.String("group", queryGroup.QueryGroup),
+ zap.Int("run", i+1),
+ zap.Error(err))
+ }
}
}
- if multiErr.LastError() != nil {
- log.Error("mismatched queries detected")
- os.Exit(1)
+ if !multiErr.Empty() {
+ log.Fatal("mismatched queries detected in base queries")
+ }
+ log.Info("base queries success")
+
+ if err := runRegressionSuite(regressionDir, comparatorAddress,
+ promAddress, queryAddress, log); err != nil {
+ log.Fatal("failure or mismatched queries detected in regression suite", zap.Error(err))
+ }
+ log.Info("regression success")
+}
+
+func runRegressionSuite(
+ regressionDir string,
+ comparatorAddress string,
+ promAddress string,
+ queryAddress string,
+ log *zap.Logger,
+) error {
+ fmt.Println("dir", regressionDir, "add", comparatorAddress)
+ if len(regressionDir) == 0 {
+ log.Info("no regression directory supplied.")
+ return nil
+ }
+
+ if len(comparatorAddress) == 0 {
+ err := errors.New("no comparator address")
+ log.Error("regression turned on but no comparator address", zap.Error(err))
+ return err
+ }
+
+ regressions, err := utils.ParseRegressionFilesToPromQLQueryGroup(regressionDir, log)
+ if err != nil {
+ log.Error("could not parse regressions to PromQL queries", zap.Error(err))
+ return err
+ }
+
+ var multiErr xerrors.MultiError
+ for _, regressionGroup := range regressions {
+ runs := 1
+ if regressionGroup.Reruns > 1 {
+ runs = regressionGroup.Reruns
+ }
+
+ for i := 0; i < runs; i++ {
+ log.Info("running query group",
+ zap.String("group", regressionGroup.QueryGroup),
+ zap.Int("run", i+1))
+
+ if err := runRegression(
+ regressionGroup,
+ comparatorAddress,
+ promAddress,
+ queryAddress,
+ log,
+ ); err != nil {
+ multiErr = multiErr.Add(err)
+ }
+ }
+ }
+
+ if err := multiErr.LastError(); err != nil {
+ log.Error("mismatched queries detected in regression queries", zap.Error(err))
+ return err
+ }
+
+ return nil
+}
+
+func runRegression(
+ queryGroup utils.PromQLRegressionQueryGroup,
+ comparatorAddress string,
+ promAddress string,
+ queryAddress string,
+ log *zap.Logger,
+) error {
+ data, err := json.Marshal(queryGroup.Data)
+ if err != nil {
+ log.Error("could not marshall data", zap.Error(err))
+ return err
+ }
+
+ comparatorURL := fmt.Sprintf("http://%s", comparatorAddress)
+ resp, err := http.Post(comparatorURL, "application/json", bytes.NewReader(data))
+ if err != nil {
+ log.Error("could not seed regression data", zap.Error(err))
+ return err
+ }
+
+ defer resp.Body.Close()
+
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ log.Error("seed status code not 2XX",
+ zap.Int("code", resp.StatusCode),
+ zap.String("status", resp.Status))
+ return fmt.Errorf("status code %d", resp.StatusCode)
+ }
+
+ var multiErr xerrors.MultiError
+ for _, query := range queryGroup.Queries {
+ promURL := fmt.Sprintf("http://%s%s", promAddress, query)
+ queryURL := fmt.Sprintf("http://%s%s", queryAddress, query)
+ if err := runComparison(promURL, queryURL, log); err != nil {
+ multiErr = multiErr.Add(err)
+ log.Error(
+ "mismatched query",
+ zap.String("promURL", promURL),
+ zap.String("queryURL", queryURL),
+ )
+ }
}
+
+ return multiErr.FinalError()
}
func runQueryGroup(
@@ -117,8 +248,6 @@ func runQueryGroup(
queryAddress string,
log *zap.Logger,
) error {
- log.Info("running query group", zap.String("group", queryGroup.QueryGroup))
-
var multiErr xerrors.MultiError
for _, query := range queryGroup.Queries {
promURL := fmt.Sprintf("http://%s%s", promAddress, query)
@@ -141,13 +270,13 @@ func runComparison(
queryURL string,
log *zap.Logger,
) error {
- promResult, err := parseResult(promURL, log)
+ promResult, err := parseResult(promURL)
if err != nil {
log.Error("failed to parse Prometheus result", zap.Error(err))
return err
}
- queryResult, err := parseResult(queryURL, log)
+ queryResult, err := parseResult(queryURL)
if err != nil {
log.Error("failed to parse M3Query result", zap.Error(err))
return err
@@ -155,17 +284,14 @@ func runComparison(
_, err = promResult.Matches(queryResult)
if err != nil {
- log.Error("mismatch", zap.Error((err)))
+ log.Error("mismatch", zap.Error(err))
return err
}
return nil
}
-func parseResult(
- endpoint string,
- log *zap.Logger,
-) (prometheus.Response, error) {
+func parseResult(endpoint string) (prometheus.Response, error) {
var result prometheus.Response
response, err := http.Get(endpoint)
if err != nil {
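The rerun logic above executes each group `Reruns` times and accumulates failures rather than stopping at the first mismatch. A stand-alone sketch of that pattern, with the standard library's `errors.Join` (Go 1.20+) standing in for m3's `xerrors.MultiError`:

```go
// Stand-alone sketch of the rerun-and-accumulate pattern used in compare.go;
// errors.Join (Go 1.20+) stands in for m3's xerrors.MultiError.
package main

import (
	"errors"
	"fmt"
)

func runOnce(query string) error {
	if query == "bad" {
		return fmt.Errorf("mismatch for %q", query)
	}
	return nil
}

func runGroup(queries []string, reruns int) error {
	runs := 1
	if reruns > 1 {
		runs = reruns
	}
	var errs []error
	for i := 0; i < runs; i++ {
		for _, q := range queries {
			if err := runOnce(q); err != nil {
				errs = append(errs, err)
			}
		}
	}
	return errors.Join(errs...) // nil when no errors were collected
}

func main() {
	fmt.Println(runGroup([]string{"good", "bad"}, 3))
}
```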
diff --git a/scripts/comparator/docker-compose.yml b/scripts/comparator/docker-compose.yml
index 4ad11c9c2d..580f233613 100755
--- a/scripts/comparator/docker-compose.yml
+++ b/scripts/comparator/docker-compose.yml
@@ -3,8 +3,10 @@ services:
m3comparator:
expose:
- "9000"
+ - "9001"
ports:
- "0.0.0.0:9000:9000"
+ - "0.0.0.0:9001:9001"
networks:
- backend
image: "m3comparator:${REVISION}"
diff --git a/scripts/comparator/m3comparator.Dockerfile b/scripts/comparator/m3comparator.Dockerfile
index d71483bb43..ee89a6102f 100755
--- a/scripts/comparator/m3comparator.Dockerfile
+++ b/scripts/comparator/m3comparator.Dockerfile
@@ -5,7 +5,7 @@ RUN mkdir -p /bin
RUN mkdir -p /etc/m3comparator
ADD ./m3comparator /bin/
-EXPOSE 9000/tcp
+EXPOSE 9000/tcp 9001/tcp
ENTRYPOINT [ "/bin/m3comparator" ]
CMD
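With port 9001 now exposed alongside 9000, the comparator can be seeded over HTTP the same way `runRegression` does it. A sketch of that seeding call; that 9001 is the HTTP seed endpoint is an assumption based on the newly published port, and the payload is a hypothetical minimal body:

```go
// Sketch: seeding the comparator with regression data over HTTP, mirroring
// runRegression in compare.go. The port and payload shape are assumptions.
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	payload := []byte(`{"name":"example","data":[]}`) // hypothetical minimal body
	resp, err := http.Post("http://0.0.0.0:9001", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		panic(fmt.Sprintf("seed failed: %s", resp.Status))
	}
	fmt.Println("seeded:", resp.Status)
}
```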
diff --git a/scripts/comparator/regression_data/rate_tag_mismatch.json b/scripts/comparator/regression_data/rate_tag_mismatch.json
new file mode 100644
index 0000000000..c6f82d61a1
--- /dev/null
+++ b/scripts/comparator/regression_data/rate_tag_mismatch.json
@@ -0,0 +1,2184 @@
+{
+ "name": "aggregated_30d rate must be smaller than default",
+ "query": "sum(rate(database_write_tagged_success[1m])) by (namespace)",
+ "startMillis": 1584825526,
+ "endMillis": 1584828792,
+ "step": 2,
+ "data": [
+ {
+ "start": "2020-03-21T21:23:32Z",
+ "end": "2020-03-21T22:08:20Z",
+ "tags": [
+ ["__name__", "database_write_tagged_success"],
+ ["foo", "bar"],
+ ["namespace", "aggregated_30d]"]
+ ],
+ "datapoints": [
+ {
+ "val": "248093255",
+ "ts": "2020-03-21T21:23:34.269Z"
+ },
+ {
+ "val": "248093255",
+ "ts": "2020-03-21T21:23:44.269Z"
+ },
+ {
+ "val": "248093255",
+ "ts": "2020-03-21T21:23:54.269Z"
+ },
+ {
+ "val": "248093255",
+ "ts": "2020-03-21T21:24:04.269Z"
+ },
+ {
+ "val": "248096711",
+ "ts": "2020-03-21T21:24:14.269Z"
+ },
+ {
+ "val": "248150842",
+ "ts": "2020-03-21T21:24:24.269Z"
+ },
+ {
+ "val": "248150842",
+ "ts": "2020-03-21T21:24:34.269Z"
+ },
+ {
+ "val": "248150842",
+ "ts": "2020-03-21T21:24:44.269Z"
+ },
+ {
+ "val": "248150842",
+ "ts": "2020-03-21T21:24:54.269Z"
+ },
+ {
+ "val": "248150842",
+ "ts": "2020-03-21T21:25:04.269Z"
+ },
+ {
+ "val": "248154992",
+ "ts": "2020-03-21T21:25:14.269Z"
+ },
+ {
+ "val": "248208429",
+ "ts": "2020-03-21T21:25:24.269Z"
+ },
+ {
+ "val": "248208429",
+ "ts": "2020-03-21T21:25:34.269Z"
+ },
+ {
+ "val": "248208429",
+ "ts": "2020-03-21T21:25:44.269Z"
+ },
+ {
+ "val": "248208429",
+ "ts": "2020-03-21T21:25:54.269Z"
+ },
+ {
+ "val": "248208429",
+ "ts": "2020-03-21T21:26:04.269Z"
+ },
+ {
+ "val": "248212148",
+ "ts": "2020-03-21T21:26:14.269Z"
+ },
+ {
+ "val": "248266016",
+ "ts": "2020-03-21T21:26:24.269Z"
+ },
+ {
+ "val": "248266016",
+ "ts": "2020-03-21T21:26:34.269Z"
+ },
+ {
+ "val": "248266016",
+ "ts": "2020-03-21T21:26:44.269Z"
+ },
+ {
+ "val": "248266016",
+ "ts": "2020-03-21T21:26:54.269Z"
+ },
+ {
+ "val": "248266016",
+ "ts": "2020-03-21T21:27:04.269Z"
+ },
+ {
+ "val": "248269497",
+ "ts": "2020-03-21T21:27:14.269Z"
+ },
+ {
+ "val": "248323603",
+ "ts": "2020-03-21T21:27:24.269Z"
+ },
+ {
+ "val": "248323603",
+ "ts": "2020-03-21T21:27:34.269Z"
+ },
+ {
+ "val": "248323603",
+ "ts": "2020-03-21T21:27:44.269Z"
+ },
+ {
+ "val": "248323603",
+ "ts": "2020-03-21T21:27:54.269Z"
+ },
+ {
+ "val": "248323603",
+ "ts": "2020-03-21T21:28:04.269Z"
+ },
+ {
+ "val": "248327450",
+ "ts": "2020-03-21T21:28:14.269Z"
+ },
+ {
+ "val": "248381190",
+ "ts": "2020-03-21T21:28:24.269Z"
+ },
+ {
+ "val": "248381190",
+ "ts": "2020-03-21T21:28:34.269Z"
+ },
+ {
+ "val": "248381190",
+ "ts": "2020-03-21T21:28:44.269Z"
+ },
+ {
+ "val": "248381190",
+ "ts": "2020-03-21T21:28:54.269Z"
+ },
+ {
+ "val": "248381190",
+ "ts": "2020-03-21T21:29:04.269Z"
+ },
+ {
+ "val": "248384673",
+ "ts": "2020-03-21T21:29:14.269Z"
+ },
+ {
+ "val": "248438777",
+ "ts": "2020-03-21T21:29:24.269Z"
+ },
+ {
+ "val": "248438777",
+ "ts": "2020-03-21T21:29:34.269Z"
+ },
+ {
+ "val": "248438777",
+ "ts": "2020-03-21T21:29:44.269Z"
+ },
+ {
+ "val": "248438777",
+ "ts": "2020-03-21T21:29:54.269Z"
+ },
+ {
+ "val": "248438777",
+ "ts": "2020-03-21T21:30:04.269Z"
+ },
+ {
+ "val": "248442689",
+ "ts": "2020-03-21T21:30:14.269Z"
+ },
+ {
+ "val": "248496364",
+ "ts": "2020-03-21T21:30:24.269Z"
+ },
+ {
+ "val": "248496364",
+ "ts": "2020-03-21T21:30:34.269Z"
+ },
+ {
+ "val": "248496364",
+ "ts": "2020-03-21T21:30:44.269Z"
+ },
+ {
+ "val": "248496364",
+ "ts": "2020-03-21T21:30:54.269Z"
+ },
+ {
+ "val": "248496364",
+ "ts": "2020-03-21T21:31:04.269Z"
+ },
+ {
+ "val": "248499995",
+ "ts": "2020-03-21T21:31:14.269Z"
+ },
+ {
+ "val": "248553951",
+ "ts": "2020-03-21T21:31:24.269Z"
+ },
+ {
+ "val": "248553951",
+ "ts": "2020-03-21T21:31:34.269Z"
+ },
+ {
+ "val": "248553951",
+ "ts": "2020-03-21T21:31:44.269Z"
+ },
+ {
+ "val": "248553951",
+ "ts": "2020-03-21T21:31:54.269Z"
+ },
+ {
+ "val": "248553951",
+ "ts": "2020-03-21T21:32:04.269Z"
+ },
+ {
+ "val": "248557460",
+ "ts": "2020-03-21T21:32:14.269Z"
+ },
+ {
+ "val": "248611538",
+ "ts": "2020-03-21T21:32:24.269Z"
+ },
+ {
+ "val": "248611538",
+ "ts": "2020-03-21T21:32:34.269Z"
+ },
+ {
+ "val": "248611538",
+ "ts": "2020-03-21T21:32:44.269Z"
+ },
+ {
+ "val": "248611538",
+ "ts": "2020-03-21T21:32:54.269Z"
+ },
+ {
+ "val": "248611538",
+ "ts": "2020-03-21T21:33:04.269Z"
+ },
+ {
+ "val": "248615571",
+ "ts": "2020-03-21T21:33:14.269Z"
+ },
+ {
+ "val": "248669125",
+ "ts": "2020-03-21T21:33:24.269Z"
+ },
+ {
+ "val": "248669125",
+ "ts": "2020-03-21T21:33:34.269Z"
+ },
+ {
+ "val": "248669125",
+ "ts": "2020-03-21T21:33:44.269Z"
+ },
+ {
+ "val": "248669125",
+ "ts": "2020-03-21T21:33:54.269Z"
+ },
+ {
+ "val": "248669125",
+ "ts": "2020-03-21T21:34:04.269Z"
+ },
+ {
+ "val": "248669985",
+ "ts": "2020-03-21T21:34:14.269Z"
+ },
+ {
+ "val": "248676426",
+ "ts": "2020-03-21T21:34:24.269Z"
+ },
+ {
+ "val": "248676426",
+ "ts": "2020-03-21T21:34:34.269Z"
+ },
+ {
+ "val": "248676426",
+ "ts": "2020-03-21T21:34:44.269Z"
+ },
+ {
+ "val": "248676426",
+ "ts": "2020-03-21T21:34:54.269Z"
+ },
+ {
+ "val": "248676426",
+ "ts": "2020-03-21T21:35:04.269Z"
+ },
+ {
+ "val": "248676428",
+ "ts": "2020-03-21T21:35:14.269Z"
+ },
+ {
+ "val": "248676450",
+ "ts": "2020-03-21T21:35:24.269Z"
+ },
+ {
+ "val": "248676450",
+ "ts": "2020-03-21T21:35:34.269Z"
+ },
+ {
+ "val": "248676450",
+ "ts": "2020-03-21T21:35:44.269Z"
+ },
+ {
+ "val": "248676450",
+ "ts": "2020-03-21T21:35:54.269Z"
+ },
+ {
+ "val": "248676450",
+ "ts": "2020-03-21T21:36:04.269Z"
+ },
+ {
+ "val": "248676452",
+ "ts": "2020-03-21T21:36:14.269Z"
+ },
+ {
+ "val": "248676474",
+ "ts": "2020-03-21T21:36:24.269Z"
+ },
+ {
+ "val": "248676474",
+ "ts": "2020-03-21T21:36:34.269Z"
+ },
+ {
+ "val": "248676474",
+ "ts": "2020-03-21T21:36:44.269Z"
+ },
+ {
+ "val": "248676474",
+ "ts": "2020-03-21T21:36:54.269Z"
+ },
+ {
+ "val": "248676474",
+ "ts": "2020-03-21T21:37:04.269Z"
+ },
+ {
+ "val": "248676476",
+ "ts": "2020-03-21T21:37:14.269Z"
+ },
+ {
+ "val": "248676498",
+ "ts": "2020-03-21T21:37:24.269Z"
+ },
+ {
+ "val": "248676498",
+ "ts": "2020-03-21T21:37:34.269Z"
+ },
+ {
+ "val": "248676498",
+ "ts": "2020-03-21T21:37:44.269Z"
+ },
+ {
+ "val": "248676498",
+ "ts": "2020-03-21T21:37:54.269Z"
+ },
+ {
+ "val": "248676498",
+ "ts": "2020-03-21T21:38:04.269Z"
+ },
+ {
+ "val": "248676500",
+ "ts": "2020-03-21T21:38:14.269Z"
+ },
+ {
+ "val": "248676522",
+ "ts": "2020-03-21T21:38:24.269Z"
+ },
+ {
+ "val": "248676522",
+ "ts": "2020-03-21T21:38:34.269Z"
+ },
+ {
+ "val": "248676522",
+ "ts": "2020-03-21T21:38:44.269Z"
+ },
+ {
+ "val": "248676522",
+ "ts": "2020-03-21T21:38:54.269Z"
+ },
+ {
+ "val": "248676522",
+ "ts": "2020-03-21T21:39:04.269Z"
+ },
+ {
+ "val": "248676524",
+ "ts": "2020-03-21T21:39:14.269Z"
+ },
+ {
+ "val": "248676546",
+ "ts": "2020-03-21T21:39:24.269Z"
+ },
+ {
+ "val": "248676546",
+ "ts": "2020-03-21T21:39:34.269Z"
+ },
+ {
+ "val": "248676546",
+ "ts": "2020-03-21T21:39:44.269Z"
+ },
+ {
+ "val": "248676546",
+ "ts": "2020-03-21T21:39:54.269Z"
+ },
+ {
+ "val": "248676546",
+ "ts": "2020-03-21T21:40:04.269Z"
+ },
+ {
+ "val": "248676548",
+ "ts": "2020-03-21T21:40:14.269Z"
+ },
+ {
+ "val": "248676570",
+ "ts": "2020-03-21T21:40:24.269Z"
+ },
+ {
+ "val": "248676570",
+ "ts": "2020-03-21T21:40:34.269Z"
+ },
+ {
+ "val": "248676570",
+ "ts": "2020-03-21T21:40:44.269Z"
+ },
+ {
+ "val": "248676570",
+ "ts": "2020-03-21T21:40:54.269Z"
+ },
+ {
+ "val": "248676570",
+ "ts": "2020-03-21T21:41:04.269Z"
+ },
+ {
+ "val": "248676572",
+ "ts": "2020-03-21T21:41:14.269Z"
+ },
+ {
+ "val": "248676594",
+ "ts": "2020-03-21T21:41:24.269Z"
+ },
+ {
+ "val": "248676594",
+ "ts": "2020-03-21T21:41:34.269Z"
+ },
+ {
+ "val": "248676594",
+ "ts": "2020-03-21T21:41:44.269Z"
+ },
+ {
+ "val": "248676594",
+ "ts": "2020-03-21T21:41:54.269Z"
+ },
+ {
+ "val": "248676594",
+ "ts": "2020-03-21T21:42:04.269Z"
+ },
+ {
+ "val": "248676596",
+ "ts": "2020-03-21T21:42:14.269Z"
+ },
+ {
+ "val": "248676618",
+ "ts": "2020-03-21T21:42:24.269Z"
+ },
+ {
+ "val": "248676618",
+ "ts": "2020-03-21T21:42:34.269Z"
+ },
+ {
+ "val": "248676618",
+ "ts": "2020-03-21T21:42:44.269Z"
+ },
+ {
+ "val": "248676618",
+ "ts": "2020-03-21T21:42:54.269Z"
+ },
+ {
+ "val": "248676618",
+ "ts": "2020-03-21T21:43:04.269Z"
+ },
+ {
+ "val": "248676620",
+ "ts": "2020-03-21T21:43:14.269Z"
+ },
+ {
+ "val": "248676642",
+ "ts": "2020-03-21T21:43:24.269Z"
+ },
+ {
+ "val": "248676642",
+ "ts": "2020-03-21T21:43:34.269Z"
+ },
+ {
+ "val": "248676642",
+ "ts": "2020-03-21T21:43:44.269Z"
+ },
+ {
+ "val": "248676642",
+ "ts": "2020-03-21T21:43:54.269Z"
+ },
+ {
+ "val": "248676642",
+ "ts": "2020-03-21T21:44:04.269Z"
+ },
+ {
+ "val": "248676644",
+ "ts": "2020-03-21T21:44:14.269Z"
+ },
+ {
+ "val": "248676666",
+ "ts": "2020-03-21T21:44:24.269Z"
+ },
+ {
+ "val": "248676666",
+ "ts": "2020-03-21T21:44:34.269Z"
+ },
+ {
+ "val": "248676666",
+ "ts": "2020-03-21T21:44:44.269Z"
+ },
+ {
+ "val": "248676666",
+ "ts": "2020-03-21T21:44:54.269Z"
+ },
+ {
+ "val": "248676666",
+ "ts": "2020-03-21T21:45:04.269Z"
+ },
+ {
+ "val": "248676668",
+ "ts": "2020-03-21T21:45:14.269Z"
+ },
+ {
+ "val": "248676690",
+ "ts": "2020-03-21T21:45:24.269Z"
+ },
+ {
+ "val": "248676690",
+ "ts": "2020-03-21T21:45:34.269Z"
+ },
+ {
+ "val": "248676690",
+ "ts": "2020-03-21T21:45:44.269Z"
+ },
+ {
+ "val": "248676690",
+ "ts": "2020-03-21T21:45:54.269Z"
+ },
+ {
+ "val": "248676690",
+ "ts": "2020-03-21T21:46:04.269Z"
+ },
+ {
+ "val": "248676692",
+ "ts": "2020-03-21T21:46:14.269Z"
+ },
+ {
+ "val": "248676714",
+ "ts": "2020-03-21T21:46:24.269Z"
+ },
+ {
+ "val": "248676714",
+ "ts": "2020-03-21T21:46:34.269Z"
+ },
+ {
+ "val": "248676714",
+ "ts": "2020-03-21T21:46:44.269Z"
+ },
+ {
+ "val": "248676714",
+ "ts": "2020-03-21T21:46:54.269Z"
+ },
+ {
+ "val": "248676714",
+ "ts": "2020-03-21T21:47:04.269Z"
+ },
+ {
+ "val": "248676716",
+ "ts": "2020-03-21T21:47:14.269Z"
+ },
+ {
+ "val": "248676738",
+ "ts": "2020-03-21T21:47:24.269Z"
+ },
+ {
+ "val": "248676738",
+ "ts": "2020-03-21T21:47:34.269Z"
+ },
+ {
+ "val": "248676738",
+ "ts": "2020-03-21T21:47:44.269Z"
+ },
+ {
+ "val": "248676738",
+ "ts": "2020-03-21T21:47:54.269Z"
+ },
+ {
+ "val": "248676738",
+ "ts": "2020-03-21T21:48:04.269Z"
+ },
+ {
+ "val": "248676740",
+ "ts": "2020-03-21T21:48:14.269Z"
+ },
+ {
+ "val": "248676762",
+ "ts": "2020-03-21T21:48:24.269Z"
+ },
+ {
+ "val": "248676762",
+ "ts": "2020-03-21T21:48:34.269Z"
+ },
+ {
+ "val": "248676762",
+ "ts": "2020-03-21T21:48:44.269Z"
+ },
+ {
+ "val": "248676762",
+ "ts": "2020-03-21T21:48:54.269Z"
+ },
+ {
+ "val": "248676762",
+ "ts": "2020-03-21T21:49:04.269Z"
+ },
+ {
+ "val": "248676764",
+ "ts": "2020-03-21T21:49:14.269Z"
+ },
+ {
+ "val": "248676786",
+ "ts": "2020-03-21T21:49:24.269Z"
+ },
+ {
+ "val": "248676786",
+ "ts": "2020-03-21T21:49:34.269Z"
+ },
+ {
+ "val": "248676786",
+ "ts": "2020-03-21T21:49:44.269Z"
+ },
+ {
+ "val": "248676786",
+ "ts": "2020-03-21T21:49:54.269Z"
+ },
+ {
+ "val": "248676786",
+ "ts": "2020-03-21T21:50:04.269Z"
+ },
+ {
+ "val": "248676788",
+ "ts": "2020-03-21T21:50:14.269Z"
+ },
+ {
+ "val": "248676810",
+ "ts": "2020-03-21T21:50:24.269Z"
+ },
+ {
+ "val": "248676810",
+ "ts": "2020-03-21T21:50:34.269Z"
+ },
+ {
+ "val": "248676810",
+ "ts": "2020-03-21T21:50:44.269Z"
+ },
+ {
+ "val": "248676810",
+ "ts": "2020-03-21T21:50:54.269Z"
+ },
+ {
+ "val": "248676810",
+ "ts": "2020-03-21T21:51:04.269Z"
+ },
+ {
+ "val": "248676812",
+ "ts": "2020-03-21T21:51:14.269Z"
+ },
+ {
+ "val": "248676834",
+ "ts": "2020-03-21T21:51:24.269Z"
+ },
+ {
+ "val": "248676834",
+ "ts": "2020-03-21T21:51:34.269Z"
+ },
+ {
+ "val": "248676834",
+ "ts": "2020-03-21T21:51:44.269Z"
+ },
+ {
+ "val": "248676834",
+ "ts": "2020-03-21T21:51:54.269Z"
+ },
+ {
+ "val": "248676834",
+ "ts": "2020-03-21T21:52:04.269Z"
+ },
+ {
+ "val": "248676836",
+ "ts": "2020-03-21T21:52:14.269Z"
+ },
+ {
+ "val": "248676858",
+ "ts": "2020-03-21T21:52:24.269Z"
+ },
+ {
+ "val": "248676858",
+ "ts": "2020-03-21T21:52:34.269Z"
+ },
+ {
+ "val": "248676858",
+ "ts": "2020-03-21T21:52:44.269Z"
+ },
+ {
+ "val": "248676858",
+ "ts": "2020-03-21T21:52:54.269Z"
+ },
+ {
+ "val": "248676858",
+ "ts": "2020-03-21T21:53:04.269Z"
+ },
+ {
+ "val": "248676860",
+ "ts": "2020-03-21T21:53:14.269Z"
+ },
+ {
+ "val": "248676882",
+ "ts": "2020-03-21T21:53:24.269Z"
+ },
+ {
+ "val": "248676882",
+ "ts": "2020-03-21T21:53:34.269Z"
+ },
+ {
+ "val": "248676882",
+ "ts": "2020-03-21T21:53:44.269Z"
+ },
+ {
+ "val": "248676882",
+ "ts": "2020-03-21T21:53:54.269Z"
+ },
+ {
+ "val": "248676882",
+ "ts": "2020-03-21T21:54:04.269Z"
+ },
+ {
+ "val": "248676884",
+ "ts": "2020-03-21T21:54:14.269Z"
+ },
+ {
+ "val": "248676906",
+ "ts": "2020-03-21T21:54:24.269Z"
+ },
+ {
+ "val": "248676906",
+ "ts": "2020-03-21T21:54:34.269Z"
+ },
+ {
+ "val": "248676906",
+ "ts": "2020-03-21T21:54:44.269Z"
+ },
+ {
+ "val": "248676906",
+ "ts": "2020-03-21T21:54:54.269Z"
+ },
+ {
+ "val": "248676906",
+ "ts": "2020-03-21T21:55:04.269Z"
+ },
+ {
+ "val": "248676908",
+ "ts": "2020-03-21T21:55:14.269Z"
+ },
+ {
+ "val": "248676930",
+ "ts": "2020-03-21T21:55:24.269Z"
+ },
+ {
+ "val": "248676930",
+ "ts": "2020-03-21T21:55:34.269Z"
+ },
+ {
+ "val": "248676930",
+ "ts": "2020-03-21T21:55:44.269Z"
+ },
+ {
+ "val": "248676930",
+ "ts": "2020-03-21T21:55:54.269Z"
+ },
+ {
+ "val": "248676930",
+ "ts": "2020-03-21T21:56:04.269Z"
+ },
+ {
+ "val": "248676932",
+ "ts": "2020-03-21T21:56:14.269Z"
+ },
+ {
+ "val": "248676954",
+ "ts": "2020-03-21T21:56:24.269Z"
+ },
+ {
+ "val": "248676954",
+ "ts": "2020-03-21T21:56:34.269Z"
+ },
+ {
+ "val": "248676954",
+ "ts": "2020-03-21T21:56:44.269Z"
+ },
+ {
+ "val": "248676954",
+ "ts": "2020-03-21T21:56:54.269Z"
+ },
+ {
+ "val": "248676954",
+ "ts": "2020-03-21T21:57:04.269Z"
+ },
+ {
+ "val": "248680544",
+ "ts": "2020-03-21T21:57:14.269Z"
+ },
+ {
+ "val": "248734541",
+ "ts": "2020-03-21T21:57:24.269Z"
+ },
+ {
+ "val": "248734541",
+ "ts": "2020-03-21T21:57:34.269Z"
+ },
+ {
+ "val": "248734541",
+ "ts": "2020-03-21T21:57:44.269Z"
+ },
+ {
+ "val": "248734541",
+ "ts": "2020-03-21T21:57:54.269Z"
+ },
+ {
+ "val": "248734541",
+ "ts": "2020-03-21T21:58:04.269Z"
+ },
+ {
+ "val": "248738067",
+ "ts": "2020-03-21T21:58:14.269Z"
+ },
+ {
+ "val": "248792128",
+ "ts": "2020-03-21T21:58:24.269Z"
+ },
+ {
+ "val": "248792128",
+ "ts": "2020-03-21T21:58:34.269Z"
+ },
+ {
+ "val": "248792128",
+ "ts": "2020-03-21T21:58:44.269Z"
+ },
+ {
+ "val": "248792128",
+ "ts": "2020-03-21T21:58:54.269Z"
+ },
+ {
+ "val": "248792128",
+ "ts": "2020-03-21T21:59:04.269Z"
+ },
+ {
+ "val": "248796520",
+ "ts": "2020-03-21T21:59:14.269Z"
+ },
+ {
+ "val": "248849715",
+ "ts": "2020-03-21T21:59:24.269Z"
+ },
+ {
+ "val": "248849715",
+ "ts": "2020-03-21T21:59:34.269Z"
+ },
+ {
+ "val": "248849715",
+ "ts": "2020-03-21T21:59:44.269Z"
+ },
+ {
+ "val": "248849715",
+ "ts": "2020-03-21T21:59:54.269Z"
+ },
+ {
+ "val": "248849715",
+ "ts": "2020-03-21T22:00:04.269Z"
+ },
+ {
+ "val": "248854769",
+ "ts": "2020-03-21T22:00:14.269Z"
+ },
+ {
+ "val": "248907302",
+ "ts": "2020-03-21T22:00:24.269Z"
+ },
+ {
+ "val": "248907302",
+ "ts": "2020-03-21T22:00:34.269Z"
+ },
+ {
+ "val": "248907302",
+ "ts": "2020-03-21T22:00:44.269Z"
+ },
+ {
+ "val": "248907302",
+ "ts": "2020-03-21T22:00:54.269Z"
+ },
+ {
+ "val": "248907302",
+ "ts": "2020-03-21T22:01:04.269Z"
+ },
+ {
+ "val": "248911261",
+ "ts": "2020-03-21T22:01:14.269Z"
+ },
+ {
+ "val": "248964889",
+ "ts": "2020-03-21T22:01:24.269Z"
+ },
+ {
+ "val": "248964889",
+ "ts": "2020-03-21T22:01:34.269Z"
+ },
+ {
+ "val": "248964889",
+ "ts": "2020-03-21T22:01:44.269Z"
+ },
+ {
+ "val": "248964889",
+ "ts": "2020-03-21T22:01:54.269Z"
+ },
+ {
+ "val": "248964889",
+ "ts": "2020-03-21T22:02:04.269Z"
+ },
+ {
+ "val": "248967357",
+ "ts": "2020-03-21T22:02:14.269Z"
+ },
+ {
+ "val": "249022476",
+ "ts": "2020-03-21T22:02:24.269Z"
+ },
+ {
+ "val": "249022476",
+ "ts": "2020-03-21T22:02:34.269Z"
+ },
+ {
+ "val": "249022476",
+ "ts": "2020-03-21T22:02:44.269Z"
+ },
+ {
+ "val": "249022476",
+ "ts": "2020-03-21T22:02:54.269Z"
+ },
+ {
+ "val": "249022476",
+ "ts": "2020-03-21T22:03:04.269Z"
+ },
+ {
+ "val": "249025867",
+ "ts": "2020-03-21T22:03:14.269Z"
+ },
+ {
+ "val": "249080063",
+ "ts": "2020-03-21T22:03:24.269Z"
+ },
+ {
+ "val": "249080063",
+ "ts": "2020-03-21T22:03:34.269Z"
+ },
+ {
+ "val": "249080063",
+ "ts": "2020-03-21T22:03:44.269Z"
+ },
+ {
+ "val": "249080063",
+ "ts": "2020-03-21T22:03:54.269Z"
+ },
+ {
+ "val": "249080063",
+ "ts": "2020-03-21T22:04:04.269Z"
+ },
+ {
+ "val": "249084381",
+ "ts": "2020-03-21T22:04:14.269Z"
+ },
+ {
+ "val": "249137650",
+ "ts": "2020-03-21T22:04:24.269Z"
+ },
+ {
+ "val": "249137650",
+ "ts": "2020-03-21T22:04:34.269Z"
+ },
+ {
+ "val": "249137650",
+ "ts": "2020-03-21T22:04:44.269Z"
+ },
+ {
+ "val": "249137650",
+ "ts": "2020-03-21T22:04:54.269Z"
+ },
+ {
+ "val": "249137650",
+ "ts": "2020-03-21T22:05:04.269Z"
+ },
+ {
+ "val": "249141405",
+ "ts": "2020-03-21T22:05:14.269Z"
+ },
+ {
+ "val": "249195237",
+ "ts": "2020-03-21T22:05:24.269Z"
+ },
+ {
+ "val": "249195237",
+ "ts": "2020-03-21T22:05:34.269Z"
+ },
+ {
+ "val": "249195237",
+ "ts": "2020-03-21T22:05:44.269Z"
+ },
+ {
+ "val": "249195237",
+ "ts": "2020-03-21T22:05:54.269Z"
+ },
+ {
+ "val": "249195237",
+ "ts": "2020-03-21T22:06:04.269Z"
+ },
+ {
+ "val": "249199194",
+ "ts": "2020-03-21T22:06:14.269Z"
+ },
+ {
+ "val": "249252824",
+ "ts": "2020-03-21T22:06:24.269Z"
+ },
+ {
+ "val": "249252824",
+ "ts": "2020-03-21T22:06:34.269Z"
+ },
+ {
+ "val": "249252824",
+ "ts": "2020-03-21T22:06:44.269Z"
+ },
+ {
+ "val": "249252824",
+ "ts": "2020-03-21T22:06:54.269Z"
+ },
+ {
+ "val": "249252824",
+ "ts": "2020-03-21T22:07:04.269Z"
+ },
+ {
+ "val": "249256352",
+ "ts": "2020-03-21T22:07:14.269Z"
+ },
+ {
+ "val": "249310411",
+ "ts": "2020-03-21T22:07:24.269Z"
+ },
+ {
+ "val": "249310411",
+ "ts": "2020-03-21T22:07:34.269Z"
+ },
+ {
+ "val": "249310411",
+ "ts": "2020-03-21T22:07:44.269Z"
+ },
+ {
+ "val": "249310411",
+ "ts": "2020-03-21T22:07:54.269Z"
+ },
+ {
+ "val": "249310411",
+ "ts": "2020-03-21T22:08:04.269Z"
+ },
+ {
+ "val": "249313891",
+ "ts": "2020-03-21T22:08:14.269Z"
+ }
+ ]
+ },
+ {
+ "start": "2020-03-21T21:23:32Z",
+ "end": "2020-03-21T22:08:20Z",
+ "tags": [
+ ["__name__", "database_write_tagged_success"],
+ ["foo", "bar"],
+ ["namespace", "default]"]
+ ],
+ "datapoints": [
+ {
+ "val": "1002212916",
+ "ts": "2020-03-21T21:23:34.269Z"
+ },
+ {
+ "val": "1002254540",
+ "ts": "2020-03-21T21:23:44.269Z"
+ },
+ {
+ "val": "1002290564",
+ "ts": "2020-03-21T21:23:54.269Z"
+ },
+ {
+ "val": "1002329788",
+ "ts": "2020-03-21T21:24:04.269Z"
+ },
+ {
+ "val": "1002371412",
+ "ts": "2020-03-21T21:24:14.269Z"
+ },
+ {
+ "val": "1002408236",
+ "ts": "2020-03-21T21:24:24.269Z"
+ },
+ {
+ "val": "1002446660",
+ "ts": "2020-03-21T21:24:34.269Z"
+ },
+ {
+ "val": "1002487484",
+ "ts": "2020-03-21T21:24:44.269Z"
+ },
+ {
+ "val": "1002524308",
+ "ts": "2020-03-21T21:24:54.269Z"
+ },
+ {
+ "val": "1002563532",
+ "ts": "2020-03-21T21:25:04.269Z"
+ },
+ {
+ "val": "1002604695",
+ "ts": "2020-03-21T21:25:14.269Z"
+ },
+ {
+ "val": "1002641180",
+ "ts": "2020-03-21T21:25:24.269Z"
+ },
+ {
+ "val": "1002679604",
+ "ts": "2020-03-21T21:25:34.269Z"
+ },
+ {
+ "val": "1002721228",
+ "ts": "2020-03-21T21:25:44.269Z"
+ },
+ {
+ "val": "1002758052",
+ "ts": "2020-03-21T21:25:54.269Z"
+ },
+ {
+ "val": "1002797276",
+ "ts": "2020-03-21T21:26:04.269Z"
+ },
+ {
+ "val": "1002838100",
+ "ts": "2020-03-21T21:26:14.269Z"
+ },
+ {
+ "val": "1002874924",
+ "ts": "2020-03-21T21:26:24.269Z"
+ },
+ {
+ "val": "1002914148",
+ "ts": "2020-03-21T21:26:34.269Z"
+ },
+ {
+ "val": "1002954972",
+ "ts": "2020-03-21T21:26:44.269Z"
+ },
+ {
+ "val": "1002991796",
+ "ts": "2020-03-21T21:26:54.269Z"
+ },
+ {
+ "val": "1003030220",
+ "ts": "2020-03-21T21:27:04.269Z"
+ },
+ {
+ "val": "1003071844",
+ "ts": "2020-03-21T21:27:14.269Z"
+ },
+ {
+ "val": "1003108668",
+ "ts": "2020-03-21T21:27:24.269Z"
+ },
+ {
+ "val": "1003147892",
+ "ts": "2020-03-21T21:27:34.269Z"
+ },
+ {
+ "val": "1003188716",
+ "ts": "2020-03-21T21:27:44.269Z"
+ },
+ {
+ "val": "1003225540",
+ "ts": "2020-03-21T21:27:54.269Z"
+ },
+ {
+ "val": "1003263964",
+ "ts": "2020-03-21T21:28:04.269Z"
+ },
+ {
+ "val": "1003304788",
+ "ts": "2020-03-21T21:28:14.269Z"
+ },
+ {
+ "val": "1003341612",
+ "ts": "2020-03-21T21:28:24.269Z"
+ },
+ {
+ "val": "1003380836",
+ "ts": "2020-03-21T21:28:34.269Z"
+ },
+ {
+ "val": "1003422460",
+ "ts": "2020-03-21T21:28:44.269Z"
+ },
+ {
+ "val": "1003459284",
+ "ts": "2020-03-21T21:28:54.269Z"
+ },
+ {
+ "val": "1003497708",
+ "ts": "2020-03-21T21:29:04.269Z"
+ },
+ {
+ "val": "1003538532",
+ "ts": "2020-03-21T21:29:14.269Z"
+ },
+ {
+ "val": "1003576156",
+ "ts": "2020-03-21T21:29:24.269Z"
+ },
+ {
+ "val": "1003614580",
+ "ts": "2020-03-21T21:29:34.269Z"
+ },
+ {
+ "val": "1003642604",
+ "ts": "2020-03-21T21:29:44.269Z"
+ },
+ {
+ "val": "1003692228",
+ "ts": "2020-03-21T21:29:54.269Z"
+ },
+ {
+ "val": "1003723452",
+ "ts": "2020-03-21T21:30:04.269Z"
+ },
+ {
+ "val": "1003747476",
+ "ts": "2020-03-21T21:30:14.269Z"
+ },
+ {
+ "val": "1003763500",
+ "ts": "2020-03-21T21:30:24.269Z"
+ },
+ {
+ "val": "1003773124",
+ "ts": "2020-03-21T21:30:34.269Z"
+ },
+ {
+ "val": "1003781948",
+ "ts": "2020-03-21T21:30:44.269Z"
+ },
+ {
+ "val": "1003793972",
+ "ts": "2020-03-21T21:30:54.269Z"
+ },
+ {
+ "val": "1003807180",
+ "ts": "2020-03-21T21:31:04.269Z"
+ },
+ {
+ "val": "1003819620",
+ "ts": "2020-03-21T21:31:14.269Z"
+ },
+ {
+ "val": "1003827644",
+ "ts": "2020-03-21T21:31:24.269Z"
+ },
+ {
+ "val": "1003841268",
+ "ts": "2020-03-21T21:31:34.269Z"
+ },
+ {
+ "val": "1003849292",
+ "ts": "2020-03-21T21:31:44.269Z"
+ },
+ {
+ "val": "1003860516",
+ "ts": "2020-03-21T21:31:54.269Z"
+ },
+ {
+ "val": "1003876540",
+ "ts": "2020-03-21T21:32:04.269Z"
+ },
+ {
+ "val": "1003890964",
+ "ts": "2020-03-21T21:32:14.269Z"
+ },
+ {
+ "val": "1003908588",
+ "ts": "2020-03-21T21:32:24.269Z"
+ },
+ {
+ "val": "1003920612",
+ "ts": "2020-03-21T21:32:34.269Z"
+ },
+ {
+ "val": "1003928636",
+ "ts": "2020-03-21T21:32:44.269Z"
+ },
+ {
+ "val": "1003947860",
+ "ts": "2020-03-21T21:32:54.269Z"
+ },
+ {
+ "val": "1003959084",
+ "ts": "2020-03-21T21:33:04.269Z"
+ },
+ {
+ "val": "1003972708",
+ "ts": "2020-03-21T21:33:14.269Z"
+ },
+ {
+ "val": "1003989532",
+ "ts": "2020-03-21T21:33:24.269Z"
+ },
+ {
+ "val": "1004006356",
+ "ts": "2020-03-21T21:33:34.269Z"
+ },
+ {
+ "val": "1004019180",
+ "ts": "2020-03-21T21:33:44.269Z"
+ },
+ {
+ "val": "1004032804",
+ "ts": "2020-03-21T21:33:54.269Z"
+ },
+ {
+ "val": "1004044828",
+ "ts": "2020-03-21T21:34:04.269Z"
+ },
+ {
+ "val": "1004058452",
+ "ts": "2020-03-21T21:34:14.269Z"
+ },
+ {
+ "val": "1004069676",
+ "ts": "2020-03-21T21:34:24.269Z"
+ },
+ {
+ "val": "1004080900",
+ "ts": "2020-03-21T21:34:34.269Z"
+ },
+ {
+ "val": "1004100924",
+ "ts": "2020-03-21T21:34:44.269Z"
+ },
+ {
+ "val": "1004115348",
+ "ts": "2020-03-21T21:34:54.269Z"
+ },
+ {
+ "val": "1004131372",
+ "ts": "2020-03-21T21:35:04.269Z"
+ },
+ {
+ "val": "1004152196",
+ "ts": "2020-03-21T21:35:14.269Z"
+ },
+ {
+ "val": "1004164220",
+ "ts": "2020-03-21T21:35:24.269Z"
+ },
+ {
+ "val": "1004181044",
+ "ts": "2020-03-21T21:35:34.269Z"
+ },
+ {
+ "val": "1004196268",
+ "ts": "2020-03-21T21:35:44.269Z"
+ },
+ {
+ "val": "1004209892",
+ "ts": "2020-03-21T21:35:54.269Z"
+ },
+ {
+ "val": "1004217916",
+ "ts": "2020-03-21T21:36:04.269Z"
+ },
+ {
+ "val": "1004237140",
+ "ts": "2020-03-21T21:36:14.269Z"
+ },
+ {
+ "val": "1004249964",
+ "ts": "2020-03-21T21:36:24.269Z"
+ },
+ {
+ "val": "1004259588",
+ "ts": "2020-03-21T21:36:34.269Z"
+ },
+ {
+ "val": "1004276412",
+ "ts": "2020-03-21T21:36:44.269Z"
+ },
+ {
+ "val": "1004290836",
+ "ts": "2020-03-21T21:36:54.269Z"
+ },
+ {
+ "val": "1004306060",
+ "ts": "2020-03-21T21:37:04.269Z"
+ },
+ {
+ "val": "1004318884",
+ "ts": "2020-03-21T21:37:14.269Z"
+ },
+ {
+ "val": "1004329308",
+ "ts": "2020-03-21T21:37:24.269Z"
+ },
+ {
+ "val": "1004342132",
+ "ts": "2020-03-21T21:37:34.269Z"
+ },
+ {
+ "val": "1004361356",
+ "ts": "2020-03-21T21:37:44.269Z"
+ },
+ {
+ "val": "1004372580",
+ "ts": "2020-03-21T21:37:54.269Z"
+ },
+ {
+ "val": "1004382204",
+ "ts": "2020-03-21T21:38:04.269Z"
+ },
+ {
+ "val": "1004389428",
+ "ts": "2020-03-21T21:38:14.269Z"
+ },
+ {
+ "val": "1004403052",
+ "ts": "2020-03-21T21:38:24.269Z"
+ },
+ {
+ "val": "1004412676",
+ "ts": "2020-03-21T21:38:34.269Z"
+ },
+ {
+ "val": "1004427100",
+ "ts": "2020-03-21T21:38:44.269Z"
+ },
+ {
+ "val": "1004439124",
+ "ts": "2020-03-21T21:38:54.269Z"
+ },
+ {
+ "val": "1004452748",
+ "ts": "2020-03-21T21:39:04.269Z"
+ },
+ {
+ "val": "1004462372",
+ "ts": "2020-03-21T21:39:14.269Z"
+ },
+ {
+ "val": "1004478396",
+ "ts": "2020-03-21T21:39:24.269Z"
+ },
+ {
+ "val": "1004485620",
+ "ts": "2020-03-21T21:39:34.269Z"
+ },
+ {
+ "val": "1004493644",
+ "ts": "2020-03-21T21:39:44.269Z"
+ },
+ {
+ "val": "1004504868",
+ "ts": "2020-03-21T21:39:54.269Z"
+ },
+ {
+ "val": "1004523292",
+ "ts": "2020-03-21T21:40:04.269Z"
+ },
+ {
+ "val": "1004536916",
+ "ts": "2020-03-21T21:40:14.269Z"
+ },
+ {
+ "val": "1004548140",
+ "ts": "2020-03-21T21:40:24.269Z"
+ },
+ {
+ "val": "1004557764",
+ "ts": "2020-03-21T21:40:34.269Z"
+ },
+ {
+ "val": "1004571388",
+ "ts": "2020-03-21T21:40:44.269Z"
+ },
+ {
+ "val": "1004580212",
+ "ts": "2020-03-21T21:40:54.269Z"
+ },
+ {
+ "val": "1004597036",
+ "ts": "2020-03-21T21:41:04.269Z"
+ },
+ {
+ "val": "1004604260",
+ "ts": "2020-03-21T21:41:14.269Z"
+ },
+ {
+ "val": "1004613884",
+ "ts": "2020-03-21T21:41:24.269Z"
+ },
+ {
+ "val": "1004625908",
+ "ts": "2020-03-21T21:41:34.269Z"
+ },
+ {
+ "val": "1004637932",
+ "ts": "2020-03-21T21:41:44.269Z"
+ },
+ {
+ "val": "1004652356",
+ "ts": "2020-03-21T21:41:54.269Z"
+ },
+ {
+ "val": "1004660380",
+ "ts": "2020-03-21T21:42:04.269Z"
+ },
+ {
+ "val": "1004667604",
+ "ts": "2020-03-21T21:42:14.269Z"
+ },
+ {
+ "val": "1004676428",
+ "ts": "2020-03-21T21:42:24.269Z"
+ },
+ {
+ "val": "1004686052",
+ "ts": "2020-03-21T21:42:34.269Z"
+ },
+ {
+ "val": "1004700476",
+ "ts": "2020-03-21T21:42:44.269Z"
+ },
+ {
+ "val": "1004710900",
+ "ts": "2020-03-21T21:42:54.269Z"
+ },
+ {
+ "val": "1004723724",
+ "ts": "2020-03-21T21:43:04.269Z"
+ },
+ {
+ "val": "1004734148",
+ "ts": "2020-03-21T21:43:14.269Z"
+ },
+ {
+ "val": "1004742972",
+ "ts": "2020-03-21T21:43:24.269Z"
+ },
+ {
+ "val": "1004754996",
+ "ts": "2020-03-21T21:43:34.269Z"
+ },
+ {
+ "val": "1004764620",
+ "ts": "2020-03-21T21:43:44.269Z"
+ },
+ {
+ "val": "1004777444",
+ "ts": "2020-03-21T21:43:54.269Z"
+ },
+ {
+ "val": "1004786268",
+ "ts": "2020-03-21T21:44:04.269Z"
+ },
+ {
+ "val": "1004794292",
+ "ts": "2020-03-21T21:44:14.269Z"
+ },
+ {
+ "val": "1004803916",
+ "ts": "2020-03-21T21:44:24.269Z"
+ },
+ {
+ "val": "1004815940",
+ "ts": "2020-03-21T21:44:34.269Z"
+ },
+ {
+ "val": "1004828764",
+ "ts": "2020-03-21T21:44:44.269Z"
+ },
+ {
+ "val": "1004841588",
+ "ts": "2020-03-21T21:44:54.269Z"
+ },
+ {
+ "val": "1004854412",
+ "ts": "2020-03-21T21:45:04.269Z"
+ },
+ {
+ "val": "1004863236",
+ "ts": "2020-03-21T21:45:14.269Z"
+ },
+ {
+ "val": "1004875260",
+ "ts": "2020-03-21T21:45:24.269Z"
+ },
+ {
+ "val": "1004888084",
+ "ts": "2020-03-21T21:45:34.269Z"
+ },
+ {
+ "val": "1004899308",
+ "ts": "2020-03-21T21:45:44.269Z"
+ },
+ {
+ "val": "1004914532",
+ "ts": "2020-03-21T21:45:54.269Z"
+ },
+ {
+ "val": "1004930556",
+ "ts": "2020-03-21T21:46:04.269Z"
+ },
+ {
+ "val": "1004948980",
+ "ts": "2020-03-21T21:46:14.269Z"
+ },
+ {
+ "val": "1004962604",
+ "ts": "2020-03-21T21:46:24.269Z"
+ },
+ {
+ "val": "1004973828",
+ "ts": "2020-03-21T21:46:34.269Z"
+ },
+ {
+ "val": "1004990652",
+ "ts": "2020-03-21T21:46:44.269Z"
+ },
+ {
+ "val": "1005013876",
+ "ts": "2020-03-21T21:46:54.269Z"
+ },
+ {
+ "val": "1005029100",
+ "ts": "2020-03-21T21:47:04.269Z"
+ },
+ {
+ "val": "1005045924",
+ "ts": "2020-03-21T21:47:14.269Z"
+ },
+ {
+ "val": "1005061148",
+ "ts": "2020-03-21T21:47:24.269Z"
+ },
+ {
+ "val": "1005082772",
+ "ts": "2020-03-21T21:47:34.269Z"
+ },
+ {
+ "val": "1005098796",
+ "ts": "2020-03-21T21:47:44.269Z"
+ },
+ {
+ "val": "1005129220",
+ "ts": "2020-03-21T21:47:54.269Z"
+ },
+ {
+ "val": "1005158044",
+ "ts": "2020-03-21T21:48:04.269Z"
+ },
+ {
+ "val": "1005175668",
+ "ts": "2020-03-21T21:48:14.269Z"
+ },
+ {
+ "val": "1005201292",
+ "ts": "2020-03-21T21:48:24.269Z"
+ },
+ {
+ "val": "1005226116",
+ "ts": "2020-03-21T21:48:34.269Z"
+ },
+ {
+ "val": "1005245308",
+ "ts": "2020-03-21T21:48:44.269Z"
+ },
+ {
+ "val": "1005263764",
+ "ts": "2020-03-21T21:48:54.269Z"
+ },
+ {
+ "val": "1005297388",
+ "ts": "2020-03-21T21:49:04.269Z"
+ },
+ {
+ "val": "1005316612",
+ "ts": "2020-03-21T21:49:14.269Z"
+ },
+ {
+ "val": "1005335836",
+ "ts": "2020-03-21T21:49:24.269Z"
+ },
+ {
+ "val": "1005369460",
+ "ts": "2020-03-21T21:49:34.269Z"
+ },
+ {
+ "val": "1005397484",
+ "ts": "2020-03-21T21:49:44.269Z"
+ },
+ {
+ "val": "1005422308",
+ "ts": "2020-03-21T21:49:54.269Z"
+ },
+ {
+ "val": "1005446913",
+ "ts": "2020-03-21T21:50:04.269Z"
+ },
+ {
+ "val": "1005469556",
+ "ts": "2020-03-21T21:50:14.269Z"
+ },
+ {
+ "val": "1005503180",
+ "ts": "2020-03-21T21:50:24.269Z"
+ },
+ {
+ "val": "1005526404",
+ "ts": "2020-03-21T21:50:34.269Z"
+ },
+ {
+ "val": "1005548828",
+ "ts": "2020-03-21T21:50:44.269Z"
+ },
+ {
+ "val": "1005564052",
+ "ts": "2020-03-21T21:50:54.269Z"
+ },
+ {
+ "val": "1005586444",
+ "ts": "2020-03-21T21:51:04.269Z"
+ },
+ {
+ "val": "1005610500",
+ "ts": "2020-03-21T21:51:14.269Z"
+ },
+ {
+ "val": "1005660124",
+ "ts": "2020-03-21T21:51:24.269Z"
+ },
+ {
+ "val": "1005713748",
+ "ts": "2020-03-21T21:51:34.269Z"
+ },
+ {
+ "val": "1005791372",
+ "ts": "2020-03-21T21:51:44.269Z"
+ },
+ {
+ "val": "1005861796",
+ "ts": "2020-03-21T21:51:54.269Z"
+ },
+ {
+ "val": "1005925020",
+ "ts": "2020-03-21T21:52:04.269Z"
+ },
+ {
+ "val": "1005987444",
+ "ts": "2020-03-21T21:52:14.269Z"
+ },
+ {
+ "val": "1006037068",
+ "ts": "2020-03-21T21:52:24.269Z"
+ },
+ {
+ "val": "1006107492",
+ "ts": "2020-03-21T21:52:34.269Z"
+ },
+ {
+ "val": "1006185916",
+ "ts": "2020-03-21T21:52:44.269Z"
+ },
+ {
+ "val": "1006261140",
+ "ts": "2020-03-21T21:52:54.269Z"
+ },
+ {
+ "val": "1006341964",
+ "ts": "2020-03-21T21:53:04.269Z"
+ },
+ {
+ "val": "1006418788",
+ "ts": "2020-03-21T21:53:14.269Z"
+ },
+ {
+ "val": "1006490812",
+ "ts": "2020-03-21T21:53:24.269Z"
+ },
+ {
+ "val": "1006563636",
+ "ts": "2020-03-21T21:53:34.269Z"
+ },
+ {
+ "val": "1006639660",
+ "ts": "2020-03-21T21:53:44.269Z"
+ },
+ {
+ "val": "1006709284",
+ "ts": "2020-03-21T21:53:54.269Z"
+ },
+ {
+ "val": "1006786108",
+ "ts": "2020-03-21T21:54:04.269Z"
+ },
+ {
+ "val": "1006863732",
+ "ts": "2020-03-21T21:54:14.269Z"
+ },
+ {
+ "val": "1006943756",
+ "ts": "2020-03-21T21:54:24.269Z"
+ },
+ {
+ "val": "1007025380",
+ "ts": "2020-03-21T21:54:34.269Z"
+ },
+ {
+ "val": "1007107004",
+ "ts": "2020-03-21T21:54:44.269Z"
+ },
+ {
+ "val": "1007187828",
+ "ts": "2020-03-21T21:54:54.269Z"
+ },
+ {
+ "val": "1007270252",
+ "ts": "2020-03-21T21:55:04.269Z"
+ },
+ {
+ "val": "1007350276",
+ "ts": "2020-03-21T21:55:14.269Z"
+ },
+ {
+ "val": "1007428700",
+ "ts": "2020-03-21T21:55:24.269Z"
+ },
+ {
+ "val": "1007511124",
+ "ts": "2020-03-21T21:55:34.269Z"
+ },
+ {
+ "val": "1007595148",
+ "ts": "2020-03-21T21:55:44.269Z"
+ },
+ {
+ "val": "1007677572",
+ "ts": "2020-03-21T21:55:54.269Z"
+ },
+ {
+ "val": "1007761180",
+ "ts": "2020-03-21T21:56:04.269Z"
+ },
+ {
+ "val": "1007842420",
+ "ts": "2020-03-21T21:56:14.269Z"
+ },
+ {
+ "val": "1007924844",
+ "ts": "2020-03-21T21:56:24.269Z"
+ },
+ {
+ "val": "1008008068",
+ "ts": "2020-03-21T21:56:34.269Z"
+ },
+ {
+ "val": "1008091292",
+ "ts": "2020-03-21T21:56:44.269Z"
+ },
+ {
+ "val": "1008173716",
+ "ts": "2020-03-21T21:56:54.269Z"
+ },
+ {
+ "val": "1008256940",
+ "ts": "2020-03-21T21:57:04.269Z"
+ },
+ {
+ "val": "1008340164",
+ "ts": "2020-03-21T21:57:14.269Z"
+ },
+ {
+ "val": "1008424188",
+ "ts": "2020-03-21T21:57:24.269Z"
+ },
+ {
+ "val": "1008503412",
+ "ts": "2020-03-21T21:57:34.269Z"
+ },
+ {
+ "val": "1008587404",
+ "ts": "2020-03-21T21:57:44.269Z"
+ },
+ {
+ "val": "1008669860",
+ "ts": "2020-03-21T21:57:54.269Z"
+ },
+ {
+ "val": "1008754684",
+ "ts": "2020-03-21T21:58:04.269Z"
+ },
+ {
+ "val": "1008824308",
+ "ts": "2020-03-21T21:58:14.269Z"
+ },
+ {
+ "val": "1008879532",
+ "ts": "2020-03-21T21:58:24.269Z"
+ },
+ {
+ "val": "1008918756",
+ "ts": "2020-03-21T21:58:34.269Z"
+ },
+ {
+ "val": "1008959580",
+ "ts": "2020-03-21T21:58:44.269Z"
+ },
+ {
+ "val": "1008996404",
+ "ts": "2020-03-21T21:58:54.269Z"
+ },
+ {
+ "val": "1009034828",
+ "ts": "2020-03-21T21:59:04.269Z"
+ },
+ {
+ "val": "1009075652",
+ "ts": "2020-03-21T21:59:14.269Z"
+ },
+ {
+ "val": "1009112476",
+ "ts": "2020-03-21T21:59:24.269Z"
+ },
+ {
+ "val": "1009151700",
+ "ts": "2020-03-21T21:59:34.269Z"
+ },
+ {
+ "val": "1009193324",
+ "ts": "2020-03-21T21:59:44.269Z"
+ },
+ {
+ "val": "1009230148",
+ "ts": "2020-03-21T21:59:54.269Z"
+ },
+ {
+ "val": "1009268572",
+ "ts": "2020-03-21T22:00:04.269Z"
+ },
+ {
+ "val": "1009309396",
+ "ts": "2020-03-21T22:00:14.269Z"
+ },
+ {
+ "val": "1009346220",
+ "ts": "2020-03-21T22:00:24.269Z"
+ },
+ {
+ "val": "1009385444",
+ "ts": "2020-03-21T22:00:34.269Z"
+ },
+ {
+ "val": "1009426268",
+ "ts": "2020-03-21T22:00:44.269Z"
+ },
+ {
+ "val": "1009463092",
+ "ts": "2020-03-21T22:00:54.269Z"
+ },
+ {
+ "val": "1009501516",
+ "ts": "2020-03-21T22:01:04.269Z"
+ },
+ {
+ "val": "1009543140",
+ "ts": "2020-03-21T22:01:14.269Z"
+ },
+ {
+ "val": "1009579964",
+ "ts": "2020-03-21T22:01:24.269Z"
+ },
+ {
+ "val": "1009619188",
+ "ts": "2020-03-21T22:01:34.269Z"
+ },
+ {
+ "val": "1009660012",
+ "ts": "2020-03-21T22:01:44.269Z"
+ },
+ {
+ "val": "1009696836",
+ "ts": "2020-03-21T22:01:54.269Z"
+ },
+ {
+ "val": "1009736060",
+ "ts": "2020-03-21T22:02:04.269Z"
+ },
+ {
+ "val": "1009776884",
+ "ts": "2020-03-21T22:02:14.269Z"
+ },
+ {
+ "val": "1009813708",
+ "ts": "2020-03-21T22:02:24.269Z"
+ },
+ {
+ "val": "1009852132",
+ "ts": "2020-03-21T22:02:34.269Z"
+ },
+ {
+ "val": "1009893756",
+ "ts": "2020-03-21T22:02:44.269Z"
+ },
+ {
+ "val": "1009929780",
+ "ts": "2020-03-21T22:02:54.269Z"
+ },
+ {
+ "val": "1009969804",
+ "ts": "2020-03-21T22:03:04.269Z"
+ },
+ {
+ "val": "1010010628",
+ "ts": "2020-03-21T22:03:14.269Z"
+ },
+ {
+ "val": "1010047452",
+ "ts": "2020-03-21T22:03:24.269Z"
+ },
+ {
+ "val": "1010085876",
+ "ts": "2020-03-21T22:03:34.269Z"
+ },
+ {
+ "val": "1010126700",
+ "ts": "2020-03-21T22:03:44.269Z"
+ },
+ {
+ "val": "1010164324",
+ "ts": "2020-03-21T22:03:54.269Z"
+ },
+ {
+ "val": "1010202748",
+ "ts": "2020-03-21T22:04:04.269Z"
+ },
+ {
+ "val": "1010244372",
+ "ts": "2020-03-21T22:04:14.269Z"
+ },
+ {
+ "val": "1010280396",
+ "ts": "2020-03-21T22:04:24.269Z"
+ },
+ {
+ "val": "1010320420",
+ "ts": "2020-03-21T22:04:34.269Z"
+ },
+ {
+ "val": "1010361244",
+ "ts": "2020-03-21T22:04:44.269Z"
+ },
+ {
+ "val": "1010398068",
+ "ts": "2020-03-21T22:04:54.269Z"
+ },
+ {
+ "val": "1010436492",
+ "ts": "2020-03-21T22:05:04.269Z"
+ },
+ {
+ "val": "1010477316",
+ "ts": "2020-03-21T22:05:14.269Z"
+ },
+ {
+ "val": "1010514140",
+ "ts": "2020-03-21T22:05:24.269Z"
+ },
+ {
+ "val": "1010553364",
+ "ts": "2020-03-21T22:05:34.269Z"
+ },
+ {
+ "val": "1010594988",
+ "ts": "2020-03-21T22:05:44.269Z"
+ },
+ {
+ "val": "1010631012",
+ "ts": "2020-03-21T22:05:54.269Z"
+ },
+ {
+ "val": "1010670236",
+ "ts": "2020-03-21T22:06:04.269Z"
+ },
+ {
+ "val": "1010711060",
+ "ts": "2020-03-21T22:06:14.269Z"
+ },
+ {
+ "val": "1010747884",
+ "ts": "2020-03-21T22:06:24.269Z"
+ },
+ {
+ "val": "1010787108",
+ "ts": "2020-03-21T22:06:34.269Z"
+ },
+ {
+ "val": "1010827932",
+ "ts": "2020-03-21T22:06:44.269Z"
+ },
+ {
+ "val": "1010864756",
+ "ts": "2020-03-21T22:06:54.269Z"
+ },
+ {
+ "val": "1010903180",
+ "ts": "2020-03-21T22:07:04.269Z"
+ },
+ {
+ "val": "1010944804",
+ "ts": "2020-03-21T22:07:14.269Z"
+ },
+ {
+ "val": "1010982428",
+ "ts": "2020-03-21T22:07:24.269Z"
+ },
+ {
+ "val": "1011020052",
+ "ts": "2020-03-21T22:07:34.269Z"
+ },
+ {
+ "val": "1011061676",
+ "ts": "2020-03-21T22:07:44.269Z"
+ },
+ {
+ "val": "1011098500",
+ "ts": "2020-03-21T22:07:54.269Z"
+ },
+ {
+ "val": "1011137724",
+ "ts": "2020-03-21T22:08:04.269Z"
+ },
+ {
+ "val": "1011178548",
+ "ts": "2020-03-21T22:08:14.269Z"
+ }
+ ]
+ }
+ ]
+ }
\ No newline at end of file
diff --git a/scripts/comparator/run.sh b/scripts/comparator/run.sh
index 63e0327ae8..ea5c05d781 100755
--- a/scripts/comparator/run.sh
+++ b/scripts/comparator/run.sh
@@ -9,12 +9,15 @@ export REVISION=$(git rev-parse HEAD)
CI=${CI:-true}
RUN_ONLY=${RUN_ONLY:-false}
-export QUERY_FILE=$COMPARATOR/queries.json
+export QUERY_FILE=$COMPARATOR/basic_queries/queries.json
+export REGRESSION_DIR=$COMPARATOR/regression_data
export GRAFANA_PATH=$COMPARATOR/grafana
export DASHBOARD=$GRAFANA_PATH/dash.json.out
export END=${END:-$(date +%s)}
export START=${START:-$(( $END - 10800 ))}
+# TODO: make this a bit less hacky in the future; e.g. take from config.
+export COMPARATOR_WRITE="localhost:9001"
function generate_dash {
TEMPLATE=$GRAFANA_PATH/dashboard.tmpl
@@ -65,4 +68,11 @@ then
trap defer EXIT
fi
-$comparator -input=$QUERY_FILE -s=$START -e=$END
+$comparator -input=$QUERY_FILE \
+-s=$START \
+-e=$END \
+-comparator=$COMPARATOR_WRITE \
+-regressionDir=$REGRESSION_DIR
+
+# Run PromQL testdata tests
+go test -v -timeout 300s -tags=compatibility -count=1 github.com/m3db/m3/src/query/test/compatibility/
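For reference, a minimal local invocation of the updated comparator suite might look like the following sketch; `START`/`END` default to a three-hour window ending now per the exports above, and the one-hour window here is purely illustrative:

```bash
# Run the comparator suite over the last hour instead of the default window.
cd scripts/comparator
export END=$(date +%s)
export START=$(( END - 3600 ))
./run.sh
```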
diff --git a/scripts/comparator/utils/compare_utilities.go b/scripts/comparator/utils/compare_utilities.go
index 0b597b230a..9fe46d3f0d 100644
--- a/scripts/comparator/utils/compare_utilities.go
+++ b/scripts/comparator/utils/compare_utilities.go
@@ -26,7 +26,6 @@ import (
"net/url"
"os"
"strconv"
- "strings"
"go.uber.org/zap"
)
@@ -42,6 +41,8 @@ type InputQuery struct {
Queries []string `json:"queries"`
// Steps is the list of step sizes for these queries.
Steps []string `json:"steps"`
+ // Reruns is the number of times to rerun this query group.
+ Reruns int `json:"reruns"`
}
// PromQLQueryGroup is a list of constructed PromQL query groups.
@@ -50,6 +51,8 @@ type PromQLQueryGroup struct {
QueryGroup string
// Queries is a list of PromQL compatible queries.
Queries []string
+ // Reruns is the number of times to rerun this query group.
+ Reruns int
}
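As a sketch, an entry in the basic queries file that exercises the new field might look as follows; the `queryGroup` key is assumed from context, while `queries`, `steps`, and `reruns` follow the json tags above (values illustrative):

```json
[
  {
    "queryGroup": "rate",
    "queries": ["rate(database_write_tagged_success[1m])"],
    "steps": ["15s", "30s"],
    "reruns": 2
  }
]
```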
func (q InputQueries) constructPromQL(
@@ -68,20 +71,31 @@ func (q InputQuery) constructPromQL(start int64, end int64) PromQLQueryGroup {
queries := make([]string, 0, len(q.Queries)*len(q.Steps))
for _, inQuery := range q.Queries {
for _, inStep := range q.Steps {
- values := make(url.Values)
- values.Add("query", inQuery)
- values.Add("step", inStep)
- values.Add("start", strconv.Itoa(int(start)))
- values.Add("end", strconv.Itoa(int(end)))
- query := "/api/v1/query_range?" + values.Encode()
-
- queries = append(queries, query)
+ queryRangeValues := make(url.Values)
+ queryRangeValues.Add("query", inQuery)
+ queryRangeValues.Add("step", inStep)
+ queryRangeValues.Add("start", strconv.Itoa(int(start)))
+ queryRangeValues.Add("end", strconv.Itoa(int(end)))
+ queryRangePath := "/api/v1/query_range?" + queryRangeValues.Encode()
+
+ queryValues := make(url.Values)
+ queryValues.Add("query", inQuery)
+ queryValues.Add("time", strconv.Itoa(int(start)))
+ queryPath := "/api/v1/query?" + queryValues.Encode()
+
+ queries = append(queries, queryRangePath, queryPath)
}
}
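+ // Treat any reruns value below two as a single run.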
+ runs := 1
+ if q.Reruns > 1 {
+ runs = q.Reruns
+ }
+
return PromQLQueryGroup{
QueryGroup: q.QueryGroup,
Queries: queries,
+ Reruns: runs,
}
}
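Illustratively, each (query, step) pair now produces both request shapes; against a local coordinator (host and port assumed) the two generated paths would be exercised like this:

```bash
# The range query and the new instant query built for one (query, step) pair.
curl 'http://localhost:7201/api/v1/query_range?query=up&start=1584748800&end=1584759600&step=15s'
curl 'http://localhost:7201/api/v1/query?query=up&time=1584748800'
```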
@@ -104,7 +118,7 @@ func parseFileToQueries(
queries := make(InputQueries, 0, 10)
if err := json.Unmarshal(buf, &queries); err != nil {
- log.Error("could not unmarhsal queries", zap.Error(err))
+ log.Error("could not unmarshal queries", zap.Error(err))
return nil, err
}
@@ -126,82 +140,3 @@ func ParseFileToPromQLQueryGroup(
return queries.constructPromQL(start, end), nil
}
-
-// GrafanaQueries is a list of Grafana dashboard compatible queries.
-type GrafanaQueries struct {
- // QueryGroup is the general category for these queries.
- QueryGroup string
- // Queries is a list of Grafana dashboard compatible queries.
- Queries []GrafanaQuery
- // Index is this query group's index.
- Index int
-}
-
-// GrafanaQuery is a Grafana dashboard compatible query.
-type GrafanaQuery struct {
- // Query is the query.
- Query string
- // Interval is the step size.
- Interval string
- // Index is this query's index.
- Index int
- // Left indicates if this panel is on the left.
- Left bool
-}
-
-// constructGrafanaQueries constructs a list of Grafana dashboard compatible
-// queries.
-func (q InputQueries) constructGrafanaQueries() []GrafanaQueries {
- queries := make([]GrafanaQueries, 0, len(q))
- idx := 0
- for _, inQuery := range q {
- query, index := inQuery.constructGrafanaQuery(idx)
- idx = index
- // NB: don't add empty queries if they exist for whatever reason.
- if len(query.Queries) > 0 {
- queries = append(queries, query)
- }
- }
-
- return queries
-}
-
-func (q InputQuery) constructGrafanaQuery(idx int) (GrafanaQueries, int) {
- grafanaQueries := GrafanaQueries{
- QueryGroup: q.QueryGroup,
- Index: idx,
- }
-
- queries := make([]GrafanaQuery, 0, len(q.Queries)*len(q.Steps))
- left := true
- for _, inQuery := range q.Queries {
- for _, inStep := range q.Steps {
- idx++
- queries = append(queries, GrafanaQuery{
- Query: strings.ReplaceAll(inQuery, `"`, `\"`),
- Interval: inStep,
- Index: idx,
- Left: left,
- })
-
- left = !left
- }
- }
-
- grafanaQueries.Queries = queries
- return grafanaQueries, idx + 1
-}
-
-// ParseFileToGrafanaQueries parses a JSON queries file into Grafana dashboard
-// compatible queries.
-func ParseFileToGrafanaQueries(
- fileName string,
- log *zap.Logger,
-) ([]GrafanaQueries, error) {
- queries, err := parseFileToQueries(fileName, log)
- if err != nil {
- return nil, err
- }
-
- return queries.constructGrafanaQueries(), nil
-}
diff --git a/scripts/comparator/utils/compare_utilities_grafana.go b/scripts/comparator/utils/compare_utilities_grafana.go
new file mode 100644
index 0000000000..d5cf7fed32
--- /dev/null
+++ b/scripts/comparator/utils/compare_utilities_grafana.go
@@ -0,0 +1,106 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package utils
+
+import (
+ "strings"
+
+ "go.uber.org/zap"
+)
+
+// GrafanaQueries is a list of Grafana dashboard compatible queries.
+type GrafanaQueries struct {
+ // QueryGroup is the general category for these queries.
+ QueryGroup string
+ // Queries is a list of Grafana dashboard compatible queries.
+ Queries []GrafanaQuery
+ // Index is this query group's index.
+ Index int
+}
+
+// GrafanaQuery is a Grafana dashboard compatible query.
+type GrafanaQuery struct {
+ // Query is the query.
+ Query string
+ // Interval is the step size.
+ Interval string
+ // Index is this query's index.
+ Index int
+ // Left indicates if this panel is on the left.
+ Left bool
+}
+
+// constructGrafanaQueries constructs a list of Grafana dashboard compatible
+// queries.
+func (q InputQueries) constructGrafanaQueries() []GrafanaQueries {
+ queries := make([]GrafanaQueries, 0, len(q))
+ idx := 0
+ for _, inQuery := range q {
+ query, index := inQuery.constructGrafanaQuery(idx)
+ idx = index
+ // NB: don't add empty queries if they exist for whatever reason.
+ if len(query.Queries) > 0 {
+ queries = append(queries, query)
+ }
+ }
+
+ return queries
+}
+
+func (q InputQuery) constructGrafanaQuery(idx int) (GrafanaQueries, int) {
+ grafanaQueries := GrafanaQueries{
+ QueryGroup: q.QueryGroup,
+ Index: idx,
+ }
+
+ queries := make([]GrafanaQuery, 0, len(q.Queries)*len(q.Steps))
+ left := true
+ for _, inQuery := range q.Queries {
+ for _, inStep := range q.Steps {
+ idx++
+ queries = append(queries, GrafanaQuery{
+ Query: strings.ReplaceAll(inQuery, `"`, `\"`),
+ Interval: inStep,
+ Index: idx,
+ Left: left,
+ })
+
+ left = !left
+ }
+ }
+
+ grafanaQueries.Queries = queries
+ return grafanaQueries, idx + 1
+}
+
+// ParseFileToGrafanaQueries parses a JSON queries file into Grafana dashboard
+// compatible queries.
+func ParseFileToGrafanaQueries(
+ fileName string,
+ log *zap.Logger,
+) ([]GrafanaQueries, error) {
+ queries, err := parseFileToQueries(fileName, log)
+ if err != nil {
+ return nil, err
+ }
+
+ return queries.constructGrafanaQueries(), nil
+}
diff --git a/scripts/comparator/utils/compare_utilities_regression.go b/scripts/comparator/utils/compare_utilities_regression.go
new file mode 100644
index 0000000000..523b6aeaef
--- /dev/null
+++ b/scripts/comparator/utils/compare_utilities_regression.go
@@ -0,0 +1,149 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package utils
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "path/filepath"
+
+ "github.com/m3db/m3/src/cmd/services/m3comparator/main/parser"
+
+ "go.uber.org/zap"
+)
+
+// RegressionQuery is the JSON representation of a query to be compared.
+type RegressionQuery struct {
+ Name string `json:"name"`
+ Query string `json:"query"`
+ StartMillis int64 `json:"startMillis"`
+ EndMillis int64 `json:"endMillis"`
+ Step int `json:"step"`
+ Retries int `json:"retries"`
+ Data []parser.Series `json:"data"`
+}
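As a sketch, a file under the regression data directory decodes into this struct roughly as follows; field names come from the json tags above, the series shape under `data` matches the fixture earlier in this diff, and all values are illustrative:

```json
{
  "name": "write success counter",
  "query": "database_write_tagged_success",
  "startMillis": 1584825812000,
  "endMillis": 1584828500000,
  "step": 10,
  "retries": 3,
  "data": [
    {
      "start": "2020-03-21T21:23:32Z",
      "end": "2020-03-21T22:08:20Z",
      "tags": [["__name__", "database_write_tagged_success"], ["foo", "bar"]],
      "datapoints": [
        { "val": "1002212916", "ts": "2020-03-21T21:23:34.269Z" }
      ]
    }
  ]
}
```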
+
+func parseRegressionFileToQueries(
+ fileName string,
+ log *zap.Logger,
+) (RegressionQuery, error) {
+ file, err := os.Open(fileName)
+ if err != nil {
+ log.Error("could not open file", zap.Error(err))
+ return RegressionQuery{}, err
+ }
+
+ defer file.Close()
+ buf, err := ioutil.ReadAll(file)
+ if err != nil {
+ log.Error("could not read file", zap.Error(err))
+ return RegressionQuery{}, err
+ }
+
+ var query RegressionQuery
+ if err := json.Unmarshal(buf, &query); err != nil {
+ log.Error("could not unmarshal regression query", zap.Error(err))
+ return RegressionQuery{}, err
+ }
+
+ return query, err
+}
+
+// PromQLRegressionQueryGroup is a PromQLQueryGroup with a given data set.
+type PromQLRegressionQueryGroup struct {
+ PromQLQueryGroup
+ Retries int
+ Data []parser.Series
+}
+
+func (q RegressionQuery) constructPromQL() PromQLRegressionQueryGroup {
+ values := make(url.Values)
+ values.Add("query", q.Query)
+ values.Add("step", fmt.Sprint(q.Step))
+ values.Add("start", fmt.Sprint(q.StartMillis))
+ values.Add("end", fmt.Sprint(q.EndMillis))
+ query := "/api/v1/query_range?" + values.Encode()
+
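+ // As with reruns in compare_utilities.go, treat any retries value below two as a single attempt.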
+ retries := 1
+ if q.Retries > 1 {
+ retries = q.Retries
+ }
+
+ return PromQLRegressionQueryGroup{
+ PromQLQueryGroup: PromQLQueryGroup{
+ QueryGroup: q.Name,
+ Queries: []string{query},
+ },
+ Data: q.Data,
+ Retries: retries,
+ }
+}
+
+func parseRegressionFileToPromQLQueryGroup(
+ fileName string,
+ log *zap.Logger,
+) (PromQLRegressionQueryGroup, error) {
+ query, err := parseRegressionFileToQueries(fileName, log)
+ if err != nil {
+ return PromQLRegressionQueryGroup{}, err
+ }
+
+ return query.constructPromQL(), nil
+}
+
+// ParseRegressionFilesToPromQLQueryGroup parses a directory with
+// regression query files into PromQL query groups.
+func ParseRegressionFilesToPromQLQueryGroup(
+ directory string,
+ log *zap.Logger,
+) ([]PromQLRegressionQueryGroup, error) {
+ files, err := ioutil.ReadDir(directory)
+ if err != nil {
+ log.Info("could not read directory")
+ return nil, err
+ }
+
+ if len(files) == 0 {
+ log.Info("no files in directory")
+ return nil, nil
+ }
+
+ groups := make([]PromQLRegressionQueryGroup, 0, len(files))
+ for _, f := range files {
+ filePath := filepath.Join(directory, f.Name())
+ if f.IsDir() {
+ log.Info("skipping file", zap.String("filePath", filePath))
+ }
+
+ group, err := parseRegressionFileToPromQLQueryGroup(filePath, log)
+ if err != nil {
+ log.Error("failed to parse file", zap.String("path", filePath), zap.Error(err))
+ return nil, err
+ }
+
+ groups = append(groups, group)
+ }
+
+ return groups, nil
+}
diff --git a/scripts/development/m3_stack/README.md b/scripts/development/m3_stack/README.md
index 10b5dcfa02..689766e41d 100644
--- a/scripts/development/m3_stack/README.md
+++ b/scripts/development/m3_stack/README.md
@@ -2,14 +2,22 @@
This docker-compose file will setup the following environment:
-1. 3 M3DB nodes with a single node acting as an ETCD seed
+1. 1 M3DB node, which also acts as the ETCD seed
2. 1 M3Coordinator node
3. 1 Grafana node (with a pre-configured Prometheus source)
4. 1 Prometheus node that scrapes the M3DB/M3Coordinator nodes and writes the metrics to M3Coordinator
+The environment variables that let you configure this setup are:
+- `USE_MULTI_DB_NODES=true`: use 3 database nodes instead of 1 for the cluster.
+- `USE_JAEGER=true`: enable Jaeger to inspect traces emitted by M3 services.
+- `USE_PROMETHEUS_HA=true`: send data to M3 from two Prometheus instances to replicate an HA Prometheus deployment.
+- `USE_AGGREGATOR=true`: use a dedicated aggregator to aggregate metrics.
+- `USE_AGGREGATOR_HA=true`: use two dedicated aggregators for highly available metrics aggregation.
+- `USE_MULTIPROCESS_COORDINATOR=true`: use a multi-process coordinator with the default number of processes.
+
## Usage
-Use the `start_m3.sh` and `stop_m3.sh` scripts.
+Use the `start_m3.sh` and `stop_m3.sh` scripts. These require a successful run of `make m3dbnode` from the project root first, for example:
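+
+```bash
+# From the project root: build the database binary, then bring the stack up.
+make m3dbnode
+cd scripts/development/m3_stack
+./start_m3.sh
+
+# Optionally combine flags, e.g. three DB nodes plus the aggregator pipeline.
+USE_MULTI_DB_NODES=true USE_AGGREGATOR=true ./start_m3.sh
+
+# Tear everything down when finished.
+./stop_m3.sh
+```
+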
## Grafana
@@ -48,4 +56,4 @@ Load can easily be increased by modifying the `prometheus.yml` file to reduce th
## Containers Hanging / Unresponsive
-Running the entire stack can be resource intensive. If the containers are unresponsive try increasing the amount of cores and memory that the docker daemon is allowed to use.
\ No newline at end of file
+Running the entire stack can be resource intensive. If the containers are unresponsive, try increasing the number of cores and the amount of memory that the Docker daemon is allowed to use.
diff --git a/scripts/development/m3_stack/docker-compose.yml b/scripts/development/m3_stack/docker-compose.yml
index d0f8de35f0..83d7390dd9 100644
--- a/scripts/development/m3_stack/docker-compose.yml
+++ b/scripts/development/m3_stack/docker-compose.yml
@@ -4,9 +4,9 @@ services:
networks:
- backend
build:
- context: ../../../
- dockerfile: ./docker/m3dbnode/Dockerfile
- image: m3dbnode01:latest
+ context: ../../../bin
+ dockerfile: ./docker/m3dbnode/development.Dockerfile
+ image: m3dbnode:dev
volumes:
- "./m3dbnode.yml:/etc/m3dbnode/m3dbnode.yml"
- "./schema.proto:/etc/m3dbnode/schema.proto"
@@ -24,9 +24,9 @@ services:
networks:
- backend
build:
- context: ../../../
- dockerfile: ./docker/m3dbnode/Dockerfile
- image: m3dbnode02:latest
+ context: ../../../bin
+ dockerfile: ./docker/m3dbnode/development.Dockerfile
+ image: m3dbnode:dev
volumes:
- "./m3dbnode.yml:/etc/m3dbnode/m3dbnode.yml"
environment:
@@ -35,9 +35,9 @@ services:
networks:
- backend
build:
- context: ../../../
- dockerfile: ./docker/m3dbnode/Dockerfile
- image: m3dbnode03:latest
+ context: ../../../bin
+ dockerfile: ./docker/m3dbnode/development.Dockerfile
+ image: m3dbnode:dev
volumes:
- "./m3dbnode.yml:/etc/m3dbnode/m3dbnode.yml"
environment:
@@ -50,13 +50,28 @@ services:
networks:
- backend
build:
- context: ../../../
- dockerfile: ./docker/m3aggregator/Dockerfile
- image: m3aggregator01:latest
+ context: ../../../bin
+ dockerfile: ./docker/m3aggregator/development.Dockerfile
+ image: m3aggregator:dev
volumes:
- "./m3aggregator.yml:/etc/m3aggregator/m3aggregator.yml"
environment:
- M3AGGREGATOR_HOST_ID=m3aggregator01
+ m3aggregator02:
+ expose:
+ - "6002"
+ ports:
+ - "0.0.0.0:6002:6001"
+ networks:
+ - backend
+ build:
+ context: ../../../bin
+ dockerfile: ./docker/m3aggregator/development.Dockerfile
+ image: m3aggregator:dev
+ volumes:
+ - "./m3aggregator.yml:/etc/m3aggregator/m3aggregator.yml"
+ environment:
+ - M3AGGREGATOR_HOST_ID=m3aggregator02
m3coordinator01:
expose:
- "7201"
@@ -71,11 +86,13 @@ services:
networks:
- backend
build:
- context: ../../../
- dockerfile: ./docker/m3coordinator/Dockerfile
- image: m3coordinator01:latest
+ context: ../../../bin
+ dockerfile: ./docker/m3coordinator/development.Dockerfile
+ image: m3coordinator:dev
volumes:
- - "./m3coordinator.yml:/etc/m3coordinator/m3coordinator.yml"
+ # Use a git ignored path to easily change pre-set configs.
+ # Note: Use ".tmp" suffix is git ignored.
+ - "./m3coordinator.yml.tmp:/etc/m3coordinator/m3coordinator.yml"
- "./schema.proto:/etc/m3coordinator/schema.proto"
m3collector01:
expose:
@@ -87,9 +104,9 @@ services:
networks:
- backend
build:
- context: ../../../
- dockerfile: ./docker/m3collector/Dockerfile
- image: m3collector01:latest
+ context: ../../../bin
+ dockerfile: ./docker/m3collector/development.Dockerfile
+ image: m3collector:dev
volumes:
- "./m3collector.yml:/etc/m3collector/m3collector.yml"
prometheus01:
@@ -101,7 +118,17 @@ services:
- backend
image: prom/prometheus:latest
volumes:
- - "./:/etc/prometheus/"
+ - "./prometheus.yml:/etc/prometheus/prometheus.yml"
+ prometheus02:
+ expose:
+ - "9091"
+ ports:
+ - "0.0.0.0:9091:9090"
+ networks:
+ - backend
+ image: prom/prometheus:latest
+ volumes:
+ - "./prometheus.yml:/etc/prometheus/prometheus.yml"
grafana:
build:
context: ../../../
diff --git a/scripts/development/m3_stack/m3aggregator.yml b/scripts/development/m3_stack/m3aggregator.yml
index 92105e8db5..ea9be32dc0 100644
--- a/scripts/development/m3_stack/m3aggregator.yml
+++ b/scripts/development/m3_stack/m3aggregator.yml
@@ -7,6 +7,8 @@ metrics:
prometheus:
onError: none
handlerPath: /metrics
+ listenAddress: 0.0.0.0:6002
+ timerType: histogram
sanitization: prometheus
samplingRate: 1.0
extended: none
@@ -16,58 +18,18 @@ http:
readTimeout: 60s
writeTimeout: 60s
-rawtcp:
- listenAddress: 0.0.0.0:6000
- keepAliveEnabled: true
- keepAlivePeriod: 1m
- retry:
- initialBackoff: 5ms
- backoffFactor: 2.0
- maxBackoff: 1s
- forever: true
- jitter: true
- readBufferSize: 1440
- msgpackIterator:
- ignoreHigherVersion: false
- readerBufferSize: 1440
- largeFloatsSize: 1024
- largeFloatsPool:
- buckets:
- - count: 1024
- capacity: 2048
- - count: 512
- capacity: 4096
- - count: 256
- capacity: 8192
- - count: 128
- capacity: 16384
- - count: 64
- capacity: 32768
- - count: 32
- capacity: 65536
- watermark:
- low: 0.001
- high: 0.002
- protobufIterator:
- initBufferSize: 1440
- maxMessageSize: 50000000 # max message size is 50MB
- bytesPool:
- buckets:
- - count: 1024
- capacity: 2048
- - count: 512
- capacity: 4096
- - count: 256
- capacity: 8192
- - count: 128
- capacity: 16384
- - count: 64
- capacity: 32768
- - count: 32
- capacity: 65536
+m3msg:
+ server:
+ listenAddress: 0.0.0.0:6000
+ retry:
+ maxBackoff: 10s
+ jitter: true
+ consumer:
+ messagePool:
+ size: 16384
watermark:
- low: 0.001
- high: 0.002
+ low: 0.2
+ high: 0.5
kvClient:
etcd:
@@ -128,39 +90,24 @@ aggregator:
- count: 1024
capacity: 64
client:
- placementKV:
- namespace: /placement
- zone: embedded
- environment: default_env
- placementWatcher:
- key: m3aggregator
- initWatchTimeout: 15s
- hashType: murmur32
- shardCutoffLingerDuration: 1m
- encoder:
- initBufferSize: 100
- maxMessageSize: 50000000
- bytesPool:
- buckets:
- - capacity: 16
- count: 10
- - capacity: 32
- count: 20
- watermark:
- low: 0.001
- high: 0.01
- flushSize: 1440
- maxTimerBatchSize: 140
- queueSize: 1000
- queueDropType: oldest
- connection:
- connectionTimeout: 1s
- connectionKeepAlive: true
- writeTimeout: 1s
- initReconnectThreshold: 2
- maxReconnectThreshold: 5000
- reconnectThresholdMultiplier: 2
- maxReconnectDuration: 1m
+ type: m3msg
+ m3msg:
+ producer:
+ writer:
+ topicName: aggregator_ingest
+ topicServiceOverride:
+ zone: embedded
+ environment: default_env
+ placement:
+ isStaged: true
+ placementServiceOverride:
+ namespaces:
+ placement: /placement
+ messagePool:
+ size: 16384
+ watermark:
+ low: 0.2
+ high: 0.5
placementManager:
kvConfig:
namespace: /placement
@@ -244,25 +191,14 @@ aggregator:
topicServiceOverride:
zone: embedded
environment: default_env
- messageRetry:
- initialBackoff: 1m
- maxBackoff: 2m
- messageQueueNewWritesScanInterval: 1s
- ackErrorRetry:
- initialBackoff: 2s
- maxBackoff: 10s
- connection:
- dialTimeout: 5s
- writeTimeout: 5s
- retry:
- initialBackoff: 1s
- maxBackoff: 10s
- flushInterval: 1s
- writeBufferSize: 16384
- readBufferSize: 256
+ messagePool:
+ size: 16384
+ watermark:
+ low: 0.2
+ high: 0.5
forwarding:
maxSingleDelay: 5s
- entryTTL: 6h
+ entryTTL: 1h
entryCheckInterval: 10m
maxTimerBatchSizePerWrite: 140
defaultStoragePolicies:
diff --git a/scripts/development/m3_stack/m3coordinator-aggregator.yml b/scripts/development/m3_stack/m3coordinator-aggregator.yml
new file mode 100644
index 0000000000..a9a4d40b32
--- /dev/null
+++ b/scripts/development/m3_stack/m3coordinator-aggregator.yml
@@ -0,0 +1,83 @@
+listenAddress:
+ value: "0.0.0.0:7201"
+
+logging:
+ level: info
+
+metrics:
+ scope:
+ prefix: "coordinator"
+ prometheus:
+ handlerPath: /metrics
+ listenAddress: 0.0.0.0:7210 # until https://github.com/m3db/m3/issues/682 is resolved
+ sanitization: prometheus
+ samplingRate: 1.0
+ extended: none
+
+clusters:
+ - namespaces:
+ - namespace: metrics_0_30m
+ type: unaggregated
+ retention: 30m
+ - namespace: metrics_30s_24h
+ type: aggregated
+ retention: 24h
+ resolution: 30s
+ client:
+ config:
+ service:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - m3db_seed:2379
+
+downsample:
+ remoteAggregator:
+ client:
+ type: m3msg
+ m3msg:
+ producer:
+ writer:
+ topicName: aggregator_ingest
+ topicServiceOverride:
+ zone: embedded
+ environment: default_env
+ placement:
+ isStaged: true
+ placementServiceOverride:
+ namespaces:
+ placement: /placement
+ connection:
+ numConnections: 32
+ messagePool:
+ size: 16384
+ watermark:
+ low: 0.2
+ high: 0.5
+
+ingest:
+ ingester:
+ workerPoolSize: 10000
+ opPool:
+ size: 10000
+ retry:
+ maxRetries: 3
+ jitter: true
+ logSampleRate: 0.01
+ m3msg:
+ server:
+ listenAddress: "0.0.0.0:7507"
+ retry:
+ maxBackoff: 10s
+ jitter: true
+
+carbon:
+ ingester:
+ listenAddress: "0.0.0.0:7204"
+
+tagOptions:
+ idScheme: quoted
diff --git a/scripts/development/m3_stack/m3coordinator-snippet-multiprocess.yml b/scripts/development/m3_stack/m3coordinator-snippet-multiprocess.yml
new file mode 100644
index 0000000000..d0b1eaa3bd
--- /dev/null
+++ b/scripts/development/m3_stack/m3coordinator-snippet-multiprocess.yml
@@ -0,0 +1,3 @@
+multiProcess:
+ enabled: true
+ count: 4
diff --git a/scripts/development/m3_stack/m3coordinator.yml b/scripts/development/m3_stack/m3coordinator-standard.yml
similarity index 65%
rename from scripts/development/m3_stack/m3coordinator.yml
rename to scripts/development/m3_stack/m3coordinator-standard.yml
index 4c7108c4e4..bf72b395a9 100644
--- a/scripts/development/m3_stack/m3coordinator.yml
+++ b/scripts/development/m3_stack/m3coordinator-standard.yml
@@ -9,7 +9,7 @@ metrics:
prefix: "coordinator"
prometheus:
handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
+ listenAddress: 0.0.0.0:7210 # until https://github.com/m3db/m3/issues/682 is resolved
sanitization: prometheus
samplingRate: 1.0
extended: none
@@ -19,10 +19,10 @@ clusters:
- namespace: metrics_0_30m
type: unaggregated
retention: 30m
- - namespace: metrics_10s_48h
+ - namespace: metrics_30s_24h
type: aggregated
- retention: 48h
- resolution: 10s
+ retention: 24h
+ resolution: 30s
client:
config:
service:
@@ -34,25 +34,11 @@ clusters:
- zone: embedded
endpoints:
- m3db_seed:2379
+ # Uncomment for proto use
+ # --
# proto:
# schemaFilePath: /etc/m3coordinator/schema.proto
-ingest:
- ingester:
- workerPoolSize: 10000
- opPool:
- size: 10000
- retry:
- maxRetries: 3
- jitter: true
- logSampleRate: 0.01
- m3msg:
- server:
- listenAddress: "0.0.0.0:7507"
- retry:
- maxBackoff: 10s
- jitter: true
-
carbon:
ingester:
listenAddress: "0.0.0.0:7204"
diff --git a/scripts/development/m3_stack/prometheus.yml b/scripts/development/m3_stack/prometheus.yml
index fed59356ad..e160291ce8 100644
--- a/scripts/development/m3_stack/prometheus.yml
+++ b/scripts/development/m3_stack/prometheus.yml
@@ -1,8 +1,8 @@
global:
external_labels:
role: "remote"
- scrape_interval: 15s
- evaluation_interval: 15s
+ scrape_interval: 10s
+ evaluation_interval: 10s
# Alertmanager configuration
alerting:
@@ -20,24 +20,42 @@ rule_files:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=` to any timeseries scraped from this config.
- - job_name: 'prometheus'
+ - job_name: 'prometheus01'
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- - targets: ['localhost:9090']
+ - targets: ['prometheus01:9090']
- job_name: 'coordinator'
static_configs:
- - targets: ['m3coordinator01:7203']
+ - targets:
+ - m3coordinator01:7210
+ - m3coordinator01:7211
+ - m3coordinator01:7212
+ - m3coordinator01:7213
- job_name: 'dbnode'
static_configs:
- targets: ['m3db_seed:9004', 'm3db_data01:9004', 'm3db_data02:9004']
+ - job_name: 'aggregator'
+ static_configs:
+ - targets: ['m3aggregator01:6002', 'm3aggregator02:6002']
+
remote_read:
- url: http://m3coordinator01:7201/api/v1/prom/remote/read
+ read_recent: true
remote_write:
- url: http://m3coordinator01:7201/api/v1/prom/remote/write
+ remote_timeout: 30s
+ queue_config:
+ capacity: 10000
+ max_shards: 10
+ min_shards: 3
+ max_samples_per_send: 5000
+ batch_send_deadline: 1m
+ min_backoff: 50ms
+ max_backoff: 1s
diff --git a/scripts/development/m3_stack/start_m3.sh b/scripts/development/m3_stack/start_m3.sh
index d2170ea956..0550433ebd 100755
--- a/scripts/development/m3_stack/start_m3.sh
+++ b/scripts/development/m3_stack/start_m3.sh
@@ -7,10 +7,11 @@ source "$(pwd)/../../docker-integration-tests/common.sh"
# Locally don't care if we hot loop faster
export MAX_TIMEOUT=4
+RELATIVE="./../../.."
+prepare_build_cmd() {
+ build_cmd="cd $RELATIVE && make clean-build docker-dev-prep && cp -r ./docker ./bin/ && $1"
+}
DOCKER_ARGS="-d --renew-anon-volumes"
-if [[ "$FORCE_BUILD" = true ]] ; then
- DOCKER_ARGS="--build -d --renew-anon-volumes"
-fi
echo "Bringing up nodes in the background with docker compose, remember to run ./stop.sh when done"
@@ -26,12 +27,23 @@ if [[ "$USE_JAEGER" = true ]] ; then
fi
fi
-docker-compose -f docker-compose.yml up $DOCKER_ARGS m3coordinator01
-docker-compose -f docker-compose.yml up $DOCKER_ARGS m3db_seed
-docker-compose -f docker-compose.yml up $DOCKER_ARGS prometheus01
-docker-compose -f docker-compose.yml up $DOCKER_ARGS grafana
+M3DBNODE_DEV_IMG=$(docker images m3dbnode:dev | fgrep -iv repository | wc -l | xargs)
+M3COORDINATOR_DEV_IMG=$(docker images m3coordinator:dev | fgrep -iv repository | wc -l | xargs)
+M3AGGREGATOR_DEV_IMG=$(docker images m3aggregator:dev | fgrep -iv repository | wc -l | xargs)
+M3COLLECTOR_DEV_IMG=$(docker images m3collector:dev | fgrep -iv repository | wc -l | xargs)
+
+if [[ "$M3DBNODE_DEV_IMG" == "0" ]] || [[ "$FORCE_BUILD" == true ]] || [[ "$BUILD_M3DBNODE" == true ]]; then
+ prepare_build_cmd "make m3dbnode-linux-amd64"
+ echo "Building m3dbnode binary first"
+ bash -c "$build_cmd"
+
+ docker-compose -f docker-compose.yml up --build $DOCKER_ARGS m3db_seed
+else
+ docker-compose -f docker-compose.yml up $DOCKER_ARGS m3db_seed
+fi
-if [[ "$MULTI_DB_NODE" = true ]] ; then
+# Bring up any other replicas
+if [[ "$USE_MULTI_DB_NODES" = true ]] ; then
echo "Running multi node"
docker-compose -f docker-compose.yml up $DOCKER_ARGS m3db_data01
docker-compose -f docker-compose.yml up $DOCKER_ARGS m3db_data02
@@ -39,35 +51,122 @@ else
echo "Running single node"
fi
+# Use the standard coordinator config when bringing up the coordinator for the first time.
+# Note: the ".tmp" suffix is git ignored.
+cp ./m3coordinator-standard.yml ./m3coordinator.yml.tmp
+if [[ "$USE_MULTIPROCESS_COORDINATOR" = true ]]; then
+ cat ./m3coordinator-snippet-multiprocess.yml >> ./m3coordinator.yml.tmp
+fi
+
+if [[ "$M3COORDINATOR_DEV_IMG" == "0" ]] || [[ "$FORCE_BUILD" == true ]] || [[ "$BUILD_M3COORDINATOR" == true ]]; then
+ prepare_build_cmd "make m3coordinator-linux-amd64"
+ echo "Building m3coordinator binary first"
+ bash -c "$build_cmd"
+
+ docker-compose -f docker-compose.yml up --build $DOCKER_ARGS m3coordinator01
+else
+ docker-compose -f docker-compose.yml up $DOCKER_ARGS m3coordinator01
+fi
+
echo "Wait for coordinator API to be up"
ATTEMPTS=10 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
'curl -vvvsSf localhost:7201/health'
-if [[ "$AGGREGATOR_PIPELINE" = true ]]; then
+if [[ "$USE_AGGREGATOR" = true ]]; then
echo "Running aggregator pipeline"
- curl -vvvsSf -X POST localhost:7201/api/v1/services/m3aggregator/placement/init -d '{
- "num_shards": 64,
- "replication_factor": 1,
- "instances": [
- {
- "id": "m3aggregator01",
- "isolation_group": "rack-a",
- "zone": "embedded",
- "weight": 1024,
- "endpoint": "m3aggregator01:6000",
- "hostname": "m3aggregator01",
- "port": 6000
- }
- ]
+ if [[ "$USE_AGGREGATOR_HA" != true ]]; then
+ # Use single replica.
+ curl -vvvsSf -X POST localhost:7201/api/v1/services/m3aggregator/placement/init -d '{
+ "num_shards": 64,
+ "replication_factor": 1,
+ "instances": [
+ {
+ "id": "m3aggregator01",
+ "isolation_group": "rack-a",
+ "zone": "embedded",
+ "weight": 1024,
+ "endpoint": "m3aggregator01:6000",
+ "hostname": "m3aggregator01",
+ "port": 6000
+ }
+ ]
+ }'
+ else
+ # Use two replicas.
+ curl -vvvsSf -X POST localhost:7201/api/v1/services/m3aggregator/placement/init -d '{
+ "num_shards": 64,
+ "replication_factor": 2,
+ "instances": [
+ {
+ "id": "m3aggregator01",
+ "isolation_group": "rack-a",
+ "zone": "embedded",
+ "weight": 1024,
+ "endpoint": "m3aggregator01:6000",
+ "hostname": "m3aggregator01",
+ "port": 6000
+ },
+ {
+ "id": "m3aggregator02",
+ "isolation_group": "rack-b",
+ "zone": "embedded",
+ "weight": 1024,
+ "endpoint": "m3aggregator02:6000",
+ "hostname": "m3aggregator02",
+ "port": 6000
+ }
+ ]
+ }'
+ fi
+
+ echo "Initializing m3msg inbound topic for m3aggregator ingestion from m3coordinators"
+ curl -vvvsSf -X POST -H "Topic-Name: aggregator_ingest" -H "Cluster-Environment-Name: default_env" localhost:7201/api/v1/topic/init -d '{
+ "numberOfShards": 64
}'
- echo "Initializing m3msg topic for ingestion"
- curl -vvvsSf -X POST localhost:7201/api/v1/topic/init -d '{
+ echo "Adding m3aggregator as a consumer to the aggregator ingest topic"
+ curl -vvvsSf -X POST -H "Topic-Name: aggregator_ingest" -H "Cluster-Environment-Name: default_env" localhost:7201/api/v1/topic -d '{
+ "consumerService": {
+ "serviceId": {
+ "name": "m3aggregator",
+ "environment": "default_env",
+ "zone": "embedded"
+ },
+ "consumptionType": "REPLICATED",
+ "messageTtlNanos": "600000000000"
+ }
+ }' # msgs will be discarded after 600000000000ns = 10mins
+
+ # Create outbound m3msg topic for m3 aggregators to coordinators
+ echo "Initializing m3msg outbound topic for m3 aggregators to coordinators"
+ curl -vvvsSf -X POST -H "Topic-Name: aggregated_metrics" -H "Cluster-Environment-Name: default_env" localhost:7201/api/v1/topic/init -d '{
"numberOfShards": 64
}'
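+
+  # To double-check a topic's consumers later, the topic read endpoint can be
+  # queried (endpoint assumed to mirror the init/add calls above):
+  # curl -H "Topic-Name: aggregator_ingest" localhost:7201/api/v1/topic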
- docker-compose -f docker-compose.yml up $DOCKER_ARGS m3aggregator01
- docker-compose -f docker-compose.yml up $DOCKER_ARGS m3collector01
+ if [[ "$M3AGGREGATOR_DEV_IMG" == "0" ]] || [[ "$FORCE_BUILD" == true ]] || [[ "$BUILD_M3AGGREGATOR" == true ]]; then
+ prepare_build_cmd "make m3aggregator-linux-amd64"
+ echo "Building m3aggregator binary first"
+ bash -c "$build_cmd"
+
+ docker-compose -f docker-compose.yml up --build $DOCKER_ARGS m3aggregator01
+ else
+ docker-compose -f docker-compose.yml up $DOCKER_ARGS m3aggregator01
+ fi
+
+ if [[ "$USE_AGGREGATOR_HA" == true ]]; then
+ # Bring up the second replica
+ docker-compose -f docker-compose.yml up $DOCKER_ARGS m3aggregator02
+ fi
+
+ if [[ "$M3COLLECTOR_DEV_IMG" == "0" ]] || [[ "$FORCE_BUILD" == true ]] || [[ "$BUILD_M3COLLECTOR" == true ]]; then
+ prepare_build_cmd "make m3collector-linux-amd64"
+ echo "Building m3collector binary first"
+ bash -c "$build_cmd"
+
+ docker-compose -f docker-compose.yml up --build $DOCKER_ARGS m3collector01
+ else
+ docker-compose -f docker-compose.yml up $DOCKER_ARGS m3collector01
+ fi
else
echo "Not running aggregator pipeline"
fi
@@ -97,7 +196,7 @@ curl -vvvsSf -X POST localhost:7201/api/v1/namespace -d '{
}
}'
curl -vvvsSf -X POST localhost:7201/api/v1/namespace -d '{
- "name": "metrics_10s_48h",
+ "name": "metrics_30s_24h",
"options": {
"bootstrapEnabled": true,
"flushEnabled": true,
@@ -106,8 +205,8 @@ curl -vvvsSf -X POST localhost:7201/api/v1/namespace -d '{
"snapshotEnabled": true,
"repairEnabled": false,
"retentionOptions": {
- "retentionPeriodDuration": "48h",
- "blockSizeDuration": "4h",
+ "retentionPeriodDuration": "24h",
+ "blockSizeDuration": "2h",
"bufferFutureDuration": "10m",
"bufferPastDuration": "10m",
"blockDataExpiry": true,
@@ -115,7 +214,7 @@ curl -vvvsSf -X POST localhost:7201/api/v1/namespace -d '{
},
"indexOptions": {
"enabled": true,
- "blockSizeDuration": "4h"
+ "blockSizeDuration": "2h"
}
}
}'
@@ -123,11 +222,11 @@ echo "Done initializing namespaces"
echo "Validating namespace"
[ "$(curl -sSf localhost:7201/api/v1/namespace | jq .registry.namespaces.metrics_0_30m.indexOptions.enabled)" == true ]
-[ "$(curl -sSf localhost:7201/api/v1/namespace | jq .registry.namespaces.metrics_10s_48h.indexOptions.enabled)" == true ]
+[ "$(curl -sSf localhost:7201/api/v1/namespace | jq .registry.namespaces.metrics_30s_24h.indexOptions.enabled)" == true ]
echo "Done validating namespace"
echo "Initializing topology"
-if [[ "$MULTI_DB_NODE" = true ]] ; then
+if [[ "$USE_MULTI_DB_NODES" = true ]] ; then
curl -vvvsSf -X POST localhost:7201/api/v1/placement/init -d '{
"num_shards": 64,
"replication_factor": 3,
@@ -187,7 +286,7 @@ echo "Waiting until shards are marked as available"
ATTEMPTS=100 TIMEOUT=2 retry_with_backoff \
'[ "$(curl -sSf 0.0.0.0:7201/api/v1/placement | grep -c INITIALIZING)" -eq 0 ]'
-if [[ "$AGGREGATOR_PIPELINE" = true ]]; then
+if [[ "$USE_AGGREGATOR" = true ]]; then
echo "Initializing M3Coordinator topology"
curl -vvvsSf -X POST localhost:7201/api/v1/services/m3coordinator/placement/init -d '{
"instances": [
@@ -207,8 +306,8 @@ if [[ "$AGGREGATOR_PIPELINE" = true ]]; then
echo "Done validating topology"
# Do this after placement for m3coordinator is created.
- echo "Adding m3coordinator as a consumer to the topic"
- curl -vvvsSf -X POST localhost:7201/api/v1/topic -d '{
+ echo "Adding coordinator as a consumer to the aggregator outbound topic"
+ curl -vvvsSf -X POST -H "Topic-Name: aggregated_metrics" -H "Cluster-Environment-Name: default_env" localhost:7201/api/v1/topic -d '{
"consumerService": {
"serviceId": {
"name": "m3coordinator",
@@ -220,14 +319,39 @@ if [[ "$AGGREGATOR_PIPELINE" = true ]]; then
}
}' # msgs will be discarded after 600000000000ns = 10mins
+ # Restart with aggregator coordinator config
+ docker-compose -f docker-compose.yml stop m3coordinator01
+
+  # Note: use a ".tmp" suffix so the file is git-ignored.
+ cp ./m3coordinator-aggregator.yml ./m3coordinator.yml.tmp
+ if [[ "$USE_MULTIPROCESS_COORDINATOR" = true ]]; then
+ cat ./m3coordinator-snippet-multiprocess.yml >> ./m3coordinator.yml.tmp
+ fi
+
+ docker-compose -f docker-compose.yml up $DOCKER_ARGS m3coordinator01
+
# May not necessarily flush
echo "Sending unaggregated metric to m3collector"
curl http://localhost:7206/api/v1/json/report -X POST -d '{"metrics":[{"type":"gauge","value":42,"tags":{"__name__":"foo_metric","foo":"bar"}}]}'
fi
+echo "Starting Prometheus"
+docker-compose -f docker-compose.yml up $DOCKER_ARGS prometheus01
+
+if [[ "$USE_PROMETHEUS_HA" = true ]] ; then
+ echo "Starting Prometheus HA replica"
+ docker-compose -f docker-compose.yml up $DOCKER_ARGS prometheus02
+fi
+
+echo "Starting Grafana"
+docker-compose -f docker-compose.yml up $DOCKER_ARGS grafana
+
if [[ "$USE_JAEGER" = true ]] ; then
echo "Jaeger UI available at localhost:16686"
fi
echo "Prometheus available at localhost:9090"
+if [[ "$USE_PROMETHEUS_HA" = true ]] ; then
+ echo "Prometheus HA replica available at localhost:9091"
+fi
echo "Grafana available at localhost:3000"
echo "Run ./stop.sh to shutdown nodes when done"
diff --git a/scripts/docker-integration-tests/aggregator/m3aggregator.yml b/scripts/docker-integration-tests/aggregator/m3aggregator.yml
index 58d0370d4e..600dce1867 100644
--- a/scripts/docker-integration-tests/aggregator/m3aggregator.yml
+++ b/scripts/docker-integration-tests/aggregator/m3aggregator.yml
@@ -7,68 +7,30 @@ metrics:
prometheus:
onError: none
handlerPath: /metrics
+ listenAddress: 0.0.0.0:6002
+ timerType: histogram
sanitization: prometheus
samplingRate: 1.0
extended: none
+m3msg:
+ server:
+ listenAddress: 0.0.0.0:6000
+ retry:
+ maxBackoff: 10s
+ jitter: true
+ consumer:
+ messagePool:
+ size: 16384
+ watermark:
+ low: 0.2
+ high: 0.5
+
http:
listenAddress: 0.0.0.0:6001
readTimeout: 60s
writeTimeout: 60s
-rawtcp:
- listenAddress: 0.0.0.0:6000
- keepAliveEnabled: true
- keepAlivePeriod: 1m
- retry:
- initialBackoff: 5ms
- backoffFactor: 2.0
- maxBackoff: 1s
- forever: true
- jitter: true
- readBufferSize: 1440
- msgpackIterator:
- ignoreHigherVersion: false
- readerBufferSize: 1440
- largeFloatsSize: 1024
- largeFloatsPool:
- buckets:
- - count: 1024
- capacity: 2048
- - count: 512
- capacity: 4096
- - count: 256
- capacity: 8192
- - count: 128
- capacity: 16384
- - count: 64
- capacity: 32768
- - count: 32
- capacity: 65536
- watermark:
- low: 0.001
- high: 0.002
- protobufIterator:
- initBufferSize: 1440
- maxMessageSize: 50000000 # max message size is 50MB
- bytesPool:
- buckets:
- - count: 1024
- capacity: 2048
- - count: 512
- capacity: 4096
- - count: 256
- capacity: 8192
- - count: 128
- capacity: 16384
- - count: 64
- capacity: 32768
- - count: 32
- capacity: 65536
- watermark:
- low: 0.001
- high: 0.002
-
kvClient:
etcd:
env: override_test_env
@@ -96,6 +58,7 @@ aggregator:
envVarName: M3AGGREGATOR_HOST_ID
instanceID:
type: host_id
+ verboseErrors: true
metricPrefix: ""
counterPrefix: ""
timerPrefix: ""
@@ -128,39 +91,24 @@ aggregator:
- count: 1024
capacity: 64
client:
- placementKV:
- namespace: /placement
- zone: embedded
- environment: override_test_env
- placementWatcher:
- key: m3aggregator
- initWatchTimeout: 15s
- hashType: murmur32
- shardCutoffLingerDuration: 1m
- encoder:
- initBufferSize: 100
- maxMessageSize: 50000000
- bytesPool:
- buckets:
- - capacity: 16
- count: 10
- - capacity: 32
- count: 20
- watermark:
- low: 0.001
- high: 0.01
- flushSize: 1440
- maxTimerBatchSize: 140
- queueSize: 1000
- queueDropType: oldest
- connection:
- connectionTimeout: 1s
- connectionKeepAlive: true
- writeTimeout: 1s
- initReconnectThreshold: 2
- maxReconnectThreshold: 5000
- reconnectThresholdMultiplier: 2
- maxReconnectDuration: 1m
+ type: m3msg
+ m3msg:
+ producer:
+ writer:
+ topicName: aggregator_ingest
+ topicServiceOverride:
+ zone: embedded
+ environment: override_test_env
+ placement:
+ isStaged: true
+ placementServiceOverride:
+ namespaces:
+ placement: /placement
+ messagePool:
+ size: 16384
+ watermark:
+ low: 0.2
+ high: 0.5
placementManager:
kvConfig:
namespace: /placement
@@ -172,6 +120,7 @@ aggregator:
hashType: murmur32
bufferDurationBeforeShardCutover: 10m
bufferDurationAfterShardCutoff: 10m
+ bufferDurationForFutureTimedMetric: 10m # Allow test to write into future.
resignTimeout: 1m
flushTimesManager:
kvConfig:
@@ -237,36 +186,23 @@ aggregator:
name: m3msg
hashType: murmur32
producer:
- buffer:
- maxBufferSize: 1000000000 # max buffer before m3msg start dropping data.
writer:
topicName: aggregated_metrics
topicServiceOverride:
zone: embedded
environment: override_test_env
- messageRetry:
- initialBackoff: 1m
- maxBackoff: 2m
- messageQueueNewWritesScanInterval: 1s
- ackErrorRetry:
- initialBackoff: 2s
- maxBackoff: 10s
- connection:
- dialTimeout: 5s
- writeTimeout: 5s
- retry:
- initialBackoff: 1s
- maxBackoff: 10s
- flushInterval: 1s
- writeBufferSize: 16384
- readBufferSize: 256
+ messagePool:
+ size: 16384
+ watermark:
+ low: 0.2
+ high: 0.5
+ passthrough:
+ enabled: true
forwarding:
- maxSingleDelay: 5s
- entryTTL: 6h
+ maxConstDelay: 1m # Need to add some buffer window, since timed metrics by default are delayed by 1min.
+ entryTTL: 1h
entryCheckInterval: 10m
maxTimerBatchSizePerWrite: 140
- defaultStoragePolicies:
- - 10s:2d
maxNumCachedSourceSets: 2
discardNaNAggregatedValues: true
entryPool:
diff --git a/scripts/docker-integration-tests/aggregator/m3coordinator.yml b/scripts/docker-integration-tests/aggregator/m3coordinator.yml
index 475afe191f..8c730ecfc0 100644
--- a/scripts/docker-integration-tests/aggregator/m3coordinator.yml
+++ b/scripts/docker-integration-tests/aggregator/m3coordinator.yml
@@ -52,34 +52,42 @@ clusters:
readConsistencyLevel: unstrict_majority
downsample:
+ rules:
+ rollupRules:
+ - name: "requests per second by status code"
+ filter: "__name__:http_requests app:* status_code:* endpoint:*"
+ transforms:
+ - transform:
+ type: "PerSecond"
+ - rollup:
+ metricName: "http_requests_by_status_code"
+ groupBy: ["app", "status_code", "endpoint"]
+ aggregations: ["Sum"]
+ storagePolicies:
+ - resolution: 10s
+ retention: 6h
remoteAggregator:
client:
- placementKV:
- namespace: /placement
- environment: override_test_env
- placementWatcher:
- key: m3aggregator
- initWatchTimeout: 10s
- hashType: murmur32
- shardCutoffLingerDuration: 1m
- flushSize: 1440
- maxTimerBatchSize: 1120
- queueSize: 10000
- queueDropType: oldest
- encoder:
- initBufferSize: 2048
- maxMessageSize: 10485760
- bytesPool:
- buckets:
- - capacity: 2048
- count: 4096
- - capacity: 4096
- count: 4096
- watermark:
- low: 0.7
- high: 1.0
- connection:
- writeTimeout: 250ms
+ type: m3msg
+ m3msg:
+ producer:
+ writer:
+ topicName: aggregator_ingest
+ topicServiceOverride:
+ zone: embedded
+ environment: override_test_env
+ placement:
+ isStaged: true
+ placementServiceOverride:
+ namespaces:
+ placement: /placement
+ connection:
+ numConnections: 4
+ messagePool:
+ size: 16384
+ watermark:
+ low: 0.2
+ high: 0.5
ingest:
ingester:
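The rollup rule above turns the monotonically increasing http_requests counter into a per-second rate (PerSecond), then sums that rate per (app, status_code, endpoint) into http_requests_by_status_code at a 10s:6h storage policy. A back-of-envelope sketch of the arithmetic the aggregator test below depends on (variable names mirror that test):

    resolution_seconds=10
    value_rate=22                                        # counter grows 22/s for /foo/bar
    value_inc_by=$(( value_rate * resolution_seconds ))  # so +220 per 10s datapoint written
    # PerSecond recovers the 22/s rate, hence the query assertions expect
    # 22 for endpoint="/foo/bar" and, analogously, 4 for endpoint="/foo/baz".
    echo "expect http_requests_by_status_code == ${value_rate}"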
diff --git a/scripts/docker-integration-tests/aggregator/test.sh b/scripts/docker-integration-tests/aggregator/test.sh
index e0b8d7ce13..0c48d2ddd5 100755
--- a/scripts/docker-integration-tests/aggregator/test.sh
+++ b/scripts/docker-integration-tests/aggregator/test.sh
@@ -5,8 +5,15 @@ set -xe
source $GOPATH/src/github.com/m3db/m3/scripts/docker-integration-tests/common.sh
REVISION=$(git rev-parse HEAD)
COMPOSE_FILE=$GOPATH/src/github.com/m3db/m3/scripts/docker-integration-tests/aggregator/docker-compose.yml
+# quay.io/m3db/prometheus_remote_client_golang @ v0.4.3
+PROMREMOTECLI_IMAGE=quay.io/m3db/prometheus_remote_client_golang@sha256:fc56df819bff9a5a087484804acf3a584dd4a78c68900c31a28896ed66ca7e7b
+JQ_IMAGE=realguess/jq:1.4@sha256:300c5d9fb1d74154248d155ce182e207cf6630acccbaadd0168e18b15bfaa786
export REVISION
+echo "Pull containers required for test"
+docker pull $PROMREMOTECLI_IMAGE
+docker pull $JQ_IMAGE
+
echo "Run m3dbnode"
docker-compose -f ${COMPOSE_FILE} up -d dbnode01
@@ -50,11 +57,25 @@ curl -vvvsSf -X POST -H "Cluster-Environment-Name: override_test_env" localhost:
]
}'
-echo "Initializing m3msg topic for m3coordinator ingestion from m3aggregators"
-curl -vvvsSf -X POST -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/topic/init -d '{
+echo "Initializing m3msg inbound topic for m3aggregator ingestion from m3coordinators"
+curl -vvvsSf -X POST -H "Topic-Name: aggregator_ingest" -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/topic/init -d '{
"numberOfShards": 64
}'
+# Do this after placement and topic for m3aggregator is created.
+echo "Adding m3aggregator as a consumer to the aggregator ingest topic"
+curl -vvvsSf -X POST -H "Topic-Name: aggregator_ingest" -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/topic -d '{
+ "consumerService": {
+ "serviceId": {
+ "name": "m3aggregator",
+ "environment": "override_test_env",
+ "zone": "embedded"
+ },
+ "consumptionType": "REPLICATED",
+ "messageTtlNanos": "600000000000"
+ }
+}' # msgs will be discarded after 600000000000ns = 10mins
+
echo "Initializing m3coordinator topology"
curl -vvvsSf -X POST localhost:7201/api/v1/services/m3coordinator/placement/init -d '{
"instances": [
@@ -74,8 +95,13 @@ echo "Validating m3coordinator topology"
echo "Done validating topology"
# Do this after placement for m3coordinator is created.
-echo "Adding m3coordinator as a consumer to the aggregator topic"
-curl -vvvsSf -X POST -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/topic -d '{
+echo "Initializing m3msg outbound topic for m3coordinator ingestion from m3aggregators"
+curl -vvvsSf -X POST -H "Topic-Name: aggregated_metrics" -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/topic/init -d '{
+ "numberOfShards": 64
+}'
+
+echo "Adding m3coordinator as a consumer to the aggregator publish topic"
+curl -vvvsSf -X POST -H "Topic-Name: aggregated_metrics" -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/topic -d '{
"consumerService": {
"serviceId": {
"name": "m3coordinator",
@@ -107,7 +133,7 @@ function read_carbon {
end=$(date +%s)
start=$(($end-1000))
RESPONSE=$(curl -sSfg "http://${COORDINATOR_API}/api/v1/graphite/render?target=$target&from=$start&until=$end")
- test "$(echo "$RESPONSE" | jq ".[0].datapoints | .[][0] | select(. != null)" | tail -n 1)" = "$expected_val"
+ test "$(echo "$RESPONSE" | jq ".[0].datapoints | .[][0] | select(. != null)" | jq -s last)" = "$expected_val"
return $?
}
@@ -118,8 +144,146 @@ bash -c 'while true; do t=$(date +%s); echo "foo.bar.baz 40 $t" | nc 0.0.0.0 720
# Track PID to kill on exit
METRIC_EMIT_PID="$!"
-# Read back the averaged averaged metric, we configured graphite
-# aggregation policy to average each tile and we are emitting
-# values 40 and 44 to get an average of 42 each tile
-echo "Read back aggregated averaged metric"
-ATTEMPTS=10 TIMEOUT=1 retry_with_backoff read_carbon foo.bar.* 42
+function test_aggregated_graphite_metric {
+  # Read back the averaged metric; we configured the graphite
+  # aggregation policy to average each tile, and we are emitting
+  # values 40 and 44 to get an average of 42 per tile
+ echo "Read back aggregated averaged metric"
+ ATTEMPTS=100 TIMEOUT=1 MAX_TIMEOUT=4 retry_with_backoff read_carbon foo.bar.* 42
+
+ echo "Finished with carbon metrics"
+ kill $METRIC_EMIT_PID
+ export METRIC_EMIT_PID="-1"
+}
+
+function prometheus_remote_write {
+ local metric_name=$1
+ local datapoint_timestamp=$2
+ local datapoint_value=$3
+ local expect_success=$4
+ local expect_success_err=$5
+ local expect_status=$6
+ local expect_status_err=$7
+ local label0_name=${label0_name:-label0}
+ local label0_value=${label0_value:-label0}
+ local label1_name=${label1_name:-label1}
+ local label1_value=${label1_value:-label1}
+ local label2_name=${label2_name:-label2}
+ local label2_value=${label2_value:-label2}
+
+ network_name="aggregator"
+ network=$(docker network ls | fgrep $network_name | tr -s ' ' | cut -f 1 -d ' ' | tail -n 1)
+ out=$((docker run -it --rm --network $network \
+ $PROMREMOTECLI_IMAGE \
+ -u http://m3coordinator01:7202/api/v1/prom/remote/write \
+ -t __name__:${metric_name} \
+ -t ${label0_name}:${label0_value} \
+ -t ${label1_name}:${label1_value} \
+ -t ${label2_name}:${label2_value} \
+ -d ${datapoint_timestamp},${datapoint_value} | grep -v promremotecli_log) || true)
+ success=$(echo $out | grep -v promremotecli_log | docker run --rm -i $JQ_IMAGE jq .success)
+ status=$(echo $out | grep -v promremotecli_log | docker run --rm -i $JQ_IMAGE jq .statusCode)
+ if [[ "$success" != "$expect_success" ]]; then
+ echo $expect_success_err
+ return 1
+ fi
+ if [[ "$status" != "$expect_status" ]]; then
+ echo "${expect_status_err}: actual=${status}"
+ return 1
+ fi
+ echo "Returned success=${success}, status=${status} as expected"
+ return 0
+}
+
+function prometheus_query_native {
+ local endpoint=${endpoint:-}
+ local query=${query:-}
+ local params=${params:-}
+ local metrics_type=${metrics_type:-}
+ local metrics_storage_policy=${metrics_storage_policy:-}
+ local jq_path=${jq_path:-}
+ local expected_value=${expected_value:-}
+
+ params_prefixed=""
+ if [[ "$params" != "" ]]; then
+ params_prefixed='&'"${params}"
+ fi
+
+ result=$(curl -s \
+ -H "M3-Metrics-Type: ${metrics_type}" \
+ -H "M3-Storage-Policy: ${metrics_storage_policy}" \
+ "0.0.0.0:7202/api/v1/${endpoint}?query=${query}${params_prefixed}" | jq -r "${jq_path}" | jq -s last)
+ test "$result" = "$expected_value"
+ return $?
+}
+
+function test_aggregated_rollup_rule {
+ resolution_seconds="10"
+ now=$(date +"%s")
+ now_truncate_by=$(( $now % $resolution_seconds ))
+ now_truncated=$(( $now - $now_truncate_by ))
+
+ echo "Test write with rollup rule"
+
+ # Emit values for endpoint /foo/bar (to ensure right values aggregated)
+ write_at="$now_truncated"
+ value="42"
+ value_rate="22"
+ value_inc_by=$(( $value_rate * $resolution_seconds ))
+ for i in $(seq 1 10); do
+ label0_name="app" label0_value="nginx_edge" \
+ label1_name="status_code" label1_value="500" \
+ label2_name="endpoint" label2_value="/foo/bar" \
+ prometheus_remote_write \
+ http_requests $write_at $value \
+ true "Expected request to succeed" \
+ 200 "Expected request to return status code 200"
+ write_at=$(( $write_at + $resolution_seconds ))
+ value=$(( $value + $value_inc_by ))
+ done
+
+ # Emit values for endpoint /foo/baz (to ensure right values aggregated)
+ write_at="$now_truncated"
+ value="84"
+ value_rate="4"
+ value_inc_by=$(( $value_rate * $resolution_seconds ))
+ for i in $(seq 1 10); do
+ label0_name="app" label0_value="nginx_edge" \
+ label1_name="status_code" label1_value="500" \
+ label2_name="endpoint" label2_value="/foo/baz" \
+ prometheus_remote_write \
+ http_requests $write_at $value \
+ true "Expected request to succeed" \
+ 200 "Expected request to return status code 200"
+ write_at=$(( $write_at + $resolution_seconds ))
+ value=$(( $value + $value_inc_by ))
+ done
+
+ start=$(( $now - 3600 ))
+ end=$(( $now + 3600 ))
+ step="30s"
+  params_range="start=${start}"'&'"end=${end}"'&'"step=${step}"
+ jq_path=".data.result[0].values | .[][1] | select(. != null)"
+
+ echo "Test query rollup rule"
+
+  # Test that values are rolled up per second, then summed (for endpoint="/foo/bar")
+ ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 \
+ endpoint=query_range query="http_requests_by_status_code\{endpoint=\"/foo/bar\"\}" \
+ params="$params_range" \
+ jq_path="$jq_path" expected_value="22" \
+ metrics_type="aggregated" metrics_storage_policy="10s:6h" \
+ retry_with_backoff prometheus_query_native
+
+  # Test that values are rolled up per second, then summed (for endpoint="/foo/baz")
+ ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 \
+ endpoint=query_range query="http_requests_by_status_code\{endpoint=\"/foo/baz\"\}" \
+ params="$params_range" \
+ jq_path="$jq_path" expected_value="4" \
+ metrics_type="aggregated" metrics_storage_policy="10s:6h" \
+ retry_with_backoff prometheus_query_native
+}
+
+echo "Run tests"
+test_aggregated_graphite_metric
+test_aggregated_rollup_rule
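The assertions above drive everything through retry_with_backoff, parameterized by the ATTEMPTS/TIMEOUT/MAX_TIMEOUT environment variables. The helper is defined in common.sh, which this diff does not touch; a minimal sketch of the assumed behavior, an eval-based retry with doubling backoff capped at MAX_TIMEOUT:

    retry_with_backoff() {
      local attempt=0
      local timeout=${TIMEOUT:-1}
      # Retry the given command/expression until it succeeds or ATTEMPTS runs out.
      until eval "$@"; do
        attempt=$(( attempt + 1 ))
        if [[ "$attempt" -ge "${ATTEMPTS:-5}" ]]; then
          return 1
        fi
        sleep "$timeout"
        timeout=$(( timeout * 2 ))
        if [[ -n "${MAX_TIMEOUT:-}" && "$timeout" -gt "$MAX_TIMEOUT" ]]; then
          timeout=$MAX_TIMEOUT
        fi
      done
    }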
diff --git a/scripts/docker-integration-tests/aggregator_legacy/docker-compose.yml b/scripts/docker-integration-tests/aggregator_legacy/docker-compose.yml
new file mode 100644
index 0000000000..c93b41ee25
--- /dev/null
+++ b/scripts/docker-integration-tests/aggregator_legacy/docker-compose.yml
@@ -0,0 +1,50 @@
+version: "3.5"
+services:
+ dbnode01:
+ expose:
+ - "9000-9004"
+ - "2379-2380"
+ - "7201"
+ ports:
+ - "0.0.0.0:9000-9004:9000-9004"
+ - "0.0.0.0:2379-2380:2379-2380"
+ - "0.0.0.0:7201:7201"
+ networks:
+ - backend
+ image: "m3dbnode_integration:${REVISION}"
+ m3coordinator01:
+ expose:
+ - "7202"
+ - "7203"
+ - "7204"
+ ports:
+ - "0.0.0.0:7202:7202"
+ - "0.0.0.0:7203:7203"
+ - "0.0.0.0:7204:7204"
+ networks:
+ - backend
+ image: "m3coordinator_integration:${REVISION}"
+ volumes:
+ - "./m3coordinator.yml:/etc/m3coordinator/m3coordinator.yml"
+ m3aggregator01:
+ expose:
+ - "6001"
+ ports:
+ - "127.0.0.1:6001:6001"
+ networks:
+ - backend
+ environment:
+ - M3AGGREGATOR_HOST_ID=m3aggregator01
+ image: "m3aggregator_integration:${REVISION}"
+ volumes:
+ - "./m3aggregator.yml:/etc/m3aggregator/m3aggregator.yml"
+ m3aggregator02:
+ networks:
+ - backend
+ environment:
+ - M3AGGREGATOR_HOST_ID=m3aggregator02
+ image: "m3aggregator_integration:${REVISION}"
+ volumes:
+ - "./m3aggregator.yml:/etc/m3aggregator/m3aggregator.yml"
+networks:
+ backend:
diff --git a/scripts/docker-integration-tests/aggregator_legacy/m3aggregator.yml b/scripts/docker-integration-tests/aggregator_legacy/m3aggregator.yml
new file mode 100644
index 0000000000..58d0370d4e
--- /dev/null
+++ b/scripts/docker-integration-tests/aggregator_legacy/m3aggregator.yml
@@ -0,0 +1,279 @@
+logging:
+ level: info
+
+metrics:
+ scope:
+ prefix: m3aggregator
+ prometheus:
+ onError: none
+ handlerPath: /metrics
+ sanitization: prometheus
+ samplingRate: 1.0
+ extended: none
+
+http:
+ listenAddress: 0.0.0.0:6001
+ readTimeout: 60s
+ writeTimeout: 60s
+
+rawtcp:
+ listenAddress: 0.0.0.0:6000
+ keepAliveEnabled: true
+ keepAlivePeriod: 1m
+ retry:
+ initialBackoff: 5ms
+ backoffFactor: 2.0
+ maxBackoff: 1s
+ forever: true
+ jitter: true
+ readBufferSize: 1440
+ msgpackIterator:
+ ignoreHigherVersion: false
+ readerBufferSize: 1440
+ largeFloatsSize: 1024
+ largeFloatsPool:
+ buckets:
+ - count: 1024
+ capacity: 2048
+ - count: 512
+ capacity: 4096
+ - count: 256
+ capacity: 8192
+ - count: 128
+ capacity: 16384
+ - count: 64
+ capacity: 32768
+ - count: 32
+ capacity: 65536
+ watermark:
+ low: 0.001
+ high: 0.002
+ protobufIterator:
+ initBufferSize: 1440
+ maxMessageSize: 50000000 # max message size is 50MB
+ bytesPool:
+ buckets:
+ - count: 1024
+ capacity: 2048
+ - count: 512
+ capacity: 4096
+ - count: 256
+ capacity: 8192
+ - count: 128
+ capacity: 16384
+ - count: 64
+ capacity: 32768
+ - count: 32
+ capacity: 65536
+ watermark:
+ low: 0.001
+ high: 0.002
+
+kvClient:
+ etcd:
+ env: override_test_env
+ zone: embedded
+ service: m3aggregator
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - dbnode01:2379
+
+runtimeOptions:
+ kvConfig:
+ environment: override_test_env
+ zone: embedded
+ writeValuesPerMetricLimitPerSecondKey: write-values-per-metric-limit-per-second
+ writeValuesPerMetricLimitPerSecond: 0
+ writeNewMetricLimitClusterPerSecondKey: write-new-metric-limit-cluster-per-second
+ writeNewMetricLimitClusterPerSecond: 0
+ writeNewMetricNoLimitWarmupDuration: 0
+
+aggregator:
+ hostID:
+ resolver: environment
+ envVarName: M3AGGREGATOR_HOST_ID
+ instanceID:
+ type: host_id
+ metricPrefix: ""
+ counterPrefix: ""
+ timerPrefix: ""
+ gaugePrefix: ""
+ aggregationTypes:
+ counterTransformFnType: empty
+ timerTransformFnType: suffix
+ gaugeTransformFnType: empty
+ aggregationTypesPool:
+ size: 1024
+ quantilesPool:
+ buckets:
+ - count: 256
+ capacity: 4
+ - count: 128
+ capacity: 8
+ stream:
+ eps: 0.001
+ capacity: 32
+ streamPool:
+ size: 4096
+ samplePool:
+ size: 4096
+ floatsPool:
+ buckets:
+ - count: 4096
+ capacity: 16
+ - count: 2048
+ capacity: 32
+ - count: 1024
+ capacity: 64
+ client:
+ placementKV:
+ namespace: /placement
+ zone: embedded
+ environment: override_test_env
+ placementWatcher:
+ key: m3aggregator
+ initWatchTimeout: 15s
+ hashType: murmur32
+ shardCutoffLingerDuration: 1m
+ encoder:
+ initBufferSize: 100
+ maxMessageSize: 50000000
+ bytesPool:
+ buckets:
+ - capacity: 16
+ count: 10
+ - capacity: 32
+ count: 20
+ watermark:
+ low: 0.001
+ high: 0.01
+ flushSize: 1440
+ maxTimerBatchSize: 140
+ queueSize: 1000
+ queueDropType: oldest
+ connection:
+ connectionTimeout: 1s
+ connectionKeepAlive: true
+ writeTimeout: 1s
+ initReconnectThreshold: 2
+ maxReconnectThreshold: 5000
+ reconnectThresholdMultiplier: 2
+ maxReconnectDuration: 1m
+ placementManager:
+ kvConfig:
+ namespace: /placement
+ environment: override_test_env
+ zone: embedded
+ placementWatcher:
+ key: m3aggregator
+ initWatchTimeout: 10s
+ hashType: murmur32
+ bufferDurationBeforeShardCutover: 10m
+ bufferDurationAfterShardCutoff: 10m
+ resignTimeout: 1m
+ flushTimesManager:
+ kvConfig:
+ environment: override_test_env
+ zone: embedded
+ flushTimesKeyFmt: shardset/%d/flush
+ flushTimesPersistRetrier:
+ initialBackoff: 100ms
+ backoffFactor: 2.0
+ maxBackoff: 2s
+ maxRetries: 3
+ electionManager:
+ election:
+ leaderTimeout: 10s
+ resignTimeout: 10s
+ ttlSeconds: 10
+ serviceID:
+ name: m3aggregator
+ environment: override_test_env
+ zone: embedded
+ electionKeyFmt: shardset/%d/lock
+ campaignRetrier:
+ initialBackoff: 100ms
+ backoffFactor: 2.0
+ maxBackoff: 2s
+ forever: true
+ jitter: true
+ changeRetrier:
+ initialBackoff: 100ms
+ backoffFactor: 2.0
+ maxBackoff: 5s
+ forever: true
+ jitter: true
+ resignRetrier:
+ initialBackoff: 100ms
+ backoffFactor: 2.0
+ maxBackoff: 5s
+ forever: true
+ jitter: true
+ campaignStateCheckInterval: 1s
+ shardCutoffCheckOffset: 30s
+ flushManager:
+ checkEvery: 1s
+ jitterEnabled: true
+ maxJitters:
+ - flushInterval: 5s
+ maxJitterPercent: 1.0
+ - flushInterval: 10s
+ maxJitterPercent: 0.5
+ - flushInterval: 1m
+ maxJitterPercent: 0.5
+ - flushInterval: 10m
+ maxJitterPercent: 0.5
+ - flushInterval: 1h
+ maxJitterPercent: 0.25
+ numWorkersPerCPU: 0.5
+ flushTimesPersistEvery: 10s
+ maxBufferSize: 5m
+ forcedFlushWindowSize: 10s
+ flush:
+ handlers:
+ - dynamicBackend:
+ name: m3msg
+ hashType: murmur32
+ producer:
+ buffer:
+ maxBufferSize: 1000000000 # max buffer before m3msg start dropping data.
+ writer:
+ topicName: aggregated_metrics
+ topicServiceOverride:
+ zone: embedded
+ environment: override_test_env
+ messageRetry:
+ initialBackoff: 1m
+ maxBackoff: 2m
+ messageQueueNewWritesScanInterval: 1s
+ ackErrorRetry:
+ initialBackoff: 2s
+ maxBackoff: 10s
+ connection:
+ dialTimeout: 5s
+ writeTimeout: 5s
+ retry:
+ initialBackoff: 1s
+ maxBackoff: 10s
+ flushInterval: 1s
+ writeBufferSize: 16384
+ readBufferSize: 256
+ forwarding:
+ maxSingleDelay: 5s
+ entryTTL: 6h
+ entryCheckInterval: 10m
+ maxTimerBatchSizePerWrite: 140
+ defaultStoragePolicies:
+ - 10s:2d
+ maxNumCachedSourceSets: 2
+ discardNaNAggregatedValues: true
+ entryPool:
+ size: 4096
+ counterElemPool:
+ size: 4096
+ timerElemPool:
+ size: 4096
+ gaugeElemPool:
+ size: 4096
diff --git a/scripts/docker-integration-tests/aggregator_legacy/m3coordinator.yml b/scripts/docker-integration-tests/aggregator_legacy/m3coordinator.yml
new file mode 100644
index 0000000000..475afe191f
--- /dev/null
+++ b/scripts/docker-integration-tests/aggregator_legacy/m3coordinator.yml
@@ -0,0 +1,98 @@
+listenAddress:
+ value: "0.0.0.0:7202"
+
+logging:
+ level: info
+
+metrics:
+ scope:
+ prefix: "coordinator"
+ prometheus:
+ handlerPath: /metrics
+ listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
+ sanitization: prometheus
+ samplingRate: 1.0
+ extended: none
+
+tagOptions:
+ idScheme: quoted
+
+carbon:
+ ingester:
+ listenAddress: "0.0.0.0:7204"
+ rules:
+ - pattern: .*
+ aggregation:
+ type: mean
+ policies:
+ - resolution: 10s
+ retention: 6h
+
+clusters:
+ - namespaces:
+ - namespace: agg
+ type: aggregated
+ resolution: 10s
+ retention: 6h
+ - namespace: unagg
+ type: unaggregated
+ retention: 1s
+ client:
+ config:
+ service:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - dbnode01:2379
+ writeConsistencyLevel: majority
+ readConsistencyLevel: unstrict_majority
+
+downsample:
+ remoteAggregator:
+ client:
+ placementKV:
+ namespace: /placement
+ environment: override_test_env
+ placementWatcher:
+ key: m3aggregator
+ initWatchTimeout: 10s
+ hashType: murmur32
+ shardCutoffLingerDuration: 1m
+ flushSize: 1440
+ maxTimerBatchSize: 1120
+ queueSize: 10000
+ queueDropType: oldest
+ encoder:
+ initBufferSize: 2048
+ maxMessageSize: 10485760
+ bytesPool:
+ buckets:
+ - capacity: 2048
+ count: 4096
+ - capacity: 4096
+ count: 4096
+ watermark:
+ low: 0.7
+ high: 1.0
+ connection:
+ writeTimeout: 250ms
+
+ingest:
+ ingester:
+ workerPoolSize: 10000
+ opPool:
+ size: 10000
+ retry:
+ maxRetries: 3
+ jitter: true
+ logSampleRate: 0.01
+ m3msg:
+ server:
+ listenAddress: "0.0.0.0:7507"
+ retry:
+ maxBackoff: 10s
+ jitter: true
diff --git a/scripts/docker-integration-tests/aggregator_legacy/test.sh b/scripts/docker-integration-tests/aggregator_legacy/test.sh
new file mode 100755
index 0000000000..5a116ececd
--- /dev/null
+++ b/scripts/docker-integration-tests/aggregator_legacy/test.sh
@@ -0,0 +1,125 @@
+#!/usr/bin/env bash
+
+set -xe
+
+source $GOPATH/src/github.com/m3db/m3/scripts/docker-integration-tests/common.sh
+REVISION=$(git rev-parse HEAD)
+COMPOSE_FILE=$GOPATH/src/github.com/m3db/m3/scripts/docker-integration-tests/aggregator_legacy/docker-compose.yml
+export REVISION
+
+echo "Run m3dbnode"
+docker-compose -f ${COMPOSE_FILE} up -d dbnode01
+
+# Stop containers on exit
+METRIC_EMIT_PID="-1"
+function defer {
+ docker-compose -f ${COMPOSE_FILE} down || echo "unable to shutdown containers" # CI fails to stop all containers sometimes
+ if [ "$METRIC_EMIT_PID" != "-1" ]; then
+ echo "Kill metric emit process"
+ kill $METRIC_EMIT_PID
+ fi
+}
+trap defer EXIT
+
+echo "Setup DB node"
+setup_single_m3db_node
+
+echo "Initializing aggregator topology"
+curl -vvvsSf -X POST -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/services/m3aggregator/placement/init -d '{
+ "num_shards": 64,
+ "replication_factor": 2,
+ "instances": [
+ {
+ "id": "m3aggregator01",
+ "isolation_group": "availability-zone-a",
+ "zone": "embedded",
+ "weight": 100,
+ "endpoint": "m3aggregator01:6000",
+ "hostname": "m3aggregator01",
+ "port": 6000
+ },
+ {
+ "id": "m3aggregator02",
+ "isolation_group": "availability-zone-b",
+ "zone": "embedded",
+ "weight": 100,
+ "endpoint": "m3aggregator02:6000",
+ "hostname": "m3aggregator02",
+ "port": 6000
+ }
+ ]
+}'
+
+echo "Initializing m3msg topic for m3coordinator ingestion from m3aggregators"
+curl -vvvsSf -X POST -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/topic/init -d '{
+ "numberOfShards": 64
+}'
+
+echo "Initializing m3coordinator topology"
+curl -vvvsSf -X POST localhost:7201/api/v1/services/m3coordinator/placement/init -d '{
+ "instances": [
+ {
+ "id": "m3coordinator01",
+ "zone": "embedded",
+ "endpoint": "m3coordinator01:7507",
+ "hostname": "m3coordinator01",
+ "port": 7507
+ }
+ ]
+}'
+echo "Done initializing m3coordinator topology"
+
+echo "Validating m3coordinator topology"
+[ "$(curl -sSf localhost:7201/api/v1/services/m3coordinator/placement | jq .placement.instances.m3coordinator01.id)" == '"m3coordinator01"' ]
+echo "Done validating topology"
+
+# Do this after placement for m3coordinator is created.
+echo "Adding m3coordinator as a consumer to the aggregator topic"
+curl -vvvsSf -X POST -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/topic -d '{
+ "consumerService": {
+ "serviceId": {
+ "name": "m3coordinator",
+ "environment": "default_env",
+ "zone": "embedded"
+ },
+ "consumptionType": "SHARED",
+ "messageTtlNanos": "600000000000"
+ }
+}' # msgs will be discarded after 600000000000ns = 10mins
+
+echo "Running m3coordinator container"
+echo "> port 7202 is coordinator API"
+echo "> port 7203 is coordinator metrics"
+echo "> port 7204 is coordinator graphite ingest"
+echo "> port 7507 is coordinator m3msg ingest from aggregator ingest"
+docker-compose -f ${COMPOSE_FILE} up -d m3coordinator01
+COORDINATOR_API="localhost:7202"
+
+echo "Running m3aggregator containers"
+docker-compose -f ${COMPOSE_FILE} up -d m3aggregator01
+docker-compose -f ${COMPOSE_FILE} up -d m3aggregator02
+
+echo "Verifying aggregation with remote aggregators"
+
+function read_carbon {
+ target=$1
+ expected_val=$2
+ end=$(date +%s)
+ start=$(($end-1000))
+ RESPONSE=$(curl -sSfg "http://${COORDINATOR_API}/api/v1/graphite/render?target=$target&from=$start&until=$end")
+ test "$(echo "$RESPONSE" | jq ".[0].datapoints | .[][0] | select(. != null)" | tail -n 1)" = "$expected_val"
+ return $?
+}
+
+# Send metric values 40 and 44 every second
+echo "Sending unaggregated carbon metrics to m3coordinator"
+bash -c 'while true; do t=$(date +%s); echo "foo.bar.baz 40 $t" | nc 0.0.0.0 7204; echo "foo.bar.baz 44 $t" | nc 0.0.0.0 7204; sleep 1; done' &
+
+# Track PID to kill on exit
+METRIC_EMIT_PID="$!"
+
+# Read back the averaged metric; we configured the graphite
+# aggregation policy to average each tile, and we are emitting
+# values 40 and 44 to get an average of 42 per tile
+echo "Read back aggregated averaged metric"
+ATTEMPTS=10 TIMEOUT=1 retry_with_backoff read_carbon foo.bar.* 42
diff --git a/scripts/docker-integration-tests/carbon/test.sh b/scripts/docker-integration-tests/carbon/test.sh
index 2bc019422c..d88c88a534 100755
--- a/scripts/docker-integration-tests/carbon/test.sh
+++ b/scripts/docker-integration-tests/carbon/test.sh
@@ -27,7 +27,7 @@ function read_carbon {
end=$(date +%s)
start=$(($end-1000))
RESPONSE=$(curl -sSfg "http://localhost:7201/api/v1/graphite/render?target=$target&from=$start&until=$end")
- test "$(echo "$RESPONSE" | jq ".[0].datapoints | .[][0] | select(. != null)" | tail -n 1)" = "$expected_val"
+ test "$(echo "$RESPONSE" | jq ".[0].datapoints | .[][0] | select(. != null)" | jq -s last)" = "$expected_val"
return $?
}
@@ -79,6 +79,11 @@ t=$(date +%s)
echo "foo.bar:baz.qux 42 $t" | nc 0.0.0.0 7204
ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff read_carbon 'foo.bar:*.*' 42
+# Test writing and reading IDs with a single element.
+t=$(date +%s)
+echo "quail 42 $t" | nc 0.0.0.0 7204
+ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff read_carbon 'quail' 42
+
t=$(date +%s)
echo "a 0 $t" | nc 0.0.0.0 7204
echo "a.bar 0 $t" | nc 0.0.0.0 7204
diff --git a/scripts/docker-integration-tests/coordinator_config_rules/test.sh b/scripts/docker-integration-tests/coordinator_config_rules/test.sh
index 5e6e12090e..5d1827d41b 100755
--- a/scripts/docker-integration-tests/coordinator_config_rules/test.sh
+++ b/scripts/docker-integration-tests/coordinator_config_rules/test.sh
@@ -43,7 +43,7 @@ function prometheus_remote_write {
local label2_value=${label2_value:-label2}
network_name="coordinator_config_rules"
- network=$(docker network ls | fgrep $network_name | tr -s ' ' | cut -f 1 -d ' ')
+ network=$(docker network ls | fgrep $network_name | tr -s ' ' | cut -f 1 -d ' ' | tail -n 1)
out=$((docker run -it --rm --network $network \
$PROMREMOTECLI_IMAGE \
-u http://coordinator01:7201/api/v1/prom/remote/write \
@@ -83,7 +83,7 @@ function prometheus_query_native {
result=$(curl -s \
-H "M3-Metrics-Type: ${metrics_type}" \
-H "M3-Storage-Policy: ${metrics_storage_policy}" \
- "0.0.0.0:7201/api/v1/${endpoint}?query=${query}${params_prefixed}" | jq -r "${jq_path}")
+ "0.0.0.0:7201/api/v1/${endpoint}?query=${query}${params_prefixed}" | jq -r "${jq_path}" | jq -s last)
test "$result" = "$expected_value"
return $?
}
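The tail -n 1 added above guards against docker network ls returning more than one match for the fgrep (stale networks from earlier runs, or names sharing a prefix). Docker's own name filter is arguably a tighter alternative; a sketch using the same network_name variable:

    network=$(docker network ls --format '{{.ID}}' \
      --filter "name=${network_name}" | tail -n 1)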
diff --git a/scripts/docker-integration-tests/coordinator_noop/docker-compose.yml b/scripts/docker-integration-tests/coordinator_noop/docker-compose.yml
new file mode 100644
index 0000000000..ee8207bd26
--- /dev/null
+++ b/scripts/docker-integration-tests/coordinator_noop/docker-compose.yml
@@ -0,0 +1,42 @@
+version: "3.5"
+services:
+ coordinator01:
+ expose:
+ - "7201"
+ ports:
+ - "0.0.0.0:7201:7201"
+ networks:
+ - backend
+ image: "m3coordinator_integration:${REVISION}"
+ volumes:
+ - "./m3coordinator.yml:/etc/m3coordinator/m3coordinator.yml"
+ etcd01:
+ expose:
+ - "2379-2380"
+ ports:
+ - "0.0.0.0:2379-2380:2379-2380"
+ networks:
+ - backend
+ image: quay.io/coreos/etcd:v3.4.3
+ command:
+ - "etcd"
+ - "--name"
+ - "etcd01"
+ - "--listen-peer-urls"
+ - "http://0.0.0.0:2380"
+ - "--listen-client-urls"
+ - "http://0.0.0.0:2379"
+ - "--advertise-client-urls"
+ - "http://etcd01:2379"
+ - "--initial-cluster-token"
+ - "etcd-cluster-1"
+ - "--initial-advertise-peer-urls"
+ - "http://etcd01:2380"
+ - "--initial-cluster"
+ - "etcd01=http://etcd01:2380"
+ - "--initial-cluster-state"
+ - "new"
+ - "--data-dir"
+ - "/var/lib/etcd"
+networks:
+ backend:
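Before initializing placements against the standalone etcd01 container above, readiness can be probed from the host; etcd serves a /health endpoint on the client port (a sketch; exact response formatting may vary by etcd version):

    curl -sSf http://localhost:2379/health
    # expected: {"health":"true"} once the single-node cluster is up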
diff --git a/scripts/docker-integration-tests/coordinator_noop/m3coordinator.yml b/scripts/docker-integration-tests/coordinator_noop/m3coordinator.yml
new file mode 100644
index 0000000000..e35b204202
--- /dev/null
+++ b/scripts/docker-integration-tests/coordinator_noop/m3coordinator.yml
@@ -0,0 +1,30 @@
+listenAddress:
+ value: "0.0.0.0:7201"
+
+logging:
+ level: info
+
+metrics:
+ scope:
+ prefix: "coordinator"
+ prometheus:
+ handlerPath: /metrics
+ listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
+ sanitization: prometheus
+ samplingRate: 1.0
+ extended: none
+
+backend: noop-etcd
+clusterManagement:
+ etcd:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - etcd01:2379
+
+tagOptions:
+ idScheme: quoted
diff --git a/scripts/docker-integration-tests/coordinator_noop/test.sh b/scripts/docker-integration-tests/coordinator_noop/test.sh
new file mode 100755
index 0000000000..e6e244b11a
--- /dev/null
+++ b/scripts/docker-integration-tests/coordinator_noop/test.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+set -xe
+
+source $GOPATH/src/github.com/m3db/m3/scripts/docker-integration-tests/common.sh
+REVISION=$(git rev-parse HEAD)
+SCRIPT_PATH=$GOPATH/src/github.com/m3db/m3/scripts/docker-integration-tests/coordinator_noop
+COMPOSE_FILE=$SCRIPT_PATH/docker-compose.yml
+export REVISION
+
+echo "Run coordinator with no etcd"
+docker-compose -f ${COMPOSE_FILE} up -d --renew-anon-volumes coordinator01
+docker-compose -f ${COMPOSE_FILE} up -d --renew-anon-volumes etcd01
+
+function defer {
+ docker-compose -f ${COMPOSE_FILE} down || echo "unable to shutdown containers" # CI fails to stop all containers sometimes
+}
+trap defer EXIT
+
+I=0
+RES=""
+while [[ "$I" -le 5 ]]; do
+ if curl -vvvsSf -X POST localhost:7201/api/v1/services/m3coordinator/placement/init -d '{
+ "instances": [
+ {
+ "id": "m3coordinator01",
+ "zone": "embedded",
+ "endpoint": "m3coordinator01:7507",
+ "hostname": "m3coordinator01",
+ "port": 7507
+ }
+ ]
+ }'; then
+ break
+ fi
+ # Need some time for coordinators to come up.
+ sleep 2
+ I=$((I+1))
+done
+
+if ! curl -vvvsSf localhost:7201/api/v1/services/m3coordinator/placement; then
+ echo "could not fetch existing placement"
+ exit 1
+fi
+
+QUERY_EXP='{"error":"operation not valid for noop client"}'
+RES=$(curl "localhost:7201/m3query/api/v1/query_range?start=$(date '+%s')&end=$(date '+%s')&step=10&query=foo")
+if [[ "$RES" != "$QUERY_EXP" ]]; then
+ echo "Expected resp '$QUERY_EXP', GOT '$RES'"
+ exit 1
+fi
diff --git a/scripts/docker-integration-tests/prometheus/m3coordinator.yml b/scripts/docker-integration-tests/prometheus/m3coordinator.yml
index 305f200b7f..6dbb0970d5 100644
--- a/scripts/docker-integration-tests/prometheus/m3coordinator.yml
+++ b/scripts/docker-integration-tests/prometheus/m3coordinator.yml
@@ -43,3 +43,12 @@ clusters:
tagOptions:
idScheme: quoted
+
+query:
+ restrictTags:
+ match:
+ - name: restricted_metrics_type
+ type: NOTEQUAL
+ value: hidden
+ strip:
+ - restricted_metrics_type
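The restrictTags block above makes the coordinator hide any series tagged restricted_metrics_type="hidden" by default and strip that tag from results. The restriction is overridable per request via a header, which is exactly how the prometheus test later in this diff verifies the hidden series were written; for instance:

    # An empty restriction object lifts the default and exposes the hidden series:
    curl -s -H 'M3-Restrict-By-Tags-JSON: {}' \
      '0.0.0.0:7201/api/v1/query?query={restricted_metrics_type="hidden"}'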
diff --git a/scripts/docker-integration-tests/prometheus/prometheus.yml b/scripts/docker-integration-tests/prometheus/prometheus.yml
index ce96174fe2..c11e4d7239 100644
--- a/scripts/docker-integration-tests/prometheus/prometheus.yml
+++ b/scripts/docker-integration-tests/prometheus/prometheus.yml
@@ -43,3 +43,6 @@ remote_read:
remote_write:
- url: http://coordinator01:7201/api/v1/prom/remote/write
+ write_relabel_configs:
+ - target_label: metrics_storage
+ replacement: m3db_remote
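The write_relabel_configs above stamps every remotely written sample with metrics_storage="m3db_remote", giving the limit tests later in this diff a matcher that selects exactly the remote-written series (rather than everything matching __name__!=""). A quick count mirroring those tests:

    curl -s '0.0.0.0:7201/api/v1/query?query={metrics_storage="m3db_remote"}' \
      | jq -r '.data.result | length'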
diff --git a/scripts/docker-integration-tests/prometheus/test-correctness.sh b/scripts/docker-integration-tests/prometheus/test-correctness.sh
index 4b25a55d41..c59d67a404 100755
--- a/scripts/docker-integration-tests/prometheus/test-correctness.sh
+++ b/scripts/docker-integration-tests/prometheus/test-correctness.sh
@@ -43,7 +43,7 @@ function test_instantaneous {
EXPECTED=$3
RESPONSE=$(curl -sSL "http://localhost:7201/api/v1/query?query=$QUERY")
ACTUAL_COUNT=$(echo $RESPONSE | jq '.data.result | length')
- ACTUAL=$(echo $RESPONSE | jq .data.result[].metric.foo | tr -d "\n")
+ ACTUAL=$(echo $RESPONSE | jq .data.result[].metric.foo | sort | tr -d "\n")
CONCAT=$(echo $EXPECTED | tr -d " ")
test $ACTUAL_COUNT = $EXPECTED_COUNT && test $ACTUAL = $CONCAT
}
@@ -63,7 +63,6 @@ function test_exists {
EXPECTED_EXISTS=$2
EXPECTED_NOT_EXISTS=$3
EXPECTED_COUNT=$4
- echo $QUERY "IS METRIC NAME"
RESPONSE=$(curl -sSL "http://localhost:7201/api/v1/query?query=$METRIC_NAME\{$QUERY\}")
ACTUAL_COUNT_EXISTS=$(echo $RESPONSE | jq .data.result[].metric.$EXPECTED_EXISTS | grep extra | wc -l)
ACTUAL_COUNT_NOT_EXISTS=$(echo $RESPONSE | jq .data.result[].metric.$EXPECTED_NOT_EXISTS | grep extra | wc -l)
@@ -95,8 +94,28 @@ function test_parse_threshold {
test $(echo $THRESHOLD | jq .query.name) = '"fetch"'
}
+function test_duplicates {
+ now=$(date +"%s")
+ start=$(( $now - 100 ))
+ end=$(( $now + 100 ))
+ QUERY="query=$METRIC_NAME&start=$start&end=$end&format=json"
+ ACTUAL=$(curl "localhost:7201/api/v1/prom/remote/read?$QUERY" | jq .[][].series[].tags[])
+ EXPECTED=$(echo '[ "__name__", "'${METRIC_NAME}'" ] [ "val", "extra" ] [ "val", "0" ]' | jq)
+ test "$ACTUAL"="$EXPECTED"
+}
+
+function test_debug_prom_returns_duplicates {
+ export METRIC_NAME="duplicate_$t"
+ # NB: this writes metrics of the form `duplicate_t{val="extra", val="1"}`
+ # with a duplicated `val` tag.
+ write_metrics 1 val
+  ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_duplicates
+}
+
function test_correctness {
test_parse_threshold
test_replace
test_empty_matcher
+
+ test_debug_prom_returns_duplicates
}
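A note on the comparison fixed in test_duplicates above: `test` only compares its operands when `=` stands alone as a word. Written without spaces, the whole expression collapses into a single non-empty string, which `test` treats as unconditionally true, so the assertion could never fail. A demonstration:

    test "foo"="bar" && echo "always true: one non-empty word"
    test "foo" = "bar" || echo "correctly false: a real comparison"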
diff --git a/scripts/docker-integration-tests/prometheus/test.sh b/scripts/docker-integration-tests/prometheus/test.sh
index fc02f05d69..d1a3655794 100755
--- a/scripts/docker-integration-tests/prometheus/test.sh
+++ b/scripts/docker-integration-tests/prometheus/test.sh
@@ -9,8 +9,7 @@ COMPOSE_FILE=$GOPATH/src/github.com/m3db/m3/scripts/docker-integration-tests/pro
# quay.io/m3db/prometheus_remote_client_golang @ v0.4.3
PROMREMOTECLI_IMAGE=quay.io/m3db/prometheus_remote_client_golang@sha256:fc56df819bff9a5a087484804acf3a584dd4a78c68900c31a28896ed66ca7e7b
JQ_IMAGE=realguess/jq:1.4@sha256:300c5d9fb1d74154248d155ce182e207cf6630acccbaadd0168e18b15bfaa786
-METRIC_NAME_TEST_TOO_OLD=foo
-METRIC_NAME_TEST_RESTRICT_WRITE=bar
+METRIC_NAME_TEST_RESTRICT_WRITE=bar_metric
export REVISION
echo "Pull containers required for test"
@@ -21,7 +20,14 @@ echo "Run m3dbnode and m3coordinator containers"
docker-compose -f ${COMPOSE_FILE} up -d dbnode01
docker-compose -f ${COMPOSE_FILE} up -d coordinator01
+TEST_SUCCESS=false
+
function defer {
+ if [[ "$TEST_SUCCESS" != "true" ]]; then
+ echo "Test failure, printing docker-compose logs"
+ docker-compose -f ${COMPOSE_FILE} logs
+ fi
+
docker-compose -f ${COMPOSE_FILE} down || echo "unable to shutdown containers" # CI fails to stop all containers sometimes
}
trap defer EXIT
@@ -60,14 +66,26 @@ function prometheus_remote_write {
local expect_status_err=$7
local metrics_type=$8
local metrics_storage_policy=$9
+ local map_tags_header=${10}
+
+ local optional_tags=""
+ for i in $(seq 0 10); do
+ local optional_tag_name=$(eval "echo \$TAG_NAME_$i")
+ local optional_tag_value=$(eval "echo \$TAG_VALUE_$i")
+ if [[ "$optional_tag_name" != "" ]] || [[ "$optional_tag_value" != "" ]]; then
+ optional_tags="$optional_tags -t ${optional_tag_name}:${optional_tag_value}"
+ fi
+ done
- network=$(docker network ls --format '{{.ID}}' | tail -n 1)
+ network_name="prometheus"
+ network=$(docker network ls | fgrep $network_name | tr -s ' ' | cut -f 1 -d ' ' | tail -n 1)
out=$((docker run -it --rm --network $network \
$PROMREMOTECLI_IMAGE \
-u http://coordinator01:7201/api/v1/prom/remote/write \
- -t __name__:${metric_name} \
+ -t __name__:${metric_name} ${optional_tags} \
-h "M3-Metrics-Type: ${metrics_type}" \
-h "M3-Storage-Policy: ${metrics_storage_policy}" \
+ -h "M3-Map-Tags-JSON: ${map_tags_header}" \
-d ${datapoint_timestamp},${datapoint_value} | grep -v promremotecli_log) || true)
success=$(echo $out | grep -v promremotecli_log | docker run --rm -i $JQ_IMAGE jq .success)
status=$(echo $out | grep -v promremotecli_log | docker run --rm -i $JQ_IMAGE jq .statusCode)
@@ -83,13 +101,47 @@ function prometheus_remote_write {
return 0
}
+function test_prometheus_remote_write_empty_label_name_returns_400_status_code {
+ echo "Test write empty name for a label returns HTTP 400"
+ now=$(date +"%s")
+ TAG_NAME_0="non_empty_name" TAG_VALUE_0="foo" \
+ TAG_NAME_1="" TAG_VALUE_1="bar" \
+ prometheus_remote_write \
+ "foo_metric" $now 42 \
+ false "Expected request to fail" \
+ 400 "Expected request to return status code 400"
+}
+
+function test_prometheus_remote_write_empty_label_value_returns_400_status_code {
+ echo "Test write empty value for a label returns HTTP 400"
+ now=$(date +"%s")
+ TAG_NAME_0="foo" TAG_VALUE_0="bar" \
+ TAG_NAME_1="non_empty_name" TAG_VALUE_1="" \
+ prometheus_remote_write \
+ "foo_metric" $now 42 \
+ false "Expected request to fail" \
+ 400 "Expected request to return status code 400"
+}
+
+function test_prometheus_remote_write_duplicate_label_returns_400_status_code {
+ echo "Test write with duplicate labels returns HTTP 400"
+ now=$(date +"%s")
+ hour_ago=$(( now - 3600 ))
+ TAG_NAME_0="dupe_name" TAG_VALUE_0="foo" \
+ TAG_NAME_1="non_dupe_name" TAG_VALUE_1="bar" \
+ TAG_NAME_2="dupe_name" TAG_VALUE_2="baz" \
+ prometheus_remote_write \
+ "foo_metric" $now 42 \
+ false "Expected request to fail" \
+ 400 "Expected request to return status code 400"
+}
+
function test_prometheus_remote_write_too_old_returns_400_status_code {
- # Test writing too far into the past returns an HTTP 400 status code
echo "Test write into the past returns HTTP 400"
now=$(date +"%s")
hour_ago=$(( now - 3600 ))
prometheus_remote_write \
- $METRIC_NAME_TEST_TOO_OLD $hour_ago 3.142 \
+ "foo_metric" $hour_ago 3.142 \
false "Expected request to fail" \
400 "Expected request to return status code 400"
}
@@ -102,7 +154,7 @@ function test_prometheus_remote_write_restrict_metrics_type {
true "Expected request to succeed" \
200 "Expected request to return status code 200" \
unaggregated
-
+
echo "Test write with aggregated metrics type works as expected"
prometheus_remote_write \
$METRIC_NAME_TEST_RESTRICT_WRITE now 84.84 \
@@ -111,18 +163,63 @@ function test_prometheus_remote_write_restrict_metrics_type {
aggregated 15s:10h
}
+function test_prometheus_remote_write_map_tags {
+ echo "Test map tags header works as expected"
+ prometheus_remote_write \
+ $METRIC_NAME_TEST_RESTRICT_WRITE now 42.42 \
+ true "Expected request to succeed" \
+ 200 "Expected request to return status code 200" \
+ unaggregated "" '{"tagMappers":[{"write":{"tag":"globaltag","value":"somevalue"}}]}'
+
+ ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 \
+ endpoint=query query="$METRIC_NAME_TEST_RESTRICT_WRITE" params="" \
+ metrics_type="unaggregated" jq_path=".data.result[0].metric.globaltag" expected_value="somevalue" \
+ retry_with_backoff prometheus_query_native
+}
+
function test_query_limits_applied {
- # Test the default series limit applied when directly querying
+ # Test the default series limit applied when directly querying
# coordinator (limit set to 100 in m3coordinator.yml)
+  # NB: ensure that the limit is not exceeded (the result count may fall below it).
echo "Test query limit with coordinator defaults"
ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
- '[[ $(curl -s 0.0.0.0:7201/api/v1/query?query=\\{__name__!=\"\"\\} | jq -r ".data.result | length") -eq 100 ]]'
+ '[[ $(curl -s 0.0.0.0:7201/api/v1/query?query=\\{metrics_storage=\"m3db_remote\"\\} | jq -r ".data.result | length") -lt 101 ]]'
+
+ # Test the series limit applied when directly querying
+ # coordinator (series limit set by header)
+ echo "Test query series limit with coordinator limit header"
+ ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
+ '[[ $(curl -s -H "M3-Limit-Max-Series: 10" 0.0.0.0:7201/api/v1/query?query=\\{metrics_storage=\"m3db_remote\"\\} | jq -r ".data.result | length") -eq 10 ]]'
+
+ echo "Test query series limit with require-exhaustive headers false"
+ ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
+ '[[ $(curl -s -H "M3-Limit-Max-Series: 2" -H "M3-Limit-Require-Exhaustive: false" 0.0.0.0:7201/api/v1/query?query=database_write_tagged_success | jq -r ".data.result | length") -eq 2 ]]'
- # Test the default series limit applied when directly querying
- # coordinator (limit set by header)
- echo "Test query limit with coordinator limit header"
+ echo "Test query series limit with require-exhaustive headers true (below limit therefore no error)"
+ ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
+ '[[ $(curl -s -H "M3-Limit-Max-Series: 4" -H "M3-Limit-Require-Exhaustive: true" 0.0.0.0:7201/api/v1/query?query=database_write_tagged_success | jq -r ".data.result | length") -eq 3 ]]'
+
+ echo "Test query series limit with require-exhaustive headers true (above limit therefore error)"
+ ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
+ '[[ -n $(curl -s -H "M3-Limit-Max-Series: 3" -H "M3-Limit-Require-Exhaustive: true" 0.0.0.0:7201/api/v1/query?query=database_write_tagged_success | jq ."error" | grep "query exceeded limit") ]]'
+
+ # Test the default docs limit applied when directly querying
+ # coordinator (docs limit set by header)
+ echo "Test query docs limit with coordinator limit header"
+ ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
+ '[[ $(curl -s -H "M3-Limit-Max-Docs: 1" 0.0.0.0:7201/api/v1/query?query=\\{metrics_storage=\"m3db_remote\"\\} | jq -r ".data.result | length") -lt 101 ]]'
+
+ echo "Test query docs limit with require-exhaustive headers false"
+ ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
+ '[[ $(curl -s -H "M3-Limit-Max-Docs: 1" -H "M3-Limit-Require-Exhaustive: false" 0.0.0.0:7201/api/v1/query?query=database_write_tagged_success | jq -r ".data.result | length") -eq 3 ]]'
+
+ echo "Test query docs limit with require-exhaustive headers true (below limit therefore no error)"
+ ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
+ '[[ $(curl -s -H "M3-Limit-Max-Docs: 4" -H "M3-Limit-Require-Exhaustive: true" 0.0.0.0:7201/api/v1/query?query=database_write_tagged_success | jq -r ".data.result | length") -eq 3 ]]'
+
+ echo "Test query docs limit with require-exhaustive headers true (above limit therefore error)"
ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
- '[[ $(curl -s -H "M3-Limit-Max-Series: 10" 0.0.0.0:7201/api/v1/query?query=\\{__name__!=\"\"\\} | jq -r ".data.result | length") -eq 10 ]]'
+ '[[ -n $(curl -s -H "M3-Limit-Max-Docs: 1" -H "M3-Limit-Require-Exhaustive: true" 0.0.0.0:7201/api/v1/query?query=database_write_tagged_success | jq ."error" | grep "query exceeded limit") ]]'
}
function prometheus_query_native {
@@ -142,7 +239,7 @@ function prometheus_query_native {
result=$(curl -s \
-H "M3-Metrics-Type: ${metrics_type}" \
-H "M3-Storage-Policy: ${metrics_storage_policy}" \
- "0.0.0.0:7201/api/v1/${endpoint}?query=${query}${params_prefixed}" | jq -r "${jq_path}")
+ "0.0.0.0:7201/m3query/api/v1/${endpoint}?query=${query}${params_prefixed}" | jq -r "${jq_path}" | head -1)
test "$result" = "$expected_value"
return $?
}
@@ -155,7 +252,7 @@ function test_query_restrict_metrics_type {
params_range="start=${hour_ago}"'&'"end=${now}"'&'"step=30s"
jq_path_instant=".data.result[0].value[1]"
jq_path_range=".data.result[0].values[][1]"
-
+
# Test restricting to unaggregated metrics
echo "Test query restrict to unaggregated metrics type (instant)"
ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 \
@@ -181,13 +278,71 @@ function test_query_restrict_metrics_type {
retry_with_backoff prometheus_query_native
}
+function test_query_restrict_tags {
+ # Test the default restrict tags is applied when directly querying
+ # coordinator (restrict tags set to hide any restricted_metrics_type="hidden"
+ # in m3coordinator.yml)
+
+ # First write some hidden metrics.
+ echo "Test write with unaggregated metrics type works as expected"
+ TAG_NAME_0="restricted_metrics_type" TAG_VALUE_0="hidden" \
+ TAG_NAME_1="foo_tag" TAG_VALUE_1="foo_tag_value" \
+ prometheus_remote_write \
+ some_hidden_metric now 42.42 \
+ true "Expected request to succeed" \
+ 200 "Expected request to return status code 200"
+
+ # Check that we can see them with zero restrictions applied as an
+ # override (we do this check first so that when we test that they
+ # don't appear by default we know that the metrics are already visible).
+ echo "Test restrict by tags with header override to remove restrict works"
+ ATTEMPTS=50 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
+ '[[ $(curl -s -H "M3-Restrict-By-Tags-JSON: {}" 0.0.0.0:7201/api/v1/query?query=\\{restricted_metrics_type=\"hidden\"\\} | jq -r ".data.result | length") -eq 1 ]]'
+
+ # Now test that the defaults will hide the metrics altogether.
+ echo "Test restrict by tags with coordinator defaults"
+ ATTEMPTS=5 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
+ '[[ $(curl -s 0.0.0.0:7201/api/v1/query?query=\\{restricted_metrics_type=\"hidden\"\\} | jq -r ".data.result | length") -eq 0 ]]'
+}
+
+function test_series {
+ # Test series search with start/end specified
+ ATTEMPTS=5 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
+ '[[ $(curl -s "0.0.0.0:7201/api/v1/series?match[]=prometheus_remote_storage_succeeded_samples_total&start=0&end=9999999999999.99999" | jq -r ".data | length") -eq 1 ]]'
+
+ # Test series search with no start/end specified
+ ATTEMPTS=5 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
+ '[[ $(curl -s "0.0.0.0:7201/api/v1/series?match[]=prometheus_remote_storage_succeeded_samples_total" | jq -r ".data | length") -eq 1 ]]'
+
+ # Test series search with min/max start time using the Prometheus Go
+ # min/max formatted timestamps, which is sent as part of a Prometheus
+ # remote query.
+ # minTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC()
+ # maxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC()
+ # minTimeFormatted = minTime.Format(time.RFC3339Nano)
+ # maxTimeFormatted = maxTime.Format(time.RFC3339Nano)
+ # Which:
+ # minTimeFormatted="-292273086-05-16T16:47:06Z"
+ # maxTimeFormatted="292277025-08-18T07:12:54.999999999Z"
+ ATTEMPTS=5 TIMEOUT=2 MAX_TIMEOUT=4 retry_with_backoff \
+ '[[ $(curl -s "0.0.0.0:7201/api/v1/series?match[]=prometheus_remote_storage_succeeded_samples_total&start=-292273086-05-16T16:47:06Z&end=292277025-08-18T07:12:54.999999999Z" | jq -r ".data | length") -eq 1 ]]'
+}
+
echo "Running prometheus tests"
test_prometheus_remote_read
test_prometheus_remote_write_multi_namespaces
+test_prometheus_remote_write_empty_label_name_returns_400_status_code
+test_prometheus_remote_write_empty_label_value_returns_400_status_code
+test_prometheus_remote_write_duplicate_label_returns_400_status_code
test_prometheus_remote_write_too_old_returns_400_status_code
test_prometheus_remote_write_restrict_metrics_type
test_query_limits_applied
test_query_restrict_metrics_type
+test_query_restrict_tags
+test_prometheus_remote_write_map_tags
+test_series
echo "Running function correctness tests"
test_correctness
+
+TEST_SUCCESS=true
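The optional-tag loop in prometheus_remote_write above resolves TAG_NAME_0..TAG_NAME_10 with eval; bash's indirect expansion does the same lookup without eval, which is worth knowing when extending these tests (a sketch):

    TAG_NAME_0="restricted_metrics_type" TAG_VALUE_0="hidden"
    for i in $(seq 0 10); do
      name_var="TAG_NAME_$i"
      value_var="TAG_VALUE_$i"
      optional_tag_name=${!name_var}    # empty if the variable is unset
      optional_tag_value=${!value_var}
      # ...same emptiness checks as in the function above...
    done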
diff --git a/scripts/docker-integration-tests/prometheus_replication/test.sh b/scripts/docker-integration-tests/prometheus_replication/test.sh
index cb857dfeb6..7d55c99c31 100755
--- a/scripts/docker-integration-tests/prometheus_replication/test.sh
+++ b/scripts/docker-integration-tests/prometheus_replication/test.sh
@@ -48,7 +48,8 @@ function prometheus_remote_write {
local expect_status=$6
local expect_status_err=$7
- network=$(docker network ls --format '{{.ID}}' | tail -n 1)
+ network_name="prometheus_replication"
+ network=$(docker network ls | fgrep $network_name | tr -s ' ' | cut -f 1 -d ' ' | tail -n 1)
out=$((docker run -it --rm --network $network \
$PROMREMOTECLI_IMAGE \
-u http://coordinator01:7201/api/v1/prom/remote/write \
diff --git a/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-a.yml b/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-a.yml
index 3345e80a63..39879fc86d 100644
--- a/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-a.yml
+++ b/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-a.yml
@@ -58,3 +58,8 @@ carbon:
tagOptions:
idScheme: quoted
+
+# Use tag consolidation here; other integration tests handle id consolidations.
+query:
+ consolidation:
+ matchType: tags
\ No newline at end of file
diff --git a/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-b.yml b/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-b.yml
index 769e3f8ffb..4d1dbcfce0 100644
--- a/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-b.yml
+++ b/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-b.yml
@@ -58,3 +58,7 @@ carbon:
tagOptions:
idScheme: quoted
+
+query:
+ consolidation:
+ matchType: tags
\ No newline at end of file
diff --git a/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-c.yml b/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-c.yml
index 3924603001..adbf3ca63d 100644
--- a/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-c.yml
+++ b/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-c.yml
@@ -58,3 +58,7 @@ carbon:
tagOptions:
idScheme: quoted
+
+query:
+ consolidation:
+ matchType: tags
\ No newline at end of file
diff --git a/scripts/docker-integration-tests/query_fanout/restrict.go b/scripts/docker-integration-tests/query_fanout/restrict.go
index 320104de20..8f2bb7f8ee 100644
--- a/scripts/docker-integration-tests/query_fanout/restrict.go
+++ b/scripts/docker-integration-tests/query_fanout/restrict.go
@@ -47,8 +47,8 @@ func main() {
requireTrue(ts > 0, "no timestamp supplied")
name = fmt.Sprintf("foo_%d", ts)
- instant := fmt.Sprintf("http://0.0.0.0:7201/api/v1/query?query=%s", name)
- rnge := fmt.Sprintf("http://0.0.0.0:7201/api/v1/query_range?query=%s"+
+ instant := fmt.Sprintf("http://0.0.0.0:7201/m3query/api/v1/query?query=%s", name)
+ rnge := fmt.Sprintf("http://0.0.0.0:7201/m3query/api/v1/query_range?query=%s"+
"&start=%d&end=%d&step=100", name, ts/100*100, (ts/100+1)*100)
for _, url := range []string{instant, rnge} {
diff --git a/scripts/docker-integration-tests/query_fanout/warning.sh b/scripts/docker-integration-tests/query_fanout/warning.sh
index 080684d76c..b29b1b13f1 100755
--- a/scripts/docker-integration-tests/query_fanout/warning.sh
+++ b/scripts/docker-integration-tests/query_fanout/warning.sh
@@ -49,7 +49,7 @@ function write_metrics {
}'
done
set -x
-}
+}
function clean_headers {
rm $HEADER_FILE
@@ -58,13 +58,15 @@ function clean_headers {
function test_instant_query {
LIMIT=$1
EXPECTED=$2
- EXPECTED_HEADER=$3||""
+ ENDPOINT=$3||""
+ EXPECTED_HEADER=$4||""
trap clean_headers EXIT
RESPONSE=$(curl -sSL -D $HEADER_FILE -H "M3-Limit-Max-Series:$LIMIT" \
- "http://0.0.0.0:7201/api/v1/query?query=count($METRIC_NAME)")
+ "http://0.0.0.0:7201${ENDPOINT}/api/v1/query?query=count($METRIC_NAME)")
ACTUAL=$(echo $RESPONSE | jq .data.result[0].value[1] | tr -d \" | tr -d \')
ACTUAL_HEADER=$(cat $HEADER_FILE | grep M3-Results-Limited | cut -d' ' -f2 | tr -d "\r\n")
- test $ACTUAL = $EXPECTED && test $ACTUAL_HEADER = $EXPECTED_HEADER
+ test "$ACTUAL" = "$EXPECTED"
+ test "$ACTUAL_HEADER" = "$EXPECTED_HEADER"
}
t=$(date +%s)
@@ -82,13 +84,15 @@ function test_range_query {
"http://0.0.0.0:7201/api/v1/query_range?start=$start&end=$end&step=10&query=count($METRIC_NAME)")
ACTUAL=$(echo $RESPONSE | jq .data.result[0].values[0][1] | tr -d \" | tr -d \')
ACTUAL_HEADER=$(cat $HEADER_FILE | grep M3-Results-Limited | cut -d' ' -f2 | tr -d "\r\n")
- test $ACTUAL = $EXPECTED && test $ACTUAL_HEADER = $EXPECTED_HEADER
+ test "$ACTUAL" = "$EXPECTED"
+ test "$ACTUAL_HEADER" = "$EXPECTED_HEADER"
}
function test_search {
- start=$(date -d "$(date +%Y-%m-%dT%H:%M:%SZ) -1 minute" +%Y-%m-%dT%H:%M:%SZ)
- end=$(date -d "$(date +%Y-%m-%dT%H:%M:%SZ) +1 minute" +%Y-%m-%dT%H:%M:%SZ)
-
+ s=$(( $(date +%s) - 60 ))
+ start=$(date -r $s +%Y-%m-%dT%H:%M:%SZ)
+ e=$(( $(date +%s) + 60 ))
+ end=$(date -r $e +%Y-%m-%dT%H:%M:%SZ)
curl -D headers -X POST 0.0.0.0:7201/search -d '{
"start": "'$start'",
"end": "'$end'",
@@ -109,7 +113,7 @@ function test_search {
curl -sSL -D $HEADER_FILE -H "M3-Limit-Max-Series:$LIMIT" \
"http://0.0.0.0:7201/api/v1/search?query=val:.*"
ACTUAL_HEADER=$(cat $HEADER_FILE | grep M3-Results-Limited | cut -d' ' -f2 | tr -d "\r\n")
- test $ACTUAL_HEADER = $EXPECTED_HEADER
+ test "$ACTUAL_HEADER" = "$EXPECTED_HEADER"
}
function test_labels {
@@ -120,7 +124,7 @@ function test_labels {
curl -sSL -D $HEADER_FILE -H "M3-Limit-Max-Series:$LIMIT" \
"http://0.0.0.0:7201/api/v1/labels"
ACTUAL_HEADER=$(cat $HEADER_FILE | grep M3-Results-Limited | cut -d' ' -f2 | tr -d "\r\n")
- test $ACTUAL_HEADER = $EXPECTED_HEADER
+ test "$ACTUAL_HEADER" = "$EXPECTED_HEADER"
}
function test_match {
@@ -136,7 +140,8 @@ function test_match {
# NB: since it's not necessarily deterministic which series we get back from
# remote sources, check that we got at least EXPECTED values, which will be
# the lower bound.
- test $ACTUAL -ge $EXPECTED && test $ACTUAL_HEADER = $EXPECTED_HEADER
+ test "$ACTUAL" -ge "$EXPECTED"
+ test "$ACTUAL_HEADER" = "$EXPECTED_HEADER"
}
function test_label_values {
@@ -147,7 +152,7 @@ function test_label_values {
curl -sSL -D $HEADER_FILE -H "M3-Limit-Max-Series:$LIMIT" \
"http://0.0.0.0:7201/api/v1/label/val/values"
ACTUAL_HEADER=$(cat $HEADER_FILE | grep M3-Results-Limited | cut -d' ' -f2 | tr -d "\r\n")
- test $ACTUAL_HEADER = $EXPECTED_HEADER
+ test "$ACTUAL_HEADER" = "$EXPECTED_HEADER"
}
function write_carbon {
@@ -178,7 +183,7 @@ function render_carbon {
LIMIT=$1
EXPECTED=$2
EXPECTED_HEADER=$3
- trap clean_headers EXIT
+ trap clean_headers EXIT
start=$(($t))
end=$(($start+200))
@@ -186,7 +191,8 @@ function render_carbon {
"http://localhost:7201/api/v1/graphite/render?target=countSeries($GRAPHITE.*.*)&from=$start&until=$end")
ACTUAL=$(echo $RESPONSE | jq .[0].datapoints[0][0])
ACTUAL_HEADER=$(cat $HEADER_FILE | grep M3-Results-Limited | cut -d' ' -f2 | tr -d "\r\n")
- test $ACTUAL = $EXPECTED && test $ACTUAL_HEADER = $EXPECTED_HEADER
+ test "$ACTUAL" = "$EXPECTED"
+ test "$ACTUAL_HEADER" = "$EXPECTED_HEADER"
}
function find_carbon {
@@ -197,7 +203,7 @@ function find_carbon {
RESPONSE=$(curl -sSL -D $HEADER_FILE -H "M3-Limit-Max-Series:$LIMIT" \
"http://localhost:7201/api/v1/graphite/metrics/find?query=$GRAPHITE.*")
ACTUAL_HEADER=$(cat $HEADER_FILE | grep M3-Results-Limited | cut -d' ' -f2 | tr -d "\r\n")
- test $ACTUAL_HEADER = $EXPECTED_HEADER
+ test "$ACTUAL_HEADER" = "$EXPECTED_HEADER"
}
function test_fanout_warning_fetch {
@@ -206,16 +212,24 @@ function test_fanout_warning_fetch {
# write 5 metrics to cluster a
write_metrics coordinator-cluster-a 5
# unlimited query against cluster a has no header
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 100 5
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 100 5 "/m3query"
# limited query against cluster a has header
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 4 4 max_fetch_series_limit_applied
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 4 4 "/m3query" max_fetch_series_limit_applied
+ # unlimited query against cluster a has no header
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 100 5 "/prometheus"
+ # limited query against cluster a has header
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 4 4 "/prometheus" max_fetch_series_limit_applied
# write 10 metrics to cluster b
write_metrics coordinator-cluster-b 10
# unlimited query against cluster a has no header
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 100 15
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 100 15 "/m3query"
+ # remote limited query against cluster a has header
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 9 14 "/m3query" max_fetch_series_limit_applied
+ # unlimited query against cluster a has no header
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 100 15 "/prometheus"
# remote limited query against cluster a has header
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 9 14 max_fetch_series_limit_applied
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 9 14 "/prometheus" max_fetch_series_limit_applied
}
function test_fanout_warning_fetch_instantaneous {
@@ -242,14 +256,14 @@ function test_fanout_warning_search {
# write 5 metrics to cluster a
write_metrics coordinator-cluster-a 5
# unlimited query against cluster a has no header
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_search 15
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_search 1000
# limited query against cluster a has header
ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_search 4 max_fetch_series_limit_applied
# write 10 metrics to cluster b
write_metrics coordinator-cluster-b 10
# unlimited query against cluster a has no header
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_search 16
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_search 1000
# remote limited query against cluster a has header
ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_search 4 max_fetch_series_limit_applied
}
@@ -282,6 +296,31 @@ function test_fanout_warning_label_values {
ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_label_values 1 max_fetch_series_limit_applied
}
+function test_fanout_warning_fetch_id_mismatch {
+ METRIC_NAME="foo_$t"
+ export INSTANT_NAME=$METRIC_NAME
+ # write 5 metrics to cluster a
+ write_metrics coordinator-cluster-a 5
+ # unlimited query against cluster a has no header
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 100 5 "/m3query"
+ # limited query against cluster a has header
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 4 4 "/m3query" max_fetch_series_limit_applied
+ # unlimited query against cluster a has no header
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 100 5 "/prometheus"
+ # limited query against cluster a has header
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 4 4 "/prometheus" max_fetch_series_limit_applied
+
+ # write 10 metrics to cluster b
+ write_metrics coordinator-cluster-b 10
+ # unlimited query against cluster a has no header
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 100 15 "/m3query"
+ # remote limited query against cluster a has header
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 9 14 "/m3query" max_fetch_series_limit_applied
+ # unlimited query against cluster a has no header
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 100 15 "/prometheus"
+ # remote limited query against cluster a has header
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 9 14 "/prometheus" max_fetch_series_limit_applied
+}
function test_fanout_warning_graphite {
# Update write time as it will otherwise not be written correctly.
@@ -292,7 +331,7 @@ function test_fanout_warning_graphite {
ATTEMPTS=8 TIMEOUT=1 retry_with_backoff render_carbon 6 5
ATTEMPTS=3 TIMEOUT=1 retry_with_backoff find_carbon 6
# limited query against cluster a has header
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff render_carbon 4 4 max_fetch_series_limit_applied
+ ATTEMPTS=8 TIMEOUT=1 retry_with_backoff render_carbon 4 4 max_fetch_series_limit_applied
ATTEMPTS=3 TIMEOUT=1 retry_with_backoff find_carbon 4 max_fetch_series_limit_applied
# Update write time as it will otherwise not be written correctly.
@@ -303,7 +342,7 @@ function test_fanout_warning_graphite {
ATTEMPTS=8 TIMEOUT=1 retry_with_backoff render_carbon 16 15
ATTEMPTS=3 TIMEOUT=1 retry_with_backoff find_carbon 16
# remote limited query against cluster a has header
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff render_carbon 9 14 max_fetch_series_limit_applied
+ ATTEMPTS=8 TIMEOUT=1 retry_with_backoff render_carbon 9 14 max_fetch_series_limit_applied
ATTEMPTS=3 TIMEOUT=1 retry_with_backoff find_carbon 9 max_fetch_series_limit_applied
}
@@ -311,32 +350,32 @@ function test_fanout_warning_missing_zone {
docker-compose -f ${COMPOSE_FILE} stop coordinator-cluster-c
METRIC_NAME=$INSTANT_NAME
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 100 15 remote_store_cluster-c_fetch_blocks_warning
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 9 14 max_fetch_series_limit_applied,remote_store_cluster-c_fetch_blocks_warning
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 100 15 remote_store_cluster-c_fetch_data_error
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_instant_query 9 14 max_fetch_series_limit_applied,remote_store_cluster-c_fetch_data_error
METRIC_NAME=$RANGE_NAME
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_range_query 100 15 remote_store_cluster-c_fetch_blocks_warning
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_range_query 9 14 max_fetch_series_limit_applied,remote_store_cluster-c_fetch_blocks_warning
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_range_query 100 15 remote_store_cluster-c_fetch_data_error
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_range_query 9 14 max_fetch_series_limit_applied,remote_store_cluster-c_fetch_data_error
METRIC_NAME=$SEARCH_NAME
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_search 16 remote_store_cluster-c_complete_tags_warning
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_search 4 max_fetch_series_limit_applied,remote_store_cluster-c_complete_tags_warning
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_search 1000 remote_store_cluster-c_fetch_data_error
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_search 4 max_fetch_series_limit_applied,remote_store_cluster-c_fetch_data_error
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_labels 100 remote_store_cluster-c_complete_tags_warning
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_labels 1 max_fetch_series_limit_applied,remote_store_cluster-c_complete_tags_warning
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_labels 100 remote_store_cluster-c_fetch_data_error
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_labels 1 max_fetch_series_limit_applied,remote_store_cluster-c_fetch_data_error
METRIC_NAME=$MATCH_NAME
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_match 11 10 remote_store_cluster-c_search_series_warning
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_match 9 9 max_fetch_series_limit_applied,remote_store_cluster-c_search_series_warning
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_match 11 10 remote_store_cluster-c_fetch_data_error
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_match 9 9 max_fetch_series_limit_applied,remote_store_cluster-c_fetch_data_error
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_label_values 100 remote_store_cluster-c_complete_tags_warning
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_label_values 1 max_fetch_series_limit_applied,remote_store_cluster-c_complete_tags_warning
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_label_values 100 remote_store_cluster-c_fetch_data_error
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff test_label_values 1 max_fetch_series_limit_applied,remote_store_cluster-c_fetch_data_error
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff render_carbon 16 15 remote_store_cluster-c_fetch_blocks_warning
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff render_carbon 9 14 max_fetch_series_limit_applied,remote_store_cluster-c_fetch_blocks_warning
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff render_carbon 16 15 remote_store_cluster-c_fetch_data_error
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff render_carbon 9 14 max_fetch_series_limit_applied,remote_store_cluster-c_fetch_data_error
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff find_carbon 16 remote_store_cluster-c_complete_tags_warning
- ATTEMPTS=3 TIMEOUT=1 retry_with_backoff find_carbon 9 max_fetch_series_limit_applied,remote_store_cluster-c_complete_tags_warning
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff find_carbon 16 remote_store_cluster-c_fetch_data_error
+ ATTEMPTS=3 TIMEOUT=1 retry_with_backoff find_carbon 9 max_fetch_series_limit_applied,remote_store_cluster-c_fetch_data_error
docker-compose -f ${COMPOSE_FILE} start coordinator-cluster-c
}
@@ -348,8 +387,8 @@ function test_fanout_warnings {
test_fanout_warning_match
test_fanout_warning_labels
test_fanout_warning_label_values
+ test_fanout_warning_fetch_id_mismatch
export GRAPHITE="foo.bar.$t"
test_fanout_warning_graphite
test_fanout_warning_missing_zone
}
-
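For reference, the limit-header handshake warning.sh exercises can be reproduced outside bash; a sketch in Go using only the standard library, with the endpoint shape, port, and header names taken from the script:

package main

import (
	"fmt"
	"net/http"
)

// checkLimited issues an instant query with a series limit and reports
// whether the coordinator set the M3-Results-Limited header.
func checkLimited(endpoint, metric string, limit int) (bool, error) {
	url := fmt.Sprintf("http://0.0.0.0:7201%s/api/v1/query?query=count(%s)", endpoint, metric)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return false, err
	}
	req.Header.Set("M3-Limit-Max-Series", fmt.Sprint(limit))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	return resp.Header.Get("M3-Results-Limited") != "", nil
}

func main() {
	limited, err := checkLimited("/m3query", "foo_1600000000", 4)
	if err != nil {
		panic(err)
	}
	fmt.Println("limited:", limited)
}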
diff --git a/scripts/docker-integration-tests/run.sh b/scripts/docker-integration-tests/run.sh
index 3bbdbc5f12..12f08e87bf 100755
--- a/scripts/docker-integration-tests/run.sh
+++ b/scripts/docker-integration-tests/run.sh
@@ -9,12 +9,14 @@ TESTS=(
scripts/docker-integration-tests/prometheus_replication/test.sh
scripts/docker-integration-tests/carbon/test.sh
scripts/docker-integration-tests/aggregator/test.sh
+ scripts/docker-integration-tests/aggregator_legacy/test.sh
scripts/docker-integration-tests/query_fanout/test.sh
scripts/docker-integration-tests/repair/test.sh
scripts/docker-integration-tests/replication/test.sh
scripts/docker-integration-tests/repair_and_replication/test.sh
scripts/docker-integration-tests/multi_cluster_write/test.sh
scripts/docker-integration-tests/coordinator_config_rules/test.sh
+ scripts/docker-integration-tests/coordinator_noop/test.sh
)
# Some systems, including our default Buildkite hosts, don't come with netcat
@@ -54,7 +56,10 @@ for test in "${TESTS[@]}"; do
docker rm -f $(docker ps -aq) 2>/dev/null || true
echo "----------------------------------------------"
echo "running $test"
- "$test"
+ if ! $test; then
+ echo "--- :bk-status-failed: $test FAILED"
+ exit 1
+ fi
fi
ITER="$((ITER+1))"
done
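The guard above makes a failing suite abort the run immediately with a Buildkite-visible marker, instead of relying on set -e semantics inside the loop; roughly the same fail-fast behavior sketched in Go (the script list here is illustrative):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	tests := []string{
		"scripts/docker-integration-tests/query_fanout/test.sh",
		"scripts/docker-integration-tests/coordinator_noop/test.sh",
	}
	for _, test := range tests {
		fmt.Println("running", test)
		cmd := exec.Command(test)
		cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
		if err := cmd.Run(); err != nil {
			fmt.Printf("--- :bk-status-failed: %s FAILED\n", test)
			os.Exit(1)
		}
	}
}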
diff --git a/scripts/docker-integration-tests/simple_v2_batch_apis/test.sh b/scripts/docker-integration-tests/simple_v2_batch_apis/test.sh
index 458efa8bc9..99cffe62a5 100755
--- a/scripts/docker-integration-tests/simple_v2_batch_apis/test.sh
+++ b/scripts/docker-integration-tests/simple_v2_batch_apis/test.sh
@@ -60,7 +60,8 @@ function prometheus_remote_write {
local metrics_type=$8
local metrics_storage_policy=$9
- network=$(docker network ls --format '{{.ID}}' | tail -n 1)
+ network_name="simple_v2_batch_apis"
+ network=$(docker network ls | fgrep $network_name | tr -s ' ' | cut -f 1 -d ' ' | tail -n 1)
out=$((docker run -it --rm --network $network \
$PROMREMOTECLI_IMAGE \
-u http://dbnode01:7201/api/v1/prom/remote/write \
diff --git a/scripts/proto-gen.sh b/scripts/proto-gen.sh
index ae350bb0fb..7c62959664 100755
--- a/scripts/proto-gen.sh
+++ b/scripts/proto-gen.sh
@@ -27,9 +27,11 @@ for i in "${GOPATH}/src/${PROTO_SRC}"/*; do
echo "generating from ${proto_files}"
# need the additional m3db_path mount in docker because it's a symlink on the CI.
m3db_path=$(realpath $GOPATH/src/github.com/m3db/m3)
+ resolve_protos="Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types"
+
docker run --rm -w /src -v $GOPATH/src:/src -v ${m3db_path}:/src/github.com/m3db/m3 \
$UID_FLAGS $PROTOC_IMAGE_VERSION \
- --gogofaster_out=Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,plugins=grpc:/src \
+ --gogofaster_out=${resolve_protos},plugins=grpc:/src \
-I/src -I/src/github.com/m3db/m3/vendor ${proto_files}
fi
done
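The extra M mapping tells gogofaster to resolve google/protobuf/wrappers.proto (alongside timestamp.proto) to the shared gogo types package rather than generating a second copy; assuming gogo/protobuf is on the import path, generated code then uses the well-known types directly, along these lines:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Fields declared as google.protobuf.Timestamp or google.protobuf.Int64Value
	// in the .proto files resolve to these gogo types after generation.
	ts := types.TimestampNow()
	val := &types.Int64Value{Value: 42}
	fmt.Println(ts.Seconds, val.Value)
}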
diff --git a/scripts/vagrant/provision/setup_unprivileged.sh b/scripts/vagrant/provision/setup_unprivileged.sh
index 1df2308450..0e1638f8a1 100755
--- a/scripts/vagrant/provision/setup_unprivileged.sh
+++ b/scripts/vagrant/provision/setup_unprivileged.sh
@@ -23,4 +23,4 @@ mv ${HOME}/go/bin/kind ${HOME}/bin
# Setup kubectl to use the kind config
echo '' >> ${HOME}/.bashrc
-echo 'export KUBECONFIG=${HOME}/.kube/kind-config-kind' >> ${HOME}/.bashrc
+echo 'export KUBECONFIG=${HOME}/.kube/config' >> ${HOME}/.bashrc
diff --git a/src/aggregator/aggregation/counter.go b/src/aggregator/aggregation/counter.go
index 9044ad791f..f739cc1a9e 100644
--- a/src/aggregator/aggregation/counter.go
+++ b/src/aggregator/aggregation/counter.go
@@ -22,6 +22,7 @@ package aggregation
import (
"math"
+ "time"
"github.com/m3db/m3/src/metrics/aggregation"
)
@@ -30,11 +31,12 @@ import (
type Counter struct {
Options
- sum int64
- sumSq int64
- count int64
- max int64
- min int64
+ lastAt time.Time
+ sum int64
+ sumSq int64
+ count int64
+ max int64
+ min int64
}
// NewCounter creates a new counter.
@@ -47,7 +49,16 @@ func NewCounter(opts Options) Counter {
}
// Update updates the counter value.
-func (c *Counter) Update(value int64) {
+func (c *Counter) Update(timestamp time.Time, value int64) {
+ if c.lastAt.IsZero() || timestamp.After(c.lastAt) {
+ // NB(r): Only set the last value if this value arrives
+ // after the wall clock timestamp of previous values, not
+ // the arrival time (i.e. order received).
+ c.lastAt = timestamp
+ } else {
+ c.Options.Metrics.Counter.IncValuesOutOfOrder()
+ }
+
c.sum += value
c.count++
@@ -63,6 +74,9 @@ func (c *Counter) Update(value int64) {
}
}
+// LastAt returns the time of the last value received.
+func (c *Counter) LastAt() time.Time { return c.lastAt }
+
// Count returns the number of values received.
func (c *Counter) Count() int64 { return c.count }
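Callers now pass the metric's wall-clock timestamp with each value; a minimal usage sketch against the new Counter API (constructors and methods as defined above):

package main

import (
	"fmt"
	"time"

	"github.com/m3db/m3/src/aggregator/aggregation"
	"github.com/m3db/m3/src/x/instrument"
)

func main() {
	c := aggregation.NewCounter(aggregation.NewOptions(instrument.NewOptions()))
	now := time.Now()
	c.Update(now, 10)
	c.Update(now.Add(time.Second), 5)
	// An earlier timestamp leaves LastAt unchanged and increments the
	// values-out-of-order counter instead; the value still counts toward Sum.
	c.Update(now.Add(-time.Second), 7)
	fmt.Println(c.Sum(), c.Count(), c.LastAt().Equal(now.Add(time.Second))) // 22 3 true
}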
diff --git a/src/aggregator/aggregation/counter_test.go b/src/aggregator/aggregation/counter_test.go
index 7d8537812c..47492459fe 100644
--- a/src/aggregator/aggregation/counter_test.go
+++ b/src/aggregator/aggregation/counter_test.go
@@ -22,17 +22,19 @@ package aggregation
import (
"testing"
+ "time"
"github.com/m3db/m3/src/metrics/aggregation"
+ "github.com/m3db/m3/src/x/instrument"
"github.com/stretchr/testify/require"
)
func TestCounterDefaultAggregationType(t *testing.T) {
- c := NewCounter(NewOptions())
+ c := NewCounter(NewOptions(instrument.NewOptions()))
require.False(t, c.HasExpensiveAggregations)
for i := 1; i <= 100; i++ {
- c.Update(int64(i))
+ c.Update(time.Now(), int64(i))
}
require.Equal(t, int64(5050), c.Sum())
require.Equal(t, 5050.0, c.ValueOf(aggregation.Sum))
@@ -41,14 +43,14 @@ func TestCounterDefaultAggregationType(t *testing.T) {
}
func TestCounterCustomAggregationType(t *testing.T) {
- opts := NewOptions()
+ opts := NewOptions(instrument.NewOptions())
opts.HasExpensiveAggregations = true
c := NewCounter(opts)
require.True(t, c.HasExpensiveAggregations)
for i := 1; i <= 100; i++ {
- c.Update(int64(i))
+ c.Update(time.Now(), int64(i))
}
require.Equal(t, int64(5050), c.Sum())
for aggType := range aggregation.ValidTypes {
diff --git a/src/aggregator/aggregation/gauge.go b/src/aggregator/aggregation/gauge.go
index c67109c0fb..4e381c81c2 100644
--- a/src/aggregator/aggregation/gauge.go
+++ b/src/aggregator/aggregation/gauge.go
@@ -22,6 +22,7 @@ package aggregation
import (
"math"
+ "time"
"github.com/m3db/m3/src/metrics/aggregation"
)
@@ -34,12 +35,13 @@ const (
type Gauge struct {
Options
- last float64
- sum float64
- sumSq float64
- count int64
- max float64
- min float64
+ lastAt time.Time
+ last float64
+ sum float64
+ sumSq float64
+ count int64
+ max float64
+ min float64
}
// NewGauge creates a new gauge.
@@ -52,8 +54,16 @@ func NewGauge(opts Options) Gauge {
}
// Update updates the gauge value.
-func (g *Gauge) Update(value float64) {
- g.last = value
+func (g *Gauge) Update(timestamp time.Time, value float64) {
+ if g.lastAt.IsZero() || timestamp.After(g.lastAt) {
+ // NB(r): Only set the last value if this value arrives
+ // after the wall clock timestamp of previous values, not
+ // the arrival time (i.e. order received).
+ g.lastAt = timestamp
+ g.last = value
+ } else {
+ g.Options.Metrics.Gauge.IncValuesOutOfOrder()
+ }
g.sum += value
g.count++
@@ -69,6 +79,9 @@ func (g *Gauge) Update(value float64) {
}
}
+// LastAt returns the time of the last value received.
+func (g *Gauge) LastAt() time.Time { return g.lastAt }
+
// Last returns the last value received.
func (g *Gauge) Last() float64 { return g.last }
diff --git a/src/aggregator/aggregation/gauge_test.go b/src/aggregator/aggregation/gauge_test.go
index 38e9fb3087..bcf37e006f 100644
--- a/src/aggregator/aggregation/gauge_test.go
+++ b/src/aggregator/aggregation/gauge_test.go
@@ -22,17 +22,20 @@ package aggregation
import (
"testing"
+ "time"
"github.com/m3db/m3/src/metrics/aggregation"
+ "github.com/m3db/m3/src/x/instrument"
"github.com/stretchr/testify/require"
+ "github.com/uber-go/tally"
)
func TestGaugeDefaultAggregationType(t *testing.T) {
- g := NewGauge(NewOptions())
+ g := NewGauge(NewOptions(instrument.NewOptions()))
require.False(t, g.HasExpensiveAggregations)
for i := 1.0; i <= 100.0; i++ {
- g.Update(i)
+ g.Update(time.Now(), i)
}
require.Equal(t, 100.0, g.Last())
require.Equal(t, 100.0, g.ValueOf(aggregation.Last))
@@ -42,14 +45,14 @@ func TestGaugeDefaultAggregationType(t *testing.T) {
}
func TestGaugeCustomAggregationType(t *testing.T) {
- opts := NewOptions()
+ opts := NewOptions(instrument.NewOptions())
opts.HasExpensiveAggregations = true
g := NewGauge(opts)
require.True(t, g.HasExpensiveAggregations)
for i := 1; i <= 100; i++ {
- g.Update(float64(i))
+ g.Update(time.Now(), float64(i))
}
require.Equal(t, 100.0, g.Last())
@@ -78,3 +81,25 @@ func TestGaugeCustomAggregationType(t *testing.T) {
}
}
}
+
+func TestGaugeLastOutOfOrderValues(t *testing.T) {
+ scope := tally.NewTestScope("", nil)
+ g := NewGauge(NewOptions(instrument.NewOptions().SetMetricsScope(scope)))
+
+ timeMid := time.Now().Add(time.Minute)
+ timePre := timeMid.Add(-1 * time.Second)
+ timePrePre := timeMid.Add(-2 * time.Second)
+ timeAfter := timeMid.Add(time.Second)
+
+ g.Update(timeMid, 42)
+ g.Update(timePre, 41)
+ g.Update(timeAfter, 43)
+ g.Update(timePrePre, 40)
+
+ require.Equal(t, 43.0, g.Last())
+ snap := scope.Snapshot()
+ counters := snap.Counters()
+ counter, ok := counters["aggregation.gauges.values-out-of-order+"]
+ require.True(t, ok)
+ require.Equal(t, int64(2), counter.Value())
+}
diff --git a/src/aggregator/aggregation/options.go b/src/aggregator/aggregation/options.go
index 2b70ec8136..a6ca9f6f1e 100644
--- a/src/aggregator/aggregation/options.go
+++ b/src/aggregator/aggregation/options.go
@@ -20,10 +20,15 @@
package aggregation
-import "github.com/m3db/m3/src/metrics/aggregation"
+import (
+ "github.com/m3db/m3/src/metrics/aggregation"
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/uber-go/tally"
+)
var (
- defaultOptions Options
+ defaultHasExpensiveAggregations = false
)
// Options is the options for aggregations.
@@ -31,11 +36,67 @@ type Options struct {
// HasExpensiveAggregations means expensive (multiplication/division)
// aggregation types are enabled.
HasExpensiveAggregations bool
+ // Metrics is a set of aggregation metrics.
+ Metrics Metrics
+}
+
+// Metrics is a set of metrics that can be used by elements.
+type Metrics struct {
+ Counter CounterMetrics
+ Gauge GaugeMetrics
+}
+
+// CounterMetrics is a set of counter metrics that can be used by all counters.
+type CounterMetrics struct {
+ valuesOutOfOrder tally.Counter
+}
+
+// GaugeMetrics is a set of gauge metrics can be used by all gauges.
+type GaugeMetrics struct {
+ valuesOutOfOrder tally.Counter
+}
+
+// NewMetrics creates a set of aggregation metrics.
+func NewMetrics(scope tally.Scope) Metrics {
+ scope = scope.SubScope("aggregation")
+ return Metrics{
+ Counter: newCounterMetrics(scope.SubScope("counters")),
+ Gauge: newGaugeMetrics(scope.SubScope("gauges")),
+ }
+}
+
+func newCounterMetrics(scope tally.Scope) CounterMetrics {
+ return CounterMetrics{
+ valuesOutOfOrder: scope.Counter("values-out-of-order"),
+ }
+}
+
+// IncValuesOutOfOrder increments the values-out-of-order counter, or is a no-op if uninitialized.
+func (m CounterMetrics) IncValuesOutOfOrder() {
+ if m.valuesOutOfOrder != nil {
+ m.valuesOutOfOrder.Inc(1)
+ }
+}
+
+func newGaugeMetrics(scope tally.Scope) GaugeMetrics {
+ return GaugeMetrics{
+ valuesOutOfOrder: scope.Counter("values-out-of-order"),
+ }
+}
+
+// IncValuesOutOfOrder increments the values-out-of-order counter, or is a no-op if uninitialized.
+func (m GaugeMetrics) IncValuesOutOfOrder() {
+ if m.valuesOutOfOrder != nil {
+ m.valuesOutOfOrder.Inc(1)
+ }
}
// NewOptions creates new aggregation options.
-func NewOptions() Options {
- return defaultOptions
+func NewOptions(instrumentOpts instrument.Options) Options {
+ return Options{
+ HasExpensiveAggregations: defaultHasExpensiveAggregations,
+ Metrics: NewMetrics(instrumentOpts.MetricsScope()),
+ }
}
// ResetSetData resets the aggregation options.
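NewOptions now threads the instrument scope into the aggregation metrics, so the out-of-order counters become observable; a sketch using a tally test scope (the counter name follows the subscope layout above, with tally's "+" suffix for empty tags):

package main

import (
	"fmt"

	"github.com/m3db/m3/src/aggregator/aggregation"
	"github.com/m3db/m3/src/x/instrument"

	"github.com/uber-go/tally"
)

func main() {
	scope := tally.NewTestScope("", nil)
	opts := aggregation.NewOptions(instrument.NewOptions().SetMetricsScope(scope))

	// Simulate one out-of-order counter value.
	opts.Metrics.Counter.IncValuesOutOfOrder()

	counters := scope.Snapshot().Counters()
	c, ok := counters["aggregation.counters.values-out-of-order+"]
	fmt.Println(ok, c.Value()) // true 1
}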
diff --git a/src/aggregator/aggregation/options_test.go b/src/aggregator/aggregation/options_test.go
index 00156ada84..e216b045ef 100644
--- a/src/aggregator/aggregation/options_test.go
+++ b/src/aggregator/aggregation/options_test.go
@@ -24,12 +24,13 @@ import (
"testing"
"github.com/m3db/m3/src/metrics/aggregation"
+ "github.com/m3db/m3/src/x/instrument"
"github.com/stretchr/testify/require"
)
func TestOptions(t *testing.T) {
- o := NewOptions()
+ o := NewOptions(instrument.NewOptions())
require.False(t, o.HasExpensiveAggregations)
o.ResetSetData(nil)
diff --git a/src/aggregator/aggregation/timer.go b/src/aggregator/aggregation/timer.go
index 9ae2b2d122..6b7fc3d4f7 100644
--- a/src/aggregator/aggregation/timer.go
+++ b/src/aggregator/aggregation/timer.go
@@ -21,6 +21,8 @@
package aggregation
import (
+ "time"
+
"github.com/m3db/m3/src/aggregator/aggregation/quantile/cm"
"github.com/m3db/m3/src/metrics/aggregation"
)
@@ -29,6 +31,7 @@ import (
type Timer struct {
Options
+ lastAt time.Time
count int64 // Number of values received.
sum float64 // Sum of the values.
sumSq float64 // Sum of squared values.
@@ -46,7 +49,30 @@ func NewTimer(quantiles []float64, streamOpts cm.Options, opts Options) Timer {
}
// Add adds a timer value.
-func (t *Timer) Add(value float64) {
+func (t *Timer) Add(timestamp time.Time, value float64) {
+ t.recordLastAt(timestamp)
+ t.addValue(value)
+}
+
+// AddBatch adds a batch of timer values.
+func (t *Timer) AddBatch(timestamp time.Time, values []float64) {
+ // Record last at just once.
+ t.recordLastAt(timestamp)
+ for _, v := range values {
+ t.addValue(v)
+ }
+}
+
+func (t *Timer) recordLastAt(timestamp time.Time) {
+ if t.lastAt.IsZero() || timestamp.After(t.lastAt) {
+ // NB(r): Only set the last value if this value arrives
+ // after the wall clock timestamp of previous values, not
+ // the arrival time (i.e. order received).
+ t.lastAt = timestamp
+ }
+}
+
+func (t *Timer) addValue(value float64) {
t.count++
t.sum += value
t.stream.Add(value)
@@ -56,12 +82,8 @@ func (t *Timer) Add(value float64) {
}
}
-// AddBatch adds a batch of timer values.
-func (t *Timer) AddBatch(values []float64) {
- for _, v := range values {
- t.Add(v)
- }
-}
+// LastAt returns the time of the last value received.
+func (t *Timer) LastAt() time.Time { return t.lastAt }
// Quantile returns the value at a given quantile.
func (t *Timer) Quantile(q float64) float64 {
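AddBatch now records lastAt once per batch before adding each value; a short usage sketch against the new Timer signatures (the quantile set is illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/m3db/m3/src/aggregator/aggregation"
	"github.com/m3db/m3/src/aggregator/aggregation/quantile/cm"
	"github.com/m3db/m3/src/x/instrument"
)

func main() {
	opts := aggregation.NewOptions(instrument.NewOptions())
	timer := aggregation.NewTimer([]float64{0.5, 0.99}, cm.NewOptions(), opts)

	at := time.Now()
	timer.Add(at, 42)
	// One lastAt update for the whole batch, then each value is added.
	timer.AddBatch(at.Add(time.Second), []float64{1, 2, 3})

	fmt.Println(timer.Count(), timer.Quantile(0.5), timer.LastAt())
}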
diff --git a/src/aggregator/aggregation/timer_benchmark_test.go b/src/aggregator/aggregation/timer_benchmark_test.go
index 08886f97d7..103c15951c 100644
--- a/src/aggregator/aggregation/timer_benchmark_test.go
+++ b/src/aggregator/aggregation/timer_benchmark_test.go
@@ -22,18 +22,21 @@ package aggregation
import (
"testing"
+ "time"
"github.com/m3db/m3/src/aggregator/aggregation/quantile/cm"
+ "github.com/m3db/m3/src/x/instrument"
)
func getTimer() Timer {
- opts := NewOptions()
+ opts := NewOptions(instrument.NewOptions())
opts.ResetSetData(testAggTypes)
+ at := time.Now()
timer := NewTimer(testQuantiles, cm.NewOptions(), opts)
for i := 1; i <= 100; i++ {
- timer.Add(float64(i))
+ timer.Add(at, float64(i))
}
return timer
}
diff --git a/src/aggregator/aggregation/timer_test.go b/src/aggregator/aggregation/timer_test.go
index 01db582e35..adc80336cc 100644
--- a/src/aggregator/aggregation/timer_test.go
+++ b/src/aggregator/aggregation/timer_test.go
@@ -23,9 +23,11 @@ package aggregation
import (
"math"
"testing"
+ "time"
"github.com/m3db/m3/src/aggregator/aggregation/quantile/cm"
"github.com/m3db/m3/src/metrics/aggregation"
+ "github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
"github.com/stretchr/testify/require"
@@ -56,21 +58,21 @@ func TestCreateTimerResetStream(t *testing.T) {
// Add a value to the timer and close the timer, which returns the
// underlying stream to the pool.
- timer := NewTimer(testQuantiles, streamOpts, NewOptions())
- timer.Add(1.0)
+ timer := NewTimer(testQuantiles, streamOpts, NewOptions(instrument.NewOptions()))
+ timer.Add(time.Now(), 1.0)
require.Equal(t, 1.0, timer.Min())
timer.Close()
// Create a new timer and assert the underlying stream has been closed.
- timer = NewTimer(testQuantiles, streamOpts, NewOptions())
- timer.Add(1.0)
+ timer = NewTimer(testQuantiles, streamOpts, NewOptions(instrument.NewOptions()))
+ timer.Add(time.Now(), 1.0)
require.Equal(t, 1.0, timer.Min())
timer.Close()
require.Equal(t, 0.0, timer.stream.Min())
}
func TestTimerAggregations(t *testing.T) {
- opts := NewOptions()
+ opts := NewOptions(instrument.NewOptions())
opts.ResetSetData(testAggTypes)
timer := NewTimer(testQuantiles, cm.NewOptions(), opts)
@@ -89,8 +91,9 @@ func TestTimerAggregations(t *testing.T) {
require.Equal(t, 0.0, timer.Quantile(0.99))
// Add values.
+ at := time.Now()
for i := 1; i <= 100; i++ {
- timer.Add(float64(i))
+ timer.Add(at, float64(i))
}
// Validate the timer values match expectations.
@@ -143,7 +146,7 @@ func TestTimerAggregations(t *testing.T) {
}
func TestTimerAggregationsNotExpensive(t *testing.T) {
- opts := NewOptions()
+ opts := NewOptions(instrument.NewOptions())
opts.ResetSetData(aggregation.Types{aggregation.Sum})
timer := NewTimer(testQuantiles, cm.NewOptions(), opts)
@@ -152,8 +155,9 @@ func TestTimerAggregationsNotExpensive(t *testing.T) {
require.False(t, timer.HasExpensiveAggregations)
// Add values.
+ at := time.Now()
for i := 1; i <= 100; i++ {
- timer.Add(float64(i))
+ timer.Add(at, float64(i))
}
// All Non expensive calculations should be performed.
diff --git a/src/aggregator/aggregator/aggregation.go b/src/aggregator/aggregator/aggregation.go
index b1d3f7a4ac..0f51d89eb7 100644
--- a/src/aggregator/aggregator/aggregation.go
+++ b/src/aggregator/aggregator/aggregation.go
@@ -21,6 +21,8 @@
package aggregator
import (
+ "time"
+
"github.com/m3db/m3/src/aggregator/aggregation"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
)
@@ -34,23 +36,44 @@ func newCounterAggregation(c aggregation.Counter) counterAggregation {
return counterAggregation{Counter: c}
}
-func (c *counterAggregation) Add(value float64) { c.Counter.Update(int64(value)) }
-func (c *counterAggregation) AddUnion(mu unaggregated.MetricUnion) { c.Counter.Update(mu.CounterVal) }
+func (a *counterAggregation) Add(t time.Time, value float64) {
+ a.Counter.Update(t, int64(value))
+}
+
+func (a *counterAggregation) AddUnion(t time.Time, mu unaggregated.MetricUnion) {
+ a.Counter.Update(t, mu.CounterVal)
+}
// timerAggregation is a timer aggregation.
type timerAggregation struct {
aggregation.Timer
}
-func newTimerAggregation(t aggregation.Timer) timerAggregation { return timerAggregation{Timer: t} }
-func (t *timerAggregation) Add(value float64) { t.Timer.Add(value) }
-func (t *timerAggregation) AddUnion(mu unaggregated.MetricUnion) { t.Timer.AddBatch(mu.BatchTimerVal) }
+func newTimerAggregation(t aggregation.Timer) timerAggregation {
+ return timerAggregation{Timer: t}
+}
+
+func (a *timerAggregation) Add(timestamp time.Time, value float64) {
+ a.Timer.Add(timestamp, value)
+}
+
+func (a *timerAggregation) AddUnion(timestamp time.Time, mu unaggregated.MetricUnion) {
+ a.Timer.AddBatch(timestamp, mu.BatchTimerVal)
+}
// gaugeAggregation is a gauge aggregation.
type gaugeAggregation struct {
aggregation.Gauge
}
-func newGaugeAggregation(g aggregation.Gauge) gaugeAggregation { return gaugeAggregation{Gauge: g} }
-func (g *gaugeAggregation) Add(value float64) { g.Gauge.Update(value) }
-func (g *gaugeAggregation) AddUnion(mu unaggregated.MetricUnion) { g.Gauge.Update(mu.GaugeVal) }
+func newGaugeAggregation(g aggregation.Gauge) gaugeAggregation {
+ return gaugeAggregation{Gauge: g}
+}
+
+func (a *gaugeAggregation) Add(t time.Time, value float64) {
+ a.Gauge.Update(t, value)
+}
+
+func (a *gaugeAggregation) AddUnion(t time.Time, mu unaggregated.MetricUnion) {
+ a.Gauge.Update(t, mu.GaugeVal)
+}
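All three wrappers now expose the same timestamped shape; the interface below is an assumption for illustration (the package does not declare it), showing what the shared surface looks like from inside package aggregator:

// timedAggregation is a hypothetical in-package interface; the counter,
// timer and gauge aggregations all satisfy it after this change.
type timedAggregation interface {
	Add(timestamp time.Time, value float64)
	AddUnion(timestamp time.Time, mu unaggregated.MetricUnion)
}

var (
	_ timedAggregation = (*counterAggregation)(nil)
	_ timedAggregation = (*timerAggregation)(nil)
	_ timedAggregation = (*gaugeAggregation)(nil)
)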
diff --git a/src/aggregator/aggregator/aggregation_test.go b/src/aggregator/aggregator/aggregation_test.go
index b6ff036038..8c7b428268 100644
--- a/src/aggregator/aggregator/aggregation_test.go
+++ b/src/aggregator/aggregator/aggregation_test.go
@@ -22,11 +22,13 @@ package aggregator
import (
"testing"
+ "time"
"github.com/m3db/m3/src/aggregator/aggregation"
"github.com/m3db/m3/src/aggregator/aggregation/quantile/cm"
"github.com/m3db/m3/src/metrics/metric"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
+ "github.com/m3db/m3/src/x/instrument"
"github.com/stretchr/testify/require"
)
@@ -53,54 +55,54 @@ var (
)
func TestCounterAggregationAdd(t *testing.T) {
- c := newCounterAggregation(aggregation.NewCounter(aggregation.NewOptions()))
+ c := newCounterAggregation(aggregation.NewCounter(aggregation.NewOptions(instrument.NewOptions())))
for _, v := range testAggregationValues {
- c.Add(v)
+ c.Add(time.Now(), v)
}
require.Equal(t, int64(4), c.Count())
require.Equal(t, int64(799), c.Sum())
}
func TestCounterAggregationAddUnion(t *testing.T) {
- c := newCounterAggregation(aggregation.NewCounter(aggregation.NewOptions()))
+ c := newCounterAggregation(aggregation.NewCounter(aggregation.NewOptions(instrument.NewOptions())))
for _, v := range testAggregationUnions {
- c.AddUnion(v)
+ c.AddUnion(time.Now(), v)
}
require.Equal(t, int64(3), c.Count())
require.Equal(t, int64(1234), c.Sum())
}
func TestTimerAggregationAdd(t *testing.T) {
- tm := newTimerAggregation(aggregation.NewTimer([]float64{0.5}, cm.NewOptions(), aggregation.NewOptions()))
+ tm := newTimerAggregation(aggregation.NewTimer([]float64{0.5}, cm.NewOptions(), aggregation.NewOptions(instrument.NewOptions())))
for _, v := range testAggregationValues {
- tm.Add(v)
+ tm.Add(time.Now(), v)
}
require.Equal(t, int64(4), tm.Count())
require.Equal(t, 799.2, tm.Sum())
}
func TestTimerAggregationAddUnion(t *testing.T) {
- tm := newTimerAggregation(aggregation.NewTimer([]float64{0.5}, cm.NewOptions(), aggregation.NewOptions()))
+ tm := newTimerAggregation(aggregation.NewTimer([]float64{0.5}, cm.NewOptions(), aggregation.NewOptions(instrument.NewOptions())))
for _, v := range testAggregationUnions {
- tm.AddUnion(v)
+ tm.AddUnion(time.Now(), v)
}
require.Equal(t, int64(5), tm.Count())
require.Equal(t, 18.0, tm.Sum())
}
func TestGaugeAggregationAdd(t *testing.T) {
- g := newGaugeAggregation(aggregation.NewGauge(aggregation.NewOptions()))
+ g := newGaugeAggregation(aggregation.NewGauge(aggregation.NewOptions(instrument.NewOptions())))
for _, v := range testAggregationValues {
- g.Add(v)
+ g.Add(time.Now(), v)
}
require.Equal(t, int64(4), g.Count())
require.Equal(t, 799.2, g.Sum())
}
func TestGaugeAggregationAddUnion(t *testing.T) {
- g := newGaugeAggregation(aggregation.NewGauge(aggregation.NewOptions()))
+ g := newGaugeAggregation(aggregation.NewGauge(aggregation.NewOptions(instrument.NewOptions())))
for _, v := range testAggregationUnions {
- g.AddUnion(v)
+ g.AddUnion(time.Now(), v)
}
require.Equal(t, int64(3), g.Count())
require.Equal(t, 123.456, g.Sum())
diff --git a/src/aggregator/aggregator/aggregator.go b/src/aggregator/aggregator/aggregator.go
index 99406e59e1..5e0df44e9c 100644
--- a/src/aggregator/aggregator/aggregator.go
+++ b/src/aggregator/aggregator/aggregator.go
@@ -30,6 +30,7 @@ import (
"time"
"github.com/m3db/m3/src/aggregator/aggregator/handler"
+ "github.com/m3db/m3/src/aggregator/aggregator/handler/writer"
"github.com/m3db/m3/src/aggregator/client"
"github.com/m3db/m3/src/aggregator/sharding"
"github.com/m3db/m3/src/cluster/placement"
@@ -39,6 +40,7 @@ import (
"github.com/m3db/m3/src/metrics/metric/aggregated"
"github.com/m3db/m3/src/metrics/metric/id"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
+ "github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/x/clock"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/instrument"
@@ -71,9 +73,15 @@ type Aggregator interface {
// AddTimed adds a timed metric with metadata.
AddTimed(metric aggregated.Metric, metadata metadata.TimedMetadata) error
+ // AddTimedWithStagedMetadatas adds a timed metric with staged metadatas.
+ AddTimedWithStagedMetadatas(metric aggregated.Metric, metas metadata.StagedMetadatas) error
+
// AddForwarded adds a forwarded metric with metadata.
AddForwarded(metric aggregated.ForwardedMetric, metadata metadata.ForwardMetadata) error
+ // AddPassthrough adds a passthrough metric with storage policy.
+ AddPassthrough(metric aggregated.Metric, storagePolicy policy.StoragePolicy) error
+
// Resign stops the aggregator from participating in leader election and resigns
// from ongoing campaign if any.
Resign() error
@@ -100,6 +108,7 @@ type aggregator struct {
electionManager ElectionManager
flushManager FlushManager
flushHandler handler.Handler
+ passthroughWriter writer.Writer
adminClient client.AdminClient
resignTimeout time.Duration
@@ -122,7 +131,7 @@ type aggregator struct {
func NewAggregator(opts Options) Aggregator {
iOpts := opts.InstrumentOptions()
scope := iOpts.MetricsScope()
- samplingRate := iOpts.MetricsSamplingRate()
+ timerOpts := iOpts.TimerOptions()
return &aggregator{
opts: opts,
nowFn: opts.ClockOptions().NowFn(),
@@ -134,11 +143,12 @@ func NewAggregator(opts Options) Aggregator {
electionManager: opts.ElectionManager(),
flushManager: opts.FlushManager(),
flushHandler: opts.FlushHandler(),
+ passthroughWriter: opts.PassthroughWriter(),
adminClient: opts.AdminClient(),
resignTimeout: opts.ResignTimeout(),
doneCh: make(chan struct{}),
sleepFn: time.Sleep,
- metrics: newAggregatorMetrics(scope, samplingRate, opts.MaxAllowedForwardingDelayFn()),
+ metrics: newAggregatorMetrics(scope, timerOpts, opts.MaxAllowedForwardingDelayFn()),
logger: iOpts.Logger(),
}
}
@@ -209,6 +219,25 @@ func (agg *aggregator) AddTimed(
return nil
}
+func (agg *aggregator) AddTimedWithStagedMetadatas(
+ metric aggregated.Metric,
+ metas metadata.StagedMetadatas,
+) error {
+ callStart := agg.nowFn()
+ agg.metrics.timed.Inc(1)
+ shard, err := agg.shardFor(metric.ID)
+ if err != nil {
+ agg.metrics.addTimed.ReportError(err)
+ return err
+ }
+ if err = shard.AddTimedWithStagedMetadatas(metric, metas); err != nil {
+ agg.metrics.addTimed.ReportError(err)
+ return err
+ }
+ agg.metrics.addTimed.ReportSuccess(agg.nowFn().Sub(callStart))
+ return nil
+}
+
func (agg *aggregator) AddForwarded(
metric aggregated.ForwardedMetric,
metadata metadata.ForwardMetadata,
@@ -235,6 +264,43 @@ func (agg *aggregator) AddForwarded(
return nil
}
+func (agg *aggregator) AddPassthrough(
+ metric aggregated.Metric,
+ storagePolicy policy.StoragePolicy,
+) error {
+ callStart := agg.nowFn()
+ agg.metrics.passthrough.Inc(1)
+
+ if agg.electionManager.ElectionState() == FollowerState {
+ agg.metrics.addPassthrough.ReportFollowerNoop()
+ return nil
+ }
+
+ pw, err := agg.passWriter()
+ if err != nil {
+ agg.metrics.addPassthrough.ReportError(err)
+ return err
+ }
+
+ mp := aggregated.ChunkedMetricWithStoragePolicy{
+ ChunkedMetric: aggregated.ChunkedMetric{
+ ChunkedID: id.ChunkedID{
+ Data: []byte(metric.ID),
+ },
+ TimeNanos: metric.TimeNanos,
+ Value: metric.Value,
+ },
+ StoragePolicy: storagePolicy,
+ }
+
+ if err := pw.Write(mp); err != nil {
+ agg.metrics.addPassthrough.ReportError(err)
+ return err
+ }
+ agg.metrics.addPassthrough.ReportSuccess(agg.nowFn().Sub(callStart))
+ return nil
+}
+
func (agg *aggregator) Resign() error {
ctx, cancel := context.WithTimeout(context.Background(), agg.resignTimeout)
defer cancel()
@@ -262,6 +328,7 @@ func (agg *aggregator) Close() error {
agg.closeShardSetWithLock()
}
agg.flushHandler.Close()
+ agg.passthroughWriter.Close()
if agg.adminClient != nil {
agg.adminClient.Close()
}
@@ -269,6 +336,21 @@ func (agg *aggregator) Close() error {
return nil
}
+func (agg *aggregator) passWriter() (writer.Writer, error) {
+ agg.RLock()
+ defer agg.RUnlock()
+
+ if agg.state != aggregatorOpen {
+ return nil, errAggregatorNotOpenOrClosed
+ }
+
+ if agg.electionManager.ElectionState() == FollowerState {
+ return writer.NewBlackholeWriter(), nil
+ }
+
+ return agg.passthroughWriter, nil
+}
+
func (agg *aggregator) shardFor(id id.RawID) (*aggregatorShard, error) {
agg.RLock()
shard, err := agg.shardForWithLock(id, noUpdateShards)
@@ -631,11 +713,11 @@ type aggregatorAddMetricMetrics struct {
func newAggregatorAddMetricMetrics(
scope tally.Scope,
- samplingRate float64,
+ opts instrument.TimerOptions,
) aggregatorAddMetricMetrics {
return aggregatorAddMetricMetrics{
success: scope.Counter("success"),
- successLatency: instrument.MustCreateSampledTimer(scope.Timer("success-latency"), samplingRate),
+ successLatency: instrument.NewTimer(scope, "success-latency", opts),
shardNotOwned: scope.Tagged(map[string]string{
"reason": "shard-not-owned",
}).Counter("errors"),
@@ -685,10 +767,10 @@ type aggregatorAddUntimedMetrics struct {
func newAggregatorAddUntimedMetrics(
scope tally.Scope,
- samplingRate float64,
+ opts instrument.TimerOptions,
) aggregatorAddUntimedMetrics {
return aggregatorAddUntimedMetrics{
- aggregatorAddMetricMetrics: newAggregatorAddMetricMetrics(scope, samplingRate),
+ aggregatorAddMetricMetrics: newAggregatorAddMetricMetrics(scope, opts),
invalidMetricTypes: scope.Tagged(map[string]string{
"reason": "invalid-metric-types",
}).Counter("errors"),
@@ -712,10 +794,10 @@ type aggregatorAddTimedMetrics struct {
func newAggregatorAddTimedMetrics(
scope tally.Scope,
- samplingRate float64,
+ opts instrument.TimerOptions,
) aggregatorAddTimedMetrics {
return aggregatorAddTimedMetrics{
- aggregatorAddMetricMetrics: newAggregatorAddMetricMetrics(scope, samplingRate),
+ aggregatorAddMetricMetrics: newAggregatorAddMetricMetrics(scope, opts),
tooFarInTheFuture: scope.Tagged(map[string]string{
"reason": "too-far-in-the-future",
}).Counter("errors"),
@@ -738,6 +820,29 @@ func (m *aggregatorAddTimedMetrics) ReportError(err error) {
}
}
+type aggregatorAddPassthroughMetrics struct {
+ aggregatorAddMetricMetrics
+ followerNoop tally.Counter
+}
+
+func newAggregatorAddPassthroughMetrics(
+ scope tally.Scope,
+ opts instrument.TimerOptions,
+) aggregatorAddPassthroughMetrics {
+ return aggregatorAddPassthroughMetrics{
+ aggregatorAddMetricMetrics: newAggregatorAddMetricMetrics(scope, opts),
+ followerNoop: scope.Counter("follower-noop"),
+ }
+}
+
+func (m *aggregatorAddPassthroughMetrics) ReportError(err error) {
+ m.aggregatorAddMetricMetrics.ReportError(err)
+}
+
+func (m *aggregatorAddPassthroughMetrics) ReportFollowerNoop() {
+ m.followerNoop.Inc(1)
+}
+
type latencyBucketKey struct {
resolution time.Duration
numForwardedTimes int
@@ -754,11 +859,11 @@ type aggregatorAddForwardedMetrics struct {
func newAggregatorAddForwardedMetrics(
scope tally.Scope,
- samplingRate float64,
+ opts instrument.TimerOptions,
maxAllowedForwardingDelayFn MaxAllowedForwardingDelayFn,
) aggregatorAddForwardedMetrics {
return aggregatorAddForwardedMetrics{
- aggregatorAddMetricMetrics: newAggregatorAddMetricMetrics(scope, samplingRate),
+ aggregatorAddMetricMetrics: newAggregatorAddMetricMetrics(scope, opts),
scope: scope,
maxAllowedForwardingDelayFn: maxAllowedForwardingDelayFn,
forwardingLatency: make(map[latencyBucketKey]tally.Histogram),
@@ -908,47 +1013,52 @@ func newAggregatorShardSetIDMetrics(scope tally.Scope) aggregatorShardSetIDMetri
}
type aggregatorMetrics struct {
- counters tally.Counter
- timers tally.Counter
- timerBatches tally.Counter
- gauges tally.Counter
- forwarded tally.Counter
- timed tally.Counter
- addUntimed aggregatorAddUntimedMetrics
- addTimed aggregatorAddTimedMetrics
- addForwarded aggregatorAddForwardedMetrics
- placement aggregatorPlacementMetrics
- shards aggregatorShardsMetrics
- shardSetID aggregatorShardSetIDMetrics
- tick aggregatorTickMetrics
+ counters tally.Counter
+ timers tally.Counter
+ timerBatches tally.Counter
+ gauges tally.Counter
+ forwarded tally.Counter
+ timed tally.Counter
+ passthrough tally.Counter
+ addUntimed aggregatorAddUntimedMetrics
+ addTimed aggregatorAddTimedMetrics
+ addForwarded aggregatorAddForwardedMetrics
+ addPassthrough aggregatorAddPassthroughMetrics
+ placement aggregatorPlacementMetrics
+ shards aggregatorShardsMetrics
+ shardSetID aggregatorShardSetIDMetrics
+ tick aggregatorTickMetrics
}
func newAggregatorMetrics(
scope tally.Scope,
- samplingRate float64,
+ opts instrument.TimerOptions,
maxAllowedForwardingDelayFn MaxAllowedForwardingDelayFn,
) aggregatorMetrics {
addUntimedScope := scope.SubScope("addUntimed")
addTimedScope := scope.SubScope("addTimed")
addForwardedScope := scope.SubScope("addForwarded")
+ addPassthroughScope := scope.SubScope("addPassthrough")
placementScope := scope.SubScope("placement")
shardsScope := scope.SubScope("shards")
shardSetIDScope := scope.SubScope("shard-set-id")
tickScope := scope.SubScope("tick")
return aggregatorMetrics{
- counters: scope.Counter("counters"),
- timers: scope.Counter("timers"),
- timerBatches: scope.Counter("timer-batches"),
- gauges: scope.Counter("gauges"),
- forwarded: scope.Counter("forwarded"),
- timed: scope.Counter("timed"),
- addUntimed: newAggregatorAddUntimedMetrics(addUntimedScope, samplingRate),
- addTimed: newAggregatorAddTimedMetrics(addTimedScope, samplingRate),
- addForwarded: newAggregatorAddForwardedMetrics(addForwardedScope, samplingRate, maxAllowedForwardingDelayFn),
- placement: newAggregatorPlacementMetrics(placementScope),
- shards: newAggregatorShardsMetrics(shardsScope),
- shardSetID: newAggregatorShardSetIDMetrics(shardSetIDScope),
- tick: newAggregatorTickMetrics(tickScope),
+ counters: scope.Counter("counters"),
+ timers: scope.Counter("timers"),
+ timerBatches: scope.Counter("timer-batches"),
+ gauges: scope.Counter("gauges"),
+ forwarded: scope.Counter("forwarded"),
+ timed: scope.Counter("timed"),
+ passthrough: scope.Counter("passthrough"),
+ addUntimed: newAggregatorAddUntimedMetrics(addUntimedScope, opts),
+ addTimed: newAggregatorAddTimedMetrics(addTimedScope, opts),
+ addForwarded: newAggregatorAddForwardedMetrics(addForwardedScope, opts, maxAllowedForwardingDelayFn),
+ addPassthrough: newAggregatorAddPassthroughMetrics(addPassthroughScope, opts),
+ placement: newAggregatorPlacementMetrics(placementScope),
+ shards: newAggregatorShardsMetrics(shardsScope),
+ shardSetID: newAggregatorShardSetIDMetrics(shardSetIDScope),
+ tick: newAggregatorTickMetrics(tickScope),
}
}
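A leader-side caller hands AddPassthrough an already-aggregated metric plus the storage policy to write it under; a sketch fragment (agg is assumed to be an open Aggregator on the elected leader, imports as in the surrounding files, values illustrative):

// Sketch: forward a pre-aggregated value without re-aggregating it.
pm := aggregated.Metric{
	Type:      metric.CounterType,
	ID:        []byte("passthrough.example"),
	TimeNanos: time.Now().UnixNano(),
	Value:     1000,
}
sp := policy.NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour)
if err := agg.AddPassthrough(pm, sp); err != nil {
	// Followers no-op successfully; an error here means the aggregator is
	// not open or the passthrough write itself failed.
	panic(err)
}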
diff --git a/src/aggregator/aggregator/aggregator_mock.go b/src/aggregator/aggregator/aggregator_mock.go
index 2f52191a6f..5107239a52 100644
--- a/src/aggregator/aggregator/aggregator_mock.go
+++ b/src/aggregator/aggregator/aggregator_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/m3db/m3/src/aggregator/aggregator (interfaces: ElectionManager,FlushTimesManager,PlacementManager)
+// Source: github.com/m3db/m3/src/aggregator/aggregator (interfaces: Aggregator,ElectionManager,FlushTimesManager,PlacementManager)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -31,11 +31,164 @@ import (
"github.com/m3db/m3/src/aggregator/generated/proto/flush"
"github.com/m3db/m3/src/cluster/placement"
"github.com/m3db/m3/src/cluster/shard"
+ "github.com/m3db/m3/src/metrics/metadata"
+ "github.com/m3db/m3/src/metrics/metric/aggregated"
+ "github.com/m3db/m3/src/metrics/metric/unaggregated"
+ "github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/x/watch"
"github.com/golang/mock/gomock"
)
+// MockAggregator is a mock of Aggregator interface
+type MockAggregator struct {
+ ctrl *gomock.Controller
+ recorder *MockAggregatorMockRecorder
+}
+
+// MockAggregatorMockRecorder is the mock recorder for MockAggregator
+type MockAggregatorMockRecorder struct {
+ mock *MockAggregator
+}
+
+// NewMockAggregator creates a new mock instance
+func NewMockAggregator(ctrl *gomock.Controller) *MockAggregator {
+ mock := &MockAggregator{ctrl: ctrl}
+ mock.recorder = &MockAggregatorMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockAggregator) EXPECT() *MockAggregatorMockRecorder {
+ return m.recorder
+}
+
+// AddForwarded mocks base method
+func (m *MockAggregator) AddForwarded(arg0 aggregated.ForwardedMetric, arg1 metadata.ForwardMetadata) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddForwarded", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AddForwarded indicates an expected call of AddForwarded
+func (mr *MockAggregatorMockRecorder) AddForwarded(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddForwarded", reflect.TypeOf((*MockAggregator)(nil).AddForwarded), arg0, arg1)
+}
+
+// AddPassthrough mocks base method
+func (m *MockAggregator) AddPassthrough(arg0 aggregated.Metric, arg1 policy.StoragePolicy) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddPassthrough", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AddPassthrough indicates an expected call of AddPassthrough
+func (mr *MockAggregatorMockRecorder) AddPassthrough(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPassthrough", reflect.TypeOf((*MockAggregator)(nil).AddPassthrough), arg0, arg1)
+}
+
+// AddTimed mocks base method
+func (m *MockAggregator) AddTimed(arg0 aggregated.Metric, arg1 metadata.TimedMetadata) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddTimed", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AddTimed indicates an expected call of AddTimed
+func (mr *MockAggregatorMockRecorder) AddTimed(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTimed", reflect.TypeOf((*MockAggregator)(nil).AddTimed), arg0, arg1)
+}
+
+// AddTimedWithStagedMetadatas mocks base method
+func (m *MockAggregator) AddTimedWithStagedMetadatas(arg0 aggregated.Metric, arg1 metadata.StagedMetadatas) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddTimedWithStagedMetadatas", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AddTimedWithStagedMetadatas indicates an expected call of AddTimedWithStagedMetadatas
+func (mr *MockAggregatorMockRecorder) AddTimedWithStagedMetadatas(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTimedWithStagedMetadatas", reflect.TypeOf((*MockAggregator)(nil).AddTimedWithStagedMetadatas), arg0, arg1)
+}
+
+// AddUntimed mocks base method
+func (m *MockAggregator) AddUntimed(arg0 unaggregated.MetricUnion, arg1 metadata.StagedMetadatas) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddUntimed", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AddUntimed indicates an expected call of AddUntimed
+func (mr *MockAggregatorMockRecorder) AddUntimed(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUntimed", reflect.TypeOf((*MockAggregator)(nil).AddUntimed), arg0, arg1)
+}
+
+// Close mocks base method
+func (m *MockAggregator) Close() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Close")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Close indicates an expected call of Close
+func (mr *MockAggregatorMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockAggregator)(nil).Close))
+}
+
+// Open mocks base method
+func (m *MockAggregator) Open() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Open")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Open indicates an expected call of Open
+func (mr *MockAggregatorMockRecorder) Open() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Open", reflect.TypeOf((*MockAggregator)(nil).Open))
+}
+
+// Resign mocks base method
+func (m *MockAggregator) Resign() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Resign")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Resign indicates an expected call of Resign
+func (mr *MockAggregatorMockRecorder) Resign() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Resign", reflect.TypeOf((*MockAggregator)(nil).Resign))
+}
+
+// Status mocks base method
+func (m *MockAggregator) Status() RuntimeStatus {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Status")
+ ret0, _ := ret[0].(RuntimeStatus)
+ return ret0
+}
+
+// Status indicates an expected call of Status
+func (mr *MockAggregatorMockRecorder) Status() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockAggregator)(nil).Status))
+}
+
// MockElectionManager is a mock of ElectionManager interface
type MockElectionManager struct {
ctrl *gomock.Controller
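With the regenerated mock covering the whole Aggregator interface, callers can be unit-tested against it; a minimal gomock sketch (test name and wiring are illustrative):

func TestHandlerUsesAggregator(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	agg := NewMockAggregator(ctrl)
	agg.EXPECT().
		AddPassthrough(gomock.Any(), gomock.Any()).
		Return(nil)

	// Code under test would take agg as its Aggregator dependency and
	// trigger exactly one passthrough write.
	_ = agg.AddPassthrough(testPassthroughMetric, testPassthroughStoragePolicy)
}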
diff --git a/src/aggregator/aggregator/aggregator_test.go b/src/aggregator/aggregator/aggregator_test.go
index 5a4b3cd079..217c05ffa1 100644
--- a/src/aggregator/aggregator/aggregator_test.go
+++ b/src/aggregator/aggregator/aggregator_test.go
@@ -44,6 +44,7 @@ import (
"github.com/m3db/m3/src/metrics/pipeline"
"github.com/m3db/m3/src/metrics/pipeline/applied"
"github.com/m3db/m3/src/metrics/policy"
+ "github.com/m3db/m3/src/x/instrument"
xtime "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
@@ -77,6 +78,12 @@ var (
TimeNanos: 12345,
Values: []float64{76109, 23891},
}
+ testPassthroughMetric = aggregated.Metric{
+ Type: metric.CounterType,
+ ID: []byte("testPassthrough"),
+ TimeNanos: 12345,
+ Value: 1000,
+ }
testInvalidMetric = unaggregated.MetricUnion{
Type: metric.UnknownType,
ID: []byte("testInvalid"),
@@ -126,6 +133,7 @@ var (
SourceID: 1234,
NumForwardedTimes: 3,
}
+ testPassthroughStoragePolicy = policy.NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour)
)
func TestAggregatorOpenAlreadyOpen(t *testing.T) {
@@ -664,6 +672,25 @@ func TestAggregatorResignSuccess(t *testing.T) {
require.NoError(t, agg.Resign())
}
+func TestAggregatorAddPassthroughNotOpen(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ agg, _ := testAggregator(t, ctrl)
+ err := agg.AddPassthrough(testPassthroughMetric, testPassthroughStoragePolicy)
+ require.Equal(t, errAggregatorNotOpenOrClosed, err)
+}
+
+func TestAggregatorAddPassthroughSuccess(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ agg, _ := testAggregator(t, ctrl)
+ require.NoError(t, agg.Open())
+ err := agg.AddPassthrough(testPassthroughMetric, testPassthroughStoragePolicy)
+ require.NoError(t, err)
+}
+
func TestAggregatorStatus(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -919,7 +946,7 @@ func TestAggregatorOwnedShards(t *testing.T) {
func TestAggregatorAddMetricMetrics(t *testing.T) {
s := tally.NewTestScope("testScope", nil)
- m := newAggregatorAddUntimedMetrics(s, 1.0)
+ m := newAggregatorAddUntimedMetrics(s, instrument.TimerOptions{})
m.ReportSuccess(time.Second)
m.ReportError(errInvalidMetricType)
m.ReportError(errShardNotOwned)
@@ -964,7 +991,7 @@ func TestAggregatorAddMetricMetrics(t *testing.T) {
func TestAggregatorAddTimedMetrics(t *testing.T) {
s := tally.NewTestScope("testScope", nil)
- m := newAggregatorAddTimedMetrics(s, 1.0)
+ m := newAggregatorAddTimedMetrics(s, instrument.TimerOptions{})
m.ReportSuccess(time.Second)
m.ReportError(errShardNotOwned)
m.ReportError(errAggregatorShardNotWriteable)
@@ -1100,6 +1127,7 @@ func testOptions(ctrl *gomock.Controller) Options {
electionMgr.EXPECT().Reset().Return(nil).AnyTimes()
electionMgr.EXPECT().Open(gomock.Any()).Return(nil).AnyTimes()
electionMgr.EXPECT().Close().Return(nil).AnyTimes()
+ electionMgr.EXPECT().ElectionState().Return(LeaderState).AnyTimes()
flushManager := NewMockFlushManager(ctrl)
flushManager.EXPECT().Reset().Return(nil).AnyTimes()
@@ -1117,6 +1145,11 @@ func testOptions(ctrl *gomock.Controller) Options {
h.EXPECT().NewWriter(gomock.Any()).Return(w, nil).AnyTimes()
h.EXPECT().Close().AnyTimes()
+ pw := writer.NewMockWriter(ctrl)
+ pw.EXPECT().Write(gomock.Any()).Return(nil).AnyTimes()
+ pw.EXPECT().Flush().Return(nil).AnyTimes()
+ pw.EXPECT().Close().Return(nil).AnyTimes()
+
cl := client.NewMockAdminClient(ctrl)
cl.EXPECT().Flush().Return(nil).AnyTimes()
cl.EXPECT().Close().AnyTimes()
@@ -1133,6 +1166,7 @@ func testOptions(ctrl *gomock.Controller) Options {
SetElectionManager(electionMgr).
SetFlushManager(flushManager).
SetFlushHandler(h).
+ SetPassthroughWriter(pw).
SetAdminClient(cl).
SetMaxAllowedForwardingDelayFn(infiniteAllowedDelayFn).
SetBufferForFutureTimedMetric(math.MaxInt64).
diff --git a/src/aggregator/aggregator/capture/aggregator.go b/src/aggregator/aggregator/capture/aggregator.go
index 1695506e1f..d54ca38266 100644
--- a/src/aggregator/aggregator/capture/aggregator.go
+++ b/src/aggregator/aggregator/capture/aggregator.go
@@ -39,12 +39,14 @@ import (
type aggregator struct {
sync.RWMutex
- numMetricsAdded int
- countersWithMetadatas []unaggregated.CounterWithMetadatas
- batchTimersWithMetadatas []unaggregated.BatchTimerWithMetadatas
- gaugesWithMetadatas []unaggregated.GaugeWithMetadatas
- forwardedMetricsWithMetadata []aggregated.ForwardedMetricWithMetadata
- timedMetricsWithMetadata []aggregated.TimedMetricWithMetadata
+ numMetricsAdded int
+ countersWithMetadatas []unaggregated.CounterWithMetadatas
+ batchTimersWithMetadatas []unaggregated.BatchTimerWithMetadatas
+ gaugesWithMetadatas []unaggregated.GaugeWithMetadatas
+ forwardedMetricsWithMetadata []aggregated.ForwardedMetricWithMetadata
+ timedMetricsWithMetadata []aggregated.TimedMetricWithMetadata
+ timedMetricsWithMetadatas []aggregated.TimedMetricWithMetadatas
+ passthroughMetricsWithMetadata []aggregated.PassthroughMetricWithMetadata
}
// NewAggregator creates a new capturing aggregator.
@@ -111,6 +113,26 @@ func (agg *aggregator) AddTimed(
return nil
}
+func (agg *aggregator) AddTimedWithStagedMetadatas(
+ metric aggregated.Metric,
+ sm metadata.StagedMetadatas,
+) error {
+	// Clone the metric and staged metadatas to ensure they cannot be mutated externally.
+ metric = cloneTimedMetric(metric)
+ sm = cloneStagedMetadatas(sm)
+
+ agg.Lock()
+ defer agg.Unlock()
+
+ tms := aggregated.TimedMetricWithMetadatas{
+ Metric: metric,
+ StagedMetadatas: sm,
+ }
+ agg.timedMetricsWithMetadatas = append(agg.timedMetricsWithMetadatas, tms)
+ agg.numMetricsAdded++
+ return nil
+}
+
func (agg *aggregator) AddForwarded(
metric aggregated.ForwardedMetric,
metadata metadata.ForwardMetadata,
@@ -131,6 +153,26 @@ func (agg *aggregator) AddForwarded(
return nil
}
+func (agg *aggregator) AddPassthrough(
+ metric aggregated.Metric,
+ storagePolicy policy.StoragePolicy,
+) error {
+	// Clone the metric and storage policy to ensure they cannot be mutated externally.
+ metric = cloneTimedMetric(metric)
+ storagePolicy = cloneStoragePolicy(storagePolicy)
+
+ agg.Lock()
+ defer agg.Unlock()
+
+ pm := aggregated.PassthroughMetricWithMetadata{
+ Metric: metric,
+ StoragePolicy: storagePolicy,
+ }
+ agg.passthroughMetricsWithMetadata = append(agg.passthroughMetricsWithMetadata, pm)
+ agg.numMetricsAdded++
+ return nil
+}
+
func (agg *aggregator) Resign() error { return nil }
func (agg *aggregator) Status() aggr.RuntimeStatus { return aggr.RuntimeStatus{} }
func (agg *aggregator) Close() error { return nil }
@@ -146,17 +188,19 @@ func (agg *aggregator) Snapshot() SnapshotResult {
agg.Lock()
result := SnapshotResult{
- CountersWithMetadatas: agg.countersWithMetadatas,
- BatchTimersWithMetadatas: agg.batchTimersWithMetadatas,
- GaugesWithMetadatas: agg.gaugesWithMetadatas,
- ForwardedMetricsWithMetadata: agg.forwardedMetricsWithMetadata,
- TimedMetricWithMetadata: agg.timedMetricsWithMetadata,
+ CountersWithMetadatas: agg.countersWithMetadatas,
+ BatchTimersWithMetadatas: agg.batchTimersWithMetadatas,
+ GaugesWithMetadatas: agg.gaugesWithMetadatas,
+ ForwardedMetricsWithMetadata: agg.forwardedMetricsWithMetadata,
+ TimedMetricWithMetadata: agg.timedMetricsWithMetadata,
+ PassthroughMetricWithMetadata: agg.passthroughMetricsWithMetadata,
}
agg.countersWithMetadatas = nil
agg.batchTimersWithMetadatas = nil
agg.gaugesWithMetadatas = nil
agg.forwardedMetricsWithMetadata = nil
agg.timedMetricsWithMetadata = nil
+ agg.passthroughMetricsWithMetadata = nil
agg.numMetricsAdded = 0
agg.Unlock()
@@ -242,3 +286,8 @@ func cloneTimedMetadata(meta metadata.TimedMetadata) metadata.TimedMetadata {
cloned := meta
return cloned
}
+
+func cloneStoragePolicy(sp policy.StoragePolicy) policy.StoragePolicy {
+ cloned := sp
+ return cloned
+}
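The capturing aggregator records passthrough metrics verbatim so tests can assert on exactly what was submitted; `Snapshot` drains everything captured so far. A hedged usage sketch (assumes `capture.NewAggregator()` takes no arguments, per this package's constructor):

```go
agg := capture.NewAggregator()
sp := policy.NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour)

require.NoError(t, agg.AddPassthrough(testPassthrough, sp))
require.Equal(t, 1, agg.NumMetricsAdded())

res := agg.Snapshot() // drains captured metrics and resets the counters
require.Equal(t, 1, len(res.PassthroughMetricWithMetadata))
```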
diff --git a/src/aggregator/aggregator/capture/aggregator_test.go b/src/aggregator/aggregator/capture/aggregator_test.go
index 85bf3172ca..6032b4e0b5 100644
--- a/src/aggregator/aggregator/capture/aggregator_test.go
+++ b/src/aggregator/aggregator/capture/aggregator_test.go
@@ -47,17 +47,17 @@ var (
}
testBatchTimer = unaggregated.MetricUnion{
Type: metric.TimerType,
- ID: id.RawID("testCounter"),
+ ID: id.RawID("testBatchTimer"),
BatchTimerVal: []float64{1.0, 3.5, 2.2, 6.5, 4.8},
}
testGauge = unaggregated.MetricUnion{
Type: metric.GaugeType,
- ID: id.RawID("testCounter"),
+ ID: id.RawID("testGauge"),
GaugeVal: 123.456,
}
testTimed = aggregated.Metric{
Type: metric.CounterType,
- ID: []byte("testForwarded"),
+ ID: []byte("testTimed"),
TimeNanos: 12345,
Value: -13.5,
}
@@ -67,6 +67,12 @@ var (
TimeNanos: 12345,
Values: []float64{908, -13.5},
}
+ testPassthrough = aggregated.Metric{
+ Type: metric.CounterType,
+ ID: []byte("testPassthrough"),
+ TimeNanos: 12345,
+ Value: -12.3,
+ }
testInvalid = unaggregated.MetricUnion{
Type: metric.UnknownType,
ID: id.RawID("invalid"),
@@ -91,6 +97,7 @@ var (
SourceID: 1234,
NumForwardedTimes: 3,
}
+ testPassthroughStoragePolicy = policy.NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour)
)
func TestAggregator(t *testing.T) {
@@ -155,6 +162,17 @@ func TestAggregator(t *testing.T) {
require.Equal(t, 5, agg.NumMetricsAdded())
+ // Add valid passthrough metrics with storage policy.
+ expected.PassthroughMetricWithMetadata = append(
+ expected.PassthroughMetricWithMetadata,
+ aggregated.PassthroughMetricWithMetadata{
+ Metric: testPassthrough,
+ StoragePolicy: testPassthroughStoragePolicy,
+ },
+ )
+ require.NoError(t, agg.AddPassthrough(testPassthrough, testPassthroughStoragePolicy))
+ require.Equal(t, 6, agg.NumMetricsAdded())
+
res := agg.Snapshot()
require.Equal(t, expected, res)
}
diff --git a/src/aggregator/aggregator/capture/types.go b/src/aggregator/aggregator/capture/types.go
index 8e0ac3f2b2..da7c2d66aa 100644
--- a/src/aggregator/aggregator/capture/types.go
+++ b/src/aggregator/aggregator/capture/types.go
@@ -40,9 +40,10 @@ type Aggregator interface {
// SnapshotResult is the snapshot result.
type SnapshotResult struct {
- CountersWithMetadatas []unaggregated.CounterWithMetadatas
- BatchTimersWithMetadatas []unaggregated.BatchTimerWithMetadatas
- GaugesWithMetadatas []unaggregated.GaugeWithMetadatas
- ForwardedMetricsWithMetadata []aggregated.ForwardedMetricWithMetadata
- TimedMetricWithMetadata []aggregated.TimedMetricWithMetadata
+ CountersWithMetadatas []unaggregated.CounterWithMetadatas
+ BatchTimersWithMetadatas []unaggregated.BatchTimerWithMetadatas
+ GaugesWithMetadatas []unaggregated.GaugeWithMetadatas
+ ForwardedMetricsWithMetadata []aggregated.ForwardedMetricWithMetadata
+ TimedMetricWithMetadata []aggregated.TimedMetricWithMetadata
+ PassthroughMetricWithMetadata []aggregated.PassthroughMetricWithMetadata
}
diff --git a/src/aggregator/aggregator/counter_elem_gen.go b/src/aggregator/aggregator/counter_elem_gen.go
index ff1566a5b8..a6f3c720d3 100644
--- a/src/aggregator/aggregator/counter_elem_gen.go
+++ b/src/aggregator/aggregator/counter_elem_gen.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -63,10 +63,10 @@ type CounterElem struct {
elemBase
counterElemBase
- values []timedCounter // metric aggregations sorted by time in ascending order
- toConsume []timedCounter // small buffer to avoid memory allocations during consumption
- lastConsumedAtNanos int64 // last consumed at in Unix nanoseconds
- lastConsumedValues []float64 // last consumed values
+ values []timedCounter // metric aggregations sorted by time in ascending order
+ toConsume []timedCounter // small buffer to avoid memory allocations during consumption
+ lastConsumedAtNanos int64 // last consumed at in Unix nanoseconds
+ lastConsumedValues []transformation.Datapoint // last consumed values
}
// NewCounterElem creates a new element for the given metric type.
@@ -132,11 +132,11 @@ func (e *CounterElem) ResetSetData(
}
numAggTypes := len(e.aggTypes)
if cap(e.lastConsumedValues) < numAggTypes {
- e.lastConsumedValues = make([]float64, numAggTypes)
+ e.lastConsumedValues = make([]transformation.Datapoint, numAggTypes)
}
e.lastConsumedValues = e.lastConsumedValues[:numAggTypes]
for i := 0; i < len(e.lastConsumedValues); i++ {
- e.lastConsumedValues[i] = nan
+ e.lastConsumedValues[i] = transformation.Datapoint{Value: nan}
}
return nil
}
@@ -153,7 +153,7 @@ func (e *CounterElem) AddUnion(timestamp time.Time, mu unaggregated.MetricUnion)
lockedAgg.Unlock()
return errAggregationClosed
}
- lockedAgg.aggregation.AddUnion(mu)
+ lockedAgg.aggregation.AddUnion(timestamp, mu)
lockedAgg.Unlock()
return nil
}
@@ -170,7 +170,7 @@ func (e *CounterElem) AddValue(timestamp time.Time, value float64) error {
lockedAgg.Unlock()
return errAggregationClosed
}
- lockedAgg.aggregation.Add(value)
+ lockedAgg.aggregation.Add(timestamp, value)
lockedAgg.Unlock()
return nil
}
@@ -196,7 +196,7 @@ func (e *CounterElem) AddUnique(timestamp time.Time, values []float64, sourceID
}
lockedAgg.sourcesSeen.Set(source)
for _, v := range values {
- lockedAgg.aggregation.Add(v)
+ lockedAgg.aggregation.Add(timestamp, v)
}
lockedAgg.Unlock()
return nil
@@ -412,28 +412,52 @@ func (e *CounterElem) processValueWithAggregationLock(
)
for aggTypeIdx, aggType := range e.aggTypes {
value := lockedAgg.aggregation.ValueOf(aggType)
- for i := 0; i < transformations.Len(); i++ {
- transformType := transformations.At(i).Transformation.Type
- if transformType.IsUnaryTransform() {
- fn := transformType.MustUnaryTransform()
- res := fn(transformation.Datapoint{TimeNanos: timeNanos, Value: value})
+ for _, transformOp := range transformations {
+ unaryOp, isUnaryOp := transformOp.UnaryTransform()
+ binaryOp, isBinaryOp := transformOp.BinaryTransform()
+ switch {
+ case isUnaryOp:
+ curr := transformation.Datapoint{
+ TimeNanos: timeNanos,
+ Value: value,
+ }
+
+ res := unaryOp.Evaluate(curr)
+
value = res.Value
- } else {
- fn := transformType.MustBinaryTransform()
- prev := transformation.Datapoint{TimeNanos: e.lastConsumedAtNanos, Value: e.lastConsumedValues[aggTypeIdx]}
- curr := transformation.Datapoint{TimeNanos: timeNanos, Value: value}
- res := fn(prev, curr)
+
+ case isBinaryOp:
+ lastTimeNanos := e.lastConsumedAtNanos
+ prev := transformation.Datapoint{
+ TimeNanos: lastTimeNanos,
+ Value: e.lastConsumedValues[aggTypeIdx].Value,
+ }
+
+ currTimeNanos := timeNanos
+ curr := transformation.Datapoint{
+ TimeNanos: currTimeNanos,
+ Value: value,
+ }
+
+ res := binaryOp.Evaluate(prev, curr)
+
// NB: we only need to record the value needed for derivative transformations.
// We currently only support first-order derivative transformations so we only
// need to keep one value. In the future if we need to support higher-order
// derivative transformations, we need to store an array of values here.
- e.lastConsumedValues[aggTypeIdx] = value
+ if !math.IsNaN(curr.Value) {
+ e.lastConsumedValues[aggTypeIdx] = curr
+ }
+
value = res.Value
+
}
}
+
if discardNaNValues && math.IsNaN(value) {
continue
}
+
if !e.parsedPipeline.HasRollup {
switch e.idPrefixSuffixType {
case NoPrefixNoSuffix:
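The consumption loop now iterates over pre-constructed `transformation.Op` values and dispatches on whether each op is unary or binary, instead of re-resolving transform functions from their types on every pass. A condensed sketch of that dispatch, using the same `transformation` API as above:

```go
// applyTransforms folds a value through a chain of transformation ops.
// prev carries the last consumed datapoint needed by binary (derivative) ops.
func applyTransforms(
	ops []transformation.Op,
	prev, curr transformation.Datapoint,
) float64 {
	value := curr.Value
	for _, op := range ops {
		if unary, ok := op.UnaryTransform(); ok {
			value = unary.Evaluate(transformation.Datapoint{
				TimeNanos: curr.TimeNanos,
				Value:     value,
			}).Value
			continue
		}
		if binary, ok := op.BinaryTransform(); ok {
			value = binary.Evaluate(prev, transformation.Datapoint{
				TimeNanos: curr.TimeNanos,
				Value:     value,
			}).Value
		}
	}
	return value
}
```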
diff --git a/src/aggregator/aggregator/elem_base.go b/src/aggregator/aggregator/elem_base.go
index 440b840620..eeff7cc5b2 100644
--- a/src/aggregator/aggregator/elem_base.go
+++ b/src/aggregator/aggregator/elem_base.go
@@ -35,7 +35,9 @@ import (
mpipeline "github.com/m3db/m3/src/metrics/pipeline"
"github.com/m3db/m3/src/metrics/pipeline/applied"
"github.com/m3db/m3/src/metrics/policy"
+ "github.com/m3db/m3/src/metrics/transformation"
"github.com/m3db/m3/src/x/pool"
+ "go.uber.org/zap"
"github.com/willf/bitset"
)
@@ -176,7 +178,7 @@ func newElemBase(opts Options) elemBase {
return elemBase{
opts: opts,
aggTypesOpts: opts.AggregationTypesOptions(),
- aggOpts: raggregation.NewOptions(),
+ aggOpts: raggregation.NewOptions(opts.InstrumentOptions()),
}
}
@@ -192,6 +194,8 @@ func (e *elemBase) resetSetData(
) error {
parsed, err := newParsedPipeline(pipeline)
if err != nil {
+ l := e.opts.InstrumentOptions().Logger()
+ l.Error("error parsing pipeline", zap.Error(err))
return err
}
e.id = id
@@ -378,9 +382,9 @@ type parsedPipeline struct {
// Whether the source pipeline contains derivative transformations at its head.
HasDerivativeTransform bool
- // Sub-pipline containing only transformation operations from the head
- // of the source pipeline this parsed pipeline was derived from.
- Transformations applied.Pipeline
+ // Transformation operations from the head of the source pipeline this
+ // parsed pipeline was derived from.
+ Transformations []transformation.Op
// Whether the source pipeline contains a rollup operation that is either at the
// head of the source pipeline or immediately following the transformation operations
@@ -417,7 +421,8 @@ func newParsedPipeline(pipeline applied.Pipeline) (parsedPipeline, error) {
for i := 0; i < numSteps; i++ {
pipelineOp := pipeline.At(i)
if pipelineOp.Type != mpipeline.TransformationOpType && pipelineOp.Type != mpipeline.RollupOpType {
- return parsedPipeline{}, fmt.Errorf("pipeline %v step %d has invalid operation type %v", pipeline, i, pipelineOp.Type)
+ err := fmt.Errorf("pipeline %v step %d has invalid operation type %v", pipeline, i, pipelineOp.Type)
+ return parsedPipeline{}, err
}
if pipelineOp.Type == mpipeline.RollupOpType {
if firstRollupOpIdx == -1 {
@@ -433,9 +438,7 @@ func newParsedPipeline(pipeline applied.Pipeline) (parsedPipeline, error) {
}
}
}
- if firstRollupOpIdx == -1 {
- return parsedPipeline{}, fmt.Errorf("pipeline %v has no rollup operations", pipeline)
- }
+
// Pipelines that compute higher order derivatives require keeping more states including
// the raw values and lower order derivatives. For example, a pipeline such as `aggregate Last |
// perSecond | perSecond` requires storing both the raw value and the first-order derivatives.
@@ -445,11 +448,37 @@ func newParsedPipeline(pipeline applied.Pipeline) (parsedPipeline, error) {
if transformationDerivativeOrder > maxSupportedTransformationDerivativeOrder {
return parsedPipeline{}, fmt.Errorf("pipeline %v transformation derivative order is %d higher than supported %d", pipeline, transformationDerivativeOrder, maxSupportedTransformationDerivativeOrder)
}
+
+ var (
+ hasRollup = firstRollupOpIdx != -1
+ hasDerivativeTransform = transformationDerivativeOrder > 0
+ transformPipeline applied.Pipeline
+ remainder applied.Pipeline
+ rollup applied.RollupOp
+ )
+ if hasRollup {
+ transformPipeline = pipeline.SubPipeline(0, firstRollupOpIdx)
+ remainder = pipeline.SubPipeline(firstRollupOpIdx+1, numSteps)
+ rollup = pipeline.At(firstRollupOpIdx).Rollup
+ } else {
+ transformPipeline = pipeline
+ }
+
+ transformations := make([]transformation.Op, 0, transformPipeline.Len())
+ for i := 0; i < transformPipeline.Len(); i++ {
+ op, err := transformPipeline.At(i).Transformation.Type.NewOp()
+ if err != nil {
+ err := fmt.Errorf("transform could not construct op: %v", err)
+ return parsedPipeline{}, err
+ }
+ transformations = append(transformations, op)
+ }
+
return parsedPipeline{
- HasDerivativeTransform: transformationDerivativeOrder > 0,
- Transformations: pipeline.SubPipeline(0, firstRollupOpIdx),
- HasRollup: true,
- Rollup: pipeline.At(firstRollupOpIdx).Rollup,
- Remainder: pipeline.SubPipeline(firstRollupOpIdx+1, numSteps),
+ HasDerivativeTransform: hasDerivativeTransform,
+ HasRollup: hasRollup,
+ Transformations: transformations,
+ Remainder: remainder,
+ Rollup: rollup,
}, nil
}
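With the "has no rollup operations" rejection removed, a transform-only pipeline now parses cleanly: the rollup-related fields simply stay zero-valued. A short test-style sketch, using the `applied`/`mpipeline`/`transformation` imports from this file:

```go
p := applied.NewPipeline([]applied.OpUnion{
	{
		Type:           mpipeline.TransformationOpType,
		Transformation: mpipeline.TransformationOp{Type: transformation.PerSecond},
	},
})

parsed, err := newParsedPipeline(p)
// err == nil; previously this returned "pipeline ... has no rollup operations".
// parsed.HasRollup == false, and Remainder/Rollup remain zero values.
_, _ = parsed, err
```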
diff --git a/src/aggregator/aggregator/elem_base_test.go b/src/aggregator/aggregator/elem_base_test.go
index e2350603b3..a929227a26 100644
--- a/src/aggregator/aggregator/elem_base_test.go
+++ b/src/aggregator/aggregator/elem_base_test.go
@@ -23,6 +23,7 @@ package aggregator
import (
"strings"
"testing"
+ "time"
raggregation "github.com/m3db/m3/src/aggregator/aggregation"
maggregation "github.com/m3db/m3/src/metrics/aggregation"
@@ -36,25 +37,40 @@ import (
"github.com/stretchr/testify/require"
)
+func mustNewOp(t require.TestingT, ttype transformation.Type) transformation.Op {
+ op, err := ttype.NewOp()
+ require.NoError(t, err)
+ return op
+}
+
func TestElemBaseID(t *testing.T) {
e := &elemBase{}
e.resetSetData(testCounterID, testStoragePolicy, maggregation.DefaultTypes, true, applied.DefaultPipeline, 0, NoPrefixNoSuffix)
require.Equal(t, testCounterID, e.ID())
}
+func requirePipelinesMatch(t require.TestingT, expected, actual parsedPipeline) {
+ // Compare transform types
+ require.Equal(t, len(expected.Transformations), len(actual.Transformations))
+ for i := range expected.Transformations {
+ require.Equal(t, expected.Transformations[i].Type(), actual.Transformations[i].Type())
+ }
+
+	// Note: transformations are now constructed each time and are not directly
+	// comparable, so nil them out before comparing the rest of the pipeline.
+ expectedWithoutTransforms := expected
+ expectedWithoutTransforms.Transformations = nil
+ actualWithoutTransforms := actual
+ actualWithoutTransforms.Transformations = nil
+ require.Equal(t, expectedWithoutTransforms, actualWithoutTransforms)
+}
+
func TestElemBaseResetSetData(t *testing.T) {
expectedParsedPipeline := parsedPipeline{
HasDerivativeTransform: true,
- Transformations: applied.NewPipeline([]applied.OpUnion{
- {
- Type: pipeline.TransformationOpType,
- Transformation: pipeline.TransformationOp{Type: transformation.Absolute},
- },
- {
- Type: pipeline.TransformationOpType,
- Transformation: pipeline.TransformationOp{Type: transformation.PerSecond},
- },
- }),
+ Transformations: []transformation.Op{
+ mustNewOp(t, transformation.Absolute),
+ mustNewOp(t, transformation.PerSecond),
+ },
HasRollup: true,
Rollup: applied.RollupOp{
ID: []byte("foo.bar"),
@@ -77,24 +93,25 @@ func TestElemBaseResetSetData(t *testing.T) {
require.Equal(t, testAggregationTypesExpensive, e.aggTypes)
require.False(t, e.useDefaultAggregation)
require.True(t, e.aggOpts.HasExpensiveAggregations)
- require.Equal(t, expectedParsedPipeline, e.parsedPipeline)
+
+ requirePipelinesMatch(t, expectedParsedPipeline, e.parsedPipeline)
+
require.Equal(t, 3, e.numForwardedTimes)
require.False(t, e.tombstoned)
require.False(t, e.closed)
require.Equal(t, WithPrefixWithSuffix, e.idPrefixSuffixType)
}
-func TestElemBaseResetSetDataInvalidPipeline(t *testing.T) {
- invalidPipeline := applied.NewPipeline([]applied.OpUnion{
+func TestElemBaseResetSetDataNoRollup(t *testing.T) {
+ pipelineNoRollup := applied.NewPipeline([]applied.OpUnion{
{
Type: pipeline.TransformationOpType,
Transformation: pipeline.TransformationOp{Type: transformation.Absolute},
},
})
e := &elemBase{}
- err := e.resetSetData(testCounterID, testStoragePolicy, testAggregationTypes, false, invalidPipeline, 0, WithPrefixWithSuffix)
- require.Error(t, err)
- require.True(t, strings.Contains(err.Error(), "has no rollup operations"))
+ err := e.resetSetData(testCounterID, testStoragePolicy, testAggregationTypes, false, pipelineNoRollup, 0, WithPrefixWithSuffix)
+ require.NoError(t, err)
}
func TestElemBaseForwardedIDWithDefaultPipeline(t *testing.T) {
@@ -167,11 +184,11 @@ func TestCounterElemBase(t *testing.T) {
func TestCounterElemBaseNewAggregation(t *testing.T) {
e := counterElemBase{}
la := e.NewAggregation(nil, raggregation.Options{})
- la.AddUnion(unaggregated.MetricUnion{
+ la.AddUnion(time.Now(), unaggregated.MetricUnion{
Type: metric.CounterType,
CounterVal: 100,
})
- la.AddUnion(unaggregated.MetricUnion{
+ la.AddUnion(time.Now(), unaggregated.MetricUnion{
Type: metric.CounterType,
CounterVal: 200,
})
@@ -217,11 +234,11 @@ func TestTimerElemBase(t *testing.T) {
func TestTimerElemBaseNewAggregation(t *testing.T) {
e := timerElemBase{}
la := e.NewAggregation(NewOptions(), raggregation.Options{})
- la.AddUnion(unaggregated.MetricUnion{
+ la.AddUnion(time.Now(), unaggregated.MetricUnion{
Type: metric.TimerType,
BatchTimerVal: []float64{100.0, 200.0},
})
- la.AddUnion(unaggregated.MetricUnion{
+ la.AddUnion(time.Now(), unaggregated.MetricUnion{
Type: metric.TimerType,
BatchTimerVal: []float64{300.0, 400.0, 500.0},
})
@@ -276,11 +293,11 @@ func TestGaugeElemBase(t *testing.T) {
func TestGaugeElemBaseNewLockedAggregation(t *testing.T) {
e := gaugeElemBase{}
la := e.NewAggregation(nil, raggregation.Options{})
- la.AddUnion(unaggregated.MetricUnion{
+ la.AddUnion(time.Now(), unaggregated.MetricUnion{
Type: metric.GaugeType,
GaugeVal: 100.0,
})
- la.AddUnion(unaggregated.MetricUnion{
+ la.AddUnion(time.Now(), unaggregated.MetricUnion{
Type: metric.GaugeType,
GaugeVal: 200.0,
})
@@ -337,7 +354,6 @@ func TestParsePipelineNoTransformation(t *testing.T) {
})
expected := parsedPipeline{
HasDerivativeTransform: false,
- Transformations: applied.NewPipeline([]applied.OpUnion{}),
HasRollup: true,
Rollup: applied.RollupOp{
ID: []byte("foo"),
@@ -366,7 +382,7 @@ func TestParsePipelineNoTransformation(t *testing.T) {
}
parsed, err := newParsedPipeline(p)
require.NoError(t, err)
- require.Equal(t, expected, parsed)
+ requirePipelinesMatch(t, expected, parsed)
}
func TestParsePipelineWithNonDerivativeTransformation(t *testing.T) {
@@ -403,13 +419,8 @@ func TestParsePipelineWithNonDerivativeTransformation(t *testing.T) {
})
expected := parsedPipeline{
HasDerivativeTransform: false,
- Transformations: applied.NewPipeline([]applied.OpUnion{
- {
- Type: pipeline.TransformationOpType,
- Transformation: pipeline.TransformationOp{Type: transformation.Absolute},
- },
- }),
- HasRollup: true,
+ Transformations: []transformation.Op{mustNewOp(t, transformation.Absolute)},
+ HasRollup: true,
Rollup: applied.RollupOp{
ID: []byte("foo"),
AggregationID: maggregation.MustCompressTypes(maggregation.Count),
@@ -437,7 +448,7 @@ func TestParsePipelineWithNonDerivativeTransformation(t *testing.T) {
}
parsed, err := newParsedPipeline(p)
require.NoError(t, err)
- require.Equal(t, expected, parsed)
+ requirePipelinesMatch(t, expected, parsed)
}
func TestParsePipelineWithDerivativeTransformation(t *testing.T) {
@@ -478,16 +489,10 @@ func TestParsePipelineWithDerivativeTransformation(t *testing.T) {
})
expected := parsedPipeline{
HasDerivativeTransform: true,
- Transformations: applied.NewPipeline([]applied.OpUnion{
- {
- Type: pipeline.TransformationOpType,
- Transformation: pipeline.TransformationOp{Type: transformation.PerSecond},
- },
- {
- Type: pipeline.TransformationOpType,
- Transformation: pipeline.TransformationOp{Type: transformation.Absolute},
- },
- }),
+ Transformations: []transformation.Op{
+ mustNewOp(t, transformation.PerSecond),
+ mustNewOp(t, transformation.Absolute),
+ },
HasRollup: true,
Rollup: applied.RollupOp{
ID: []byte("foo"),
@@ -516,7 +521,7 @@ func TestParsePipelineWithDerivativeTransformation(t *testing.T) {
}
parsed, err := newParsedPipeline(p)
require.NoError(t, err)
- require.Equal(t, expected, parsed)
+ requirePipelinesMatch(t, expected, parsed)
}
func TestParsePipelineInvalidOperationType(t *testing.T) {
@@ -542,8 +547,7 @@ func TestParsePipelineNoRollupOperation(t *testing.T) {
},
})
_, err := newParsedPipeline(p)
- require.Error(t, err)
- require.True(t, strings.Contains(err.Error(), "has no rollup operations"))
+ require.NoError(t, err)
}
func TestParsePipelineTransformationDerivativeOrderTooHigh(t *testing.T) {
diff --git a/src/aggregator/aggregator/elem_test.go b/src/aggregator/aggregator/elem_test.go
index fb48068d1c..f3e223897b 100644
--- a/src/aggregator/aggregator/elem_test.go
+++ b/src/aggregator/aggregator/elem_test.go
@@ -128,16 +128,10 @@ func TestCounterResetSetData(t *testing.T) {
// Reset element with a pipeline containing a derivative transformation.
expectedParsedPipeline := parsedPipeline{
HasDerivativeTransform: true,
- Transformations: applied.NewPipeline([]applied.OpUnion{
- {
- Type: pipeline.TransformationOpType,
- Transformation: pipeline.TransformationOp{Type: transformation.Absolute},
- },
- {
- Type: pipeline.TransformationOpType,
- Transformation: pipeline.TransformationOp{Type: transformation.PerSecond},
- },
- }),
+ Transformations: []transformation.Op{
+ mustNewOp(t, transformation.Absolute),
+ mustNewOp(t, transformation.PerSecond),
+ },
HasRollup: true,
Rollup: applied.RollupOp{
ID: []byte("foo.bar"),
@@ -155,10 +149,10 @@ func TestCounterResetSetData(t *testing.T) {
}
err = ce.ResetSetData(testCounterID, testStoragePolicy, testAggregationTypesExpensive, testPipeline, 0, NoPrefixNoSuffix)
require.NoError(t, err)
- require.Equal(t, expectedParsedPipeline, ce.parsedPipeline)
+ requirePipelinesMatch(t, expectedParsedPipeline, ce.parsedPipeline)
require.Equal(t, len(testAggregationTypesExpensive), len(ce.lastConsumedValues))
for i := 0; i < len(ce.lastConsumedValues); i++ {
- require.True(t, math.IsNaN(ce.lastConsumedValues[i]))
+ require.True(t, math.IsNaN(ce.lastConsumedValues[i].Value))
}
}
@@ -169,18 +163,18 @@ func TestCounterResetSetDataInvalidAggregationType(t *testing.T) {
require.Error(t, err)
}
-func TestCounterResetSetDataInvalidPipeline(t *testing.T) {
+func TestCounterResetSetDataNoRollup(t *testing.T) {
opts := NewOptions()
ce := MustNewCounterElem(nil, policy.EmptyStoragePolicy, maggregation.DefaultTypes, applied.DefaultPipeline, testNumForwardedTimes, NoPrefixNoSuffix, opts)
- invalidPipeline := applied.NewPipeline([]applied.OpUnion{
+ pipelineNoRollup := applied.NewPipeline([]applied.OpUnion{
{
Type: pipeline.TransformationOpType,
Transformation: pipeline.TransformationOp{Type: transformation.Absolute},
},
})
- err := ce.ResetSetData(testCounterID, testStoragePolicy, maggregation.DefaultTypes, invalidPipeline, 0, NoPrefixNoSuffix)
- require.Error(t, err)
+ err := ce.ResetSetData(testCounterID, testStoragePolicy, maggregation.DefaultTypes, pipelineNoRollup, 0, NoPrefixNoSuffix)
+ require.NoError(t, err)
}
func TestCounterElemAddUnion(t *testing.T) {
@@ -535,7 +529,8 @@ func TestCounterElemConsumeCustomAggregationCustomPipeline(t *testing.T) {
require.Equal(t, 0, len(*localRes))
require.Equal(t, 2, len(e.values))
require.Equal(t, time.Unix(220, 0).UnixNano(), e.lastConsumedAtNanos)
- require.Equal(t, []float64{123.0}, e.lastConsumedValues)
+ require.Equal(t, 1, len(e.lastConsumedValues))
+ require.Equal(t, 123.0, e.lastConsumedValues[0].Value)
// Consume all values.
expectedForwardedRes = []testForwardedMetricWithMetadata{
@@ -559,7 +554,8 @@ func TestCounterElemConsumeCustomAggregationCustomPipeline(t *testing.T) {
require.Equal(t, 0, len(*localRes))
require.Equal(t, 0, len(e.values))
require.Equal(t, time.Unix(240, 0).UnixNano(), e.lastConsumedAtNanos)
- require.Equal(t, []float64{589.0}, e.lastConsumedValues)
+ require.Equal(t, 1, len(e.lastConsumedValues))
+ require.Equal(t, 589.0, e.lastConsumedValues[0].Value)
// Tombstone the element and discard all values.
e.tombstoned = true
@@ -683,16 +679,10 @@ func TestTimerResetSetData(t *testing.T) {
// Reset element with a pipeline containing a derivative transformation.
expectedParsedPipeline := parsedPipeline{
HasDerivativeTransform: true,
- Transformations: applied.NewPipeline([]applied.OpUnion{
- {
- Type: pipeline.TransformationOpType,
- Transformation: pipeline.TransformationOp{Type: transformation.Absolute},
- },
- {
- Type: pipeline.TransformationOpType,
- Transformation: pipeline.TransformationOp{Type: transformation.PerSecond},
- },
- }),
+ Transformations: []transformation.Op{
+ mustNewOp(t, transformation.Absolute),
+ mustNewOp(t, transformation.PerSecond),
+ },
HasRollup: true,
Rollup: applied.RollupOp{
ID: []byte("foo.bar"),
@@ -710,10 +700,10 @@ func TestTimerResetSetData(t *testing.T) {
}
err = te.ResetSetData(testBatchTimerID, testStoragePolicy, testAggregationTypesExpensive, testPipeline, 0, NoPrefixNoSuffix)
require.NoError(t, err)
- require.Equal(t, expectedParsedPipeline, te.parsedPipeline)
+ requirePipelinesMatch(t, expectedParsedPipeline, te.parsedPipeline)
require.Equal(t, len(testAggregationTypesExpensive), len(te.lastConsumedValues))
for i := 0; i < len(te.lastConsumedValues); i++ {
- require.True(t, math.IsNaN(te.lastConsumedValues[i]))
+ require.True(t, math.IsNaN(te.lastConsumedValues[i].Value))
}
}
@@ -724,18 +714,18 @@ func TestTimerResetSetDataInvalidAggregationType(t *testing.T) {
require.Error(t, err)
}
-func TestTimerResetSetDataInvalidPipeline(t *testing.T) {
+func TestTimerResetSetDataNoRollup(t *testing.T) {
opts := NewOptions()
te := MustNewTimerElem(nil, policy.EmptyStoragePolicy, maggregation.DefaultTypes, applied.DefaultPipeline, testNumForwardedTimes, NoPrefixNoSuffix, opts)
- invalidPipeline := applied.NewPipeline([]applied.OpUnion{
+ pipelineNoRollup := applied.NewPipeline([]applied.OpUnion{
{
Type: pipeline.TransformationOpType,
Transformation: pipeline.TransformationOp{Type: transformation.Absolute},
},
})
- err := te.ResetSetData(testBatchTimerID, testStoragePolicy, maggregation.DefaultTypes, invalidPipeline, 0, NoPrefixNoSuffix)
- require.Error(t, err)
+ err := te.ResetSetData(testBatchTimerID, testStoragePolicy, maggregation.DefaultTypes, pipelineNoRollup, 0, NoPrefixNoSuffix)
+ require.NoError(t, err)
}
func TestTimerElemAddUnion(t *testing.T) {
@@ -1033,7 +1023,8 @@ func TestTimerElemConsumeCustomAggregationCustomPipeline(t *testing.T) {
require.Equal(t, 0, len(*localRes))
require.Equal(t, 2, len(e.values))
require.Equal(t, time.Unix(220, 0).UnixNano(), e.lastConsumedAtNanos)
- require.Equal(t, []float64{123.0}, e.lastConsumedValues)
+ require.Equal(t, 1, len(e.lastConsumedValues))
+ require.Equal(t, 123.0, e.lastConsumedValues[0].Value)
// Consume all values.
expectedForwardedRes = []testForwardedMetricWithMetadata{
@@ -1057,7 +1048,8 @@ func TestTimerElemConsumeCustomAggregationCustomPipeline(t *testing.T) {
require.Equal(t, 0, len(*localRes))
require.Equal(t, 0, len(e.values))
require.Equal(t, time.Unix(240, 0).UnixNano(), e.lastConsumedAtNanos)
- require.Equal(t, []float64{589.0}, e.lastConsumedValues)
+ require.Equal(t, 1, len(e.lastConsumedValues))
+ require.Equal(t, 589.0, e.lastConsumedValues[0].Value)
// Tombstone the element and discard all values.
e.tombstoned = true
@@ -1186,16 +1178,10 @@ func TestGaugeResetSetData(t *testing.T) {
// Reset element with a pipeline containing a derivative transformation.
expectedParsedPipeline := parsedPipeline{
HasDerivativeTransform: true,
- Transformations: applied.NewPipeline([]applied.OpUnion{
- {
- Type: pipeline.TransformationOpType,
- Transformation: pipeline.TransformationOp{Type: transformation.Absolute},
- },
- {
- Type: pipeline.TransformationOpType,
- Transformation: pipeline.TransformationOp{Type: transformation.PerSecond},
- },
- }),
+ Transformations: []transformation.Op{
+ mustNewOp(t, transformation.Absolute),
+ mustNewOp(t, transformation.PerSecond),
+ },
HasRollup: true,
Rollup: applied.RollupOp{
ID: []byte("foo.bar"),
@@ -1213,10 +1199,10 @@ func TestGaugeResetSetData(t *testing.T) {
}
err = ge.ResetSetData(testGaugeID, testStoragePolicy, testAggregationTypesExpensive, testPipeline, 0, NoPrefixNoSuffix)
require.NoError(t, err)
- require.Equal(t, expectedParsedPipeline, ge.parsedPipeline)
+ requirePipelinesMatch(t, expectedParsedPipeline, ge.parsedPipeline)
require.Equal(t, len(testAggregationTypesExpensive), len(ge.lastConsumedValues))
for i := 0; i < len(ge.lastConsumedValues); i++ {
- require.True(t, math.IsNaN(ge.lastConsumedValues[i]))
+ require.True(t, math.IsNaN(ge.lastConsumedValues[i].Value))
}
}
@@ -1576,7 +1562,8 @@ func TestGaugeElemConsumeCustomAggregationCustomPipeline(t *testing.T) {
require.Equal(t, 0, len(*localRes))
require.Equal(t, 2, len(e.values))
require.Equal(t, time.Unix(220, 0).UnixNano(), e.lastConsumedAtNanos)
- require.Equal(t, []float64{123.0}, e.lastConsumedValues)
+ require.Equal(t, 1, len(e.lastConsumedValues))
+ require.Equal(t, 123.0, e.lastConsumedValues[0].Value)
// Consume all values.
expectedForwardedRes = []testForwardedMetricWithMetadata{
@@ -1600,7 +1587,8 @@ func TestGaugeElemConsumeCustomAggregationCustomPipeline(t *testing.T) {
require.Equal(t, 0, len(*localRes))
require.Equal(t, 0, len(e.values))
require.Equal(t, time.Unix(240, 0).UnixNano(), e.lastConsumedAtNanos)
- require.Equal(t, []float64{589.0}, e.lastConsumedValues)
+ require.Equal(t, 1, len(e.lastConsumedValues))
+ require.Equal(t, 589.0, e.lastConsumedValues[0].Value)
// Tombstone the element and discard all values.
e.tombstoned = true
@@ -1804,7 +1792,7 @@ func testCounterElem(
e := MustNewCounterElem(testCounterID, testStoragePolicy, aggTypes, pipeline, testNumForwardedTimes, WithPrefixWithSuffix, opts)
for i, aligned := range alignedstartAtNanos {
counter := &lockedCounterAggregation{aggregation: newCounterAggregation(raggregation.NewCounter(e.aggOpts))}
- counter.aggregation.Update(counterVals[i])
+ counter.aggregation.Update(time.Unix(0, aligned), counterVals[i])
e.values = append(e.values, timedCounter{
startAtNanos: aligned,
lockedAgg: counter,
@@ -1824,7 +1812,7 @@ func testTimerElem(
for i, aligned := range alignedstartAtNanos {
newTimer := raggregation.NewTimer(opts.AggregationTypesOptions().Quantiles(), opts.StreamOptions(), e.aggOpts)
timer := &lockedTimerAggregation{aggregation: newTimerAggregation(newTimer)}
- timer.aggregation.AddBatch(timerBatches[i])
+ timer.aggregation.AddBatch(time.Now(), timerBatches[i])
e.values = append(e.values, timedTimer{
startAtNanos: aligned,
lockedAgg: timer,
@@ -1843,7 +1831,7 @@ func testGaugeElem(
e := MustNewGaugeElem(testGaugeID, testStoragePolicy, aggTypes, pipeline, testNumForwardedTimes, WithPrefixWithSuffix, opts)
for i, aligned := range alignedstartAtNanos {
gauge := &lockedGaugeAggregation{aggregation: newGaugeAggregation(raggregation.NewGauge(e.aggOpts))}
- gauge.aggregation.Update(gaugeVals[i])
+ gauge.aggregation.Update(time.Unix(0, aligned), gaugeVals[i])
e.values = append(e.values, timedGauge{
startAtNanos: aligned,
lockedAgg: gauge,
diff --git a/src/aggregator/aggregator/entry.go b/src/aggregator/aggregator/entry.go
index 4729dddba3..0c4f5cdd02 100644
--- a/src/aggregator/aggregator/entry.go
+++ b/src/aggregator/aggregator/entry.go
@@ -35,6 +35,7 @@ import (
"github.com/m3db/m3/src/metrics/metadata"
"github.com/m3db/m3/src/metrics/metric"
"github.com/m3db/m3/src/metrics/metric/aggregated"
+ "github.com/m3db/m3/src/metrics/metric/id"
metricid "github.com/m3db/m3/src/metrics/metric/id"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
"github.com/m3db/m3/src/metrics/policy"
@@ -100,18 +101,24 @@ func newUntimedEntryMetrics(scope tally.Scope) untimedEntryMetrics {
}
type timedEntryMetrics struct {
- rateLimit rateLimitEntryMetrics
- tooFarInTheFuture tally.Counter
- tooFarInThePast tally.Counter
- metadataUpdates tally.Counter
+ rateLimit rateLimitEntryMetrics
+ tooFarInTheFuture tally.Counter
+ tooFarInThePast tally.Counter
+ noPipelinesInMetadata tally.Counter
+ tombstonedMetadata tally.Counter
+ metadataUpdates tally.Counter
+ metadatasUpdates tally.Counter
}
func newTimedEntryMetrics(scope tally.Scope) timedEntryMetrics {
return timedEntryMetrics{
- rateLimit: newRateLimitEntryMetrics(scope),
- tooFarInTheFuture: scope.Counter("too-far-in-the-future"),
- tooFarInThePast: scope.Counter("too-far-in-the-past"),
- metadataUpdates: scope.Counter("metadata-updates"),
+ rateLimit: newRateLimitEntryMetrics(scope),
+ tooFarInTheFuture: scope.Counter("too-far-in-the-future"),
+ tooFarInThePast: scope.Counter("too-far-in-the-past"),
+ noPipelinesInMetadata: scope.Counter("no-pipelines-in-metadata"),
+ tombstonedMetadata: scope.Counter("tombstoned-metadata"),
+ metadataUpdates: scope.Counter("metadata-updates"),
+ metadatasUpdates: scope.Counter("metadatas-updates"),
}
}
@@ -254,7 +261,18 @@ func (e *Entry) AddTimed(
if err := e.applyValueRateLimit(1, e.metrics.timed.rateLimit); err != nil {
return err
}
- return e.addTimed(metric, metadata)
+ return e.addTimed(metric, metadata, nil)
+}
+
+// AddTimedWithStagedMetadatas adds a timed metric with staged metadatas.
+func (e *Entry) AddTimedWithStagedMetadatas(
+ metric aggregated.Metric,
+ metas metadata.StagedMetadatas,
+) error {
+ if err := e.applyValueRateLimit(1, e.metrics.timed.rateLimit); err != nil {
+ return err
+ }
+ return e.addTimed(metric, metadata.TimedMetadata{}, metas)
}
// AddForwarded adds a forwarded metric alongside its metadata.
@@ -414,13 +432,16 @@ func (e *Entry) addUntimed(
}
if e.shouldUpdateStagedMetadatasWithLock(sm) {
- if err = e.updateStagedMetadatasWithLock(metric, hasDefaultMetadatas, sm); err != nil {
+ err := e.updateStagedMetadatasWithLock(metric.ID, metric.Type,
+ hasDefaultMetadatas, sm)
+ if err != nil {
// NB(xichen): if an error occurred during policy update, the policies
// will remain as they are, i.e., there are no half-updated policies.
e.Unlock()
timeLock.RUnlock()
return err
}
+ e.metrics.untimed.metadatasUpdates.Inc(1)
}
err = e.addUntimedWithLock(currTime, metric)
@@ -565,12 +586,13 @@ func (e *Entry) removeOldAggregations(newAggregations aggregationValues) {
}
func (e *Entry) updateStagedMetadatasWithLock(
- metric unaggregated.MetricUnion,
+ metricID id.RawID,
+ metricType metric.Type,
hasDefaultMetadatas bool,
sm metadata.StagedMetadata,
) error {
var (
- elemID = e.maybeCopyIDWithLock(metric.ID)
+ elemID = e.maybeCopyIDWithLock(metricID)
newAggregations = make(aggregationValues, 0, initialAggregationCapacity)
)
@@ -588,7 +610,7 @@ func (e *Entry) updateStagedMetadatasWithLock(
resolution: storagePolicy.Resolution().Window,
}.toMetricListID()
var err error
- newAggregations, err = e.addNewAggregationKeyWithLock(metric.Type, elemID, key, listID, newAggregations)
+ newAggregations, err = e.addNewAggregationKeyWithLock(metricType, elemID, key, listID, newAggregations)
if err != nil {
return err
}
@@ -602,7 +624,6 @@ func (e *Entry) updateStagedMetadatasWithLock(
e.aggregations = newAggregations
e.hasDefaultMetadatas = hasDefaultMetadatas
e.cutoverNanos = sm.CutoverNanos
- e.metrics.untimed.metadatasUpdates.Inc(1)
return nil
}
@@ -620,6 +641,7 @@ func (e *Entry) addUntimedWithLock(timestamp time.Time, mu unaggregated.MetricUn
func (e *Entry) addTimed(
metric aggregated.Metric,
metadata metadata.TimedMetadata,
+ stagedMetadatas metadata.StagedMetadatas,
) error {
timeLock := e.opts.TimeLock()
timeLock.RLock()
@@ -651,6 +673,72 @@ func (e *Entry) addTimed(
return err
}
+	// Only process staged metadatas if the caller has sent staged metadatas
+	// that aren't the default staged metadatas.
+ hasDefaultMetadatas := stagedMetadatas.IsDefault()
+ if len(stagedMetadatas) > 0 && !hasDefaultMetadatas {
+ sm, err := e.activeStagedMetadataWithLock(currTime, stagedMetadatas)
+ if err != nil {
+ e.RUnlock()
+ timeLock.RUnlock()
+ return err
+ }
+
+ // If the metadata indicates the (rollup) metric has been tombstoned, the metric is
+		// not ingested for aggregation. However, we do not update the policies associated
+ // with this entry and mark it tombstoned because there may be a different raw metric
+ // generating this same (rollup) metric that is actively emitting, meaning this entry
+ // may still be very much alive.
+ if sm.Tombstoned {
+ e.RUnlock()
+ timeLock.RUnlock()
+ e.metrics.timed.tombstonedMetadata.Inc(1)
+ return nil
+ }
+
+ // It is expected that there is at least one pipeline in the metadata.
+ if len(sm.Pipelines) == 0 {
+ e.RUnlock()
+ timeLock.RUnlock()
+ e.metrics.timed.noPipelinesInMetadata.Inc(1)
+ return errNoPipelinesInMetadata
+ }
+
+ if !e.shouldUpdateStagedMetadatasWithLock(sm) {
+ err = e.addTimedWithStagedMetadatasAndLock(metric)
+ e.RUnlock()
+ timeLock.RUnlock()
+ return err
+ }
+ e.RUnlock()
+
+ e.Lock()
+ if e.closed {
+ e.Unlock()
+ timeLock.RUnlock()
+ return errEntryClosed
+ }
+
+ if e.shouldUpdateStagedMetadatasWithLock(sm) {
+ err := e.updateStagedMetadatasWithLock(metric.ID, metric.Type,
+ hasDefaultMetadatas, sm)
+ if err != nil {
+ // NB(xichen): if an error occurred during policy update, the policies
+ // will remain as they are, i.e., there are no half-updated policies.
+ e.Unlock()
+ timeLock.RUnlock()
+ return err
+ }
+ e.metrics.timed.metadatasUpdates.Inc(1)
+ }
+
+ err = e.addTimedWithStagedMetadatasAndLock(metric)
+ e.Unlock()
+ timeLock.RUnlock()
+
+ return err
+ }
+
// Check if we should update metadata, and add metric if not.
key := aggregationKey{
aggregationID: metadata.AggregationID,
@@ -774,6 +862,19 @@ func (e *Entry) addTimedWithLock(
return value.elem.Value.(metricElem).AddValue(timestamp, metric.Value)
}
+func (e *Entry) addTimedWithStagedMetadatasAndLock(
+ metric aggregated.Metric,
+) error {
+ timestamp := time.Unix(0, metric.TimeNanos)
+ multiErr := xerrors.NewMultiError()
+ for _, val := range e.aggregations {
+ if err := val.elem.Value.(metricElem).AddValue(timestamp, metric.Value); err != nil {
+ multiErr = multiErr.Add(err)
+ }
+ }
+ return multiErr.FinalError()
+}
+
func (e *Entry) addForwarded(
metric aggregated.ForwardedMetric,
metadata metadata.ForwardMetadata,
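`addTimed` validates the staged metadatas under the read lock and only upgrades to the write lock when an update is actually needed; the condition is then re-checked, because another goroutine may have performed the update in the unlocked window. The generic shape of that pattern (plain Go; `needsUpdate`, `update`, and `doWork` are hypothetical placeholders):

```go
mu.RLock()
if !needsUpdate() {
	err := doWork()
	mu.RUnlock()
	return err
}
mu.RUnlock()

mu.Lock()
// Re-check after upgrading: the state may have changed while no lock was held.
if needsUpdate() {
	if err := update(); err != nil {
		mu.Unlock()
		return err
	}
}
err := doWork()
mu.Unlock()
return err
```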
diff --git a/src/aggregator/aggregator/entry_test.go b/src/aggregator/aggregator/entry_test.go
index 0d25154f8a..f03fb94b25 100644
--- a/src/aggregator/aggregator/entry_test.go
+++ b/src/aggregator/aggregator/entry_test.go
@@ -999,11 +999,11 @@ func TestEntryAddUntimedWithInvalidAggregationType(t *testing.T) {
}
}
-func TestEntryAddUntimedWithInvalidPipeline(t *testing.T) {
+func TestEntryAddUntimedWithNoRollup(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- invalidPipeline := metadata.PipelineMetadata{
+ pipelineNoRollup := metadata.PipelineMetadata{
Pipeline: applied.NewPipeline([]applied.OpUnion{
{
Type: pipeline.TransformationOpType,
@@ -1013,11 +1013,10 @@ func TestEntryAddUntimedWithInvalidPipeline(t *testing.T) {
}
e, _, _ := testEntry(ctrl, testEntryOptions{})
metadatas := metadata.StagedMetadatas{
- {Metadata: metadata.Metadata{Pipelines: []metadata.PipelineMetadata{invalidPipeline}}},
+ {Metadata: metadata.Metadata{Pipelines: []metadata.PipelineMetadata{pipelineNoRollup}}},
}
err := e.AddUntimed(testCounter, metadatas)
- require.Error(t, err)
- require.True(t, strings.Contains(err.Error(), "has no rollup operations"))
+ require.NoError(t, err)
}
func TestShouldUpdateStagedMetadataWithLock(t *testing.T) {
diff --git a/src/aggregator/aggregator/flush_times_mgr.go b/src/aggregator/aggregator/flush_times_mgr.go
index 361338e989..34f19f0b0d 100644
--- a/src/aggregator/aggregator/flush_times_mgr.go
+++ b/src/aggregator/aggregator/flush_times_mgr.go
@@ -91,10 +91,13 @@ type flushTimesManagerMetrics struct {
flushTimesPersist instrument.MethodMetrics
}
-func newFlushTimesManagerMetrics(scope tally.Scope) flushTimesManagerMetrics {
+func newFlushTimesManagerMetrics(
+ scope tally.Scope,
+ opts instrument.TimerOptions,
+) flushTimesManagerMetrics {
return flushTimesManagerMetrics{
flushTimesUnmarshalErrors: scope.Counter("flush-times-unmarshal-errors"),
- flushTimesPersist: instrument.NewMethodMetrics(scope, "flush-times-persist", 1.0),
+ flushTimesPersist: instrument.NewMethodMetrics(scope, "flush-times-persist", opts),
}
}
@@ -126,7 +129,8 @@ func NewFlushTimesManager(opts FlushTimesManagerOptions) FlushTimesManager {
flushTimesKeyFmt: opts.FlushTimesKeyFmt(),
flushTimesStore: opts.FlushTimesStore(),
flushTimesPersistRetrier: opts.FlushTimesPersistRetrier(),
- metrics: newFlushTimesManagerMetrics(instrumentOpts.MetricsScope()),
+ metrics: newFlushTimesManagerMetrics(instrumentOpts.MetricsScope(),
+ instrumentOpts.TimerOptions()),
}
mgr.Lock()
mgr.resetWithLock()
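Passing `instrument.TimerOptions` through instead of a hard-coded `1.0` sampling rate lets timer behavior be configured centrally on the instrument options. A minimal construction sketch (assuming the `instrument.NewOptions` constructor from `src/x/instrument`):

```go
iopts := instrument.NewOptions()
metrics := instrument.NewMethodMetrics(
	iopts.MetricsScope(),
	"flush-times-persist",
	iopts.TimerOptions(), // replaces the previous hard-coded 1.0 sampling rate
)
metrics.ReportSuccess(42 * time.Millisecond)
```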
diff --git a/src/aggregator/aggregator/forwarded_writer.go b/src/aggregator/aggregator/forwarded_writer.go
index 5741c12530..882fc54aa3 100644
--- a/src/aggregator/aggregator/forwarded_writer.go
+++ b/src/aggregator/aggregator/forwarded_writer.go
@@ -91,10 +91,14 @@ type forwardedWriterMetrics struct {
unregisterAggregationNotFound tally.Counter
prepare tally.Counter
flushSuccess tally.Counter
- flushErrors tally.Counter
+ flushErrorsClient tally.Counter
}
func newForwardedWriterMetrics(scope tally.Scope) forwardedWriterMetrics {
+ const (
+ errorsName = "errors"
+ reasonTag = "reason"
+ )
registerScope := scope.Tagged(map[string]string{"action": "register"})
unregisterScope := scope.Tagged(map[string]string{"action": "unregister"})
prepareScope := scope.Tagged(map[string]string{"action": "prepare"})
@@ -102,21 +106,23 @@ func newForwardedWriterMetrics(scope tally.Scope) forwardedWriterMetrics {
return forwardedWriterMetrics{
registerSuccess: registerScope.Counter("success"),
registerWriterClosed: registerScope.Tagged(map[string]string{
- "reason": "writer-closed",
- }).Counter("errors"),
+ reasonTag: "writer-closed",
+ }).Counter(errorsName),
unregisterSuccess: unregisterScope.Counter("success"),
unregisterWriterClosed: unregisterScope.Tagged(map[string]string{
- "reason": "writer-closed",
- }).Counter("errors"),
+ reasonTag: "writer-closed",
+ }).Counter(errorsName),
unregisterMetricNotFound: unregisterScope.Tagged(map[string]string{
- "reason": "metric-not-found",
- }).Counter("errors"),
+ reasonTag: "metric-not-found",
+ }).Counter(errorsName),
unregisterAggregationNotFound: unregisterScope.Tagged(map[string]string{
- "reason": "aggregation-not-found",
- }).Counter("errors"),
+ reasonTag: "aggregation-not-found",
+ }).Counter(errorsName),
prepare: prepareScope.Counter("prepare"),
flushSuccess: flushScope.Counter("success"),
- flushErrors: flushScope.Counter("errors"),
+ flushErrorsClient: flushScope.Tagged(map[string]string{
+ reasonTag: "client-flush-error",
+ }).Counter(errorsName),
}
}
@@ -214,7 +220,7 @@ func (w *forwardedWriter) Prepare() {
func (w *forwardedWriter) Flush() error {
if err := w.client.Flush(); err != nil {
- w.metrics.flushErrors.Inc(1)
+ w.metrics.flushErrorsClient.Inc(1)
return err
}
w.metrics.flushSuccess.Inc(1)
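Flush errors are now emitted under the shared `errors` counter name and differentiated by a `reason` tag, so adding a new failure mode means adding a tag value rather than a new metric name. A hedged tally sketch of the same convention:

```go
scope := tally.NewTestScope("forwarded-writer", nil)
flushScope := scope.Tagged(map[string]string{"action": "flush"})

flushErrorsClient := flushScope.Tagged(map[string]string{
	"reason": "client-flush-error",
}).Counter("errors")

// Queryable as errors{action="flush",reason="client-flush-error"}.
flushErrorsClient.Inc(1)
```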
diff --git a/src/aggregator/aggregator/gauge_elem_gen.go b/src/aggregator/aggregator/gauge_elem_gen.go
index 0ccbf80ca2..0a7de840b7 100644
--- a/src/aggregator/aggregator/gauge_elem_gen.go
+++ b/src/aggregator/aggregator/gauge_elem_gen.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -63,10 +63,10 @@ type GaugeElem struct {
elemBase
gaugeElemBase
- values []timedGauge // metric aggregations sorted by time in ascending order
- toConsume []timedGauge // small buffer to avoid memory allocations during consumption
- lastConsumedAtNanos int64 // last consumed at in Unix nanoseconds
- lastConsumedValues []float64 // last consumed values
+ values []timedGauge // metric aggregations sorted by time in ascending order
+ toConsume []timedGauge // small buffer to avoid memory allocations during consumption
+ lastConsumedAtNanos int64 // last consumed at in Unix nanoseconds
+ lastConsumedValues []transformation.Datapoint // last consumed values
}
// NewGaugeElem creates a new element for the given metric type.
@@ -132,11 +132,11 @@ func (e *GaugeElem) ResetSetData(
}
numAggTypes := len(e.aggTypes)
if cap(e.lastConsumedValues) < numAggTypes {
- e.lastConsumedValues = make([]float64, numAggTypes)
+ e.lastConsumedValues = make([]transformation.Datapoint, numAggTypes)
}
e.lastConsumedValues = e.lastConsumedValues[:numAggTypes]
for i := 0; i < len(e.lastConsumedValues); i++ {
- e.lastConsumedValues[i] = nan
+ e.lastConsumedValues[i] = transformation.Datapoint{Value: nan}
}
return nil
}
@@ -153,7 +153,7 @@ func (e *GaugeElem) AddUnion(timestamp time.Time, mu unaggregated.MetricUnion) e
lockedAgg.Unlock()
return errAggregationClosed
}
- lockedAgg.aggregation.AddUnion(mu)
+ lockedAgg.aggregation.AddUnion(timestamp, mu)
lockedAgg.Unlock()
return nil
}
@@ -170,7 +170,7 @@ func (e *GaugeElem) AddValue(timestamp time.Time, value float64) error {
lockedAgg.Unlock()
return errAggregationClosed
}
- lockedAgg.aggregation.Add(value)
+ lockedAgg.aggregation.Add(timestamp, value)
lockedAgg.Unlock()
return nil
}
@@ -196,7 +196,7 @@ func (e *GaugeElem) AddUnique(timestamp time.Time, values []float64, sourceID ui
}
lockedAgg.sourcesSeen.Set(source)
for _, v := range values {
- lockedAgg.aggregation.Add(v)
+ lockedAgg.aggregation.Add(timestamp, v)
}
lockedAgg.Unlock()
return nil
@@ -412,28 +412,52 @@ func (e *GaugeElem) processValueWithAggregationLock(
)
for aggTypeIdx, aggType := range e.aggTypes {
value := lockedAgg.aggregation.ValueOf(aggType)
- for i := 0; i < transformations.Len(); i++ {
- transformType := transformations.At(i).Transformation.Type
- if transformType.IsUnaryTransform() {
- fn := transformType.MustUnaryTransform()
- res := fn(transformation.Datapoint{TimeNanos: timeNanos, Value: value})
+ for _, transformOp := range transformations {
+ unaryOp, isUnaryOp := transformOp.UnaryTransform()
+ binaryOp, isBinaryOp := transformOp.BinaryTransform()
+ switch {
+ case isUnaryOp:
+ curr := transformation.Datapoint{
+ TimeNanos: timeNanos,
+ Value: value,
+ }
+
+ res := unaryOp.Evaluate(curr)
+
value = res.Value
- } else {
- fn := transformType.MustBinaryTransform()
- prev := transformation.Datapoint{TimeNanos: e.lastConsumedAtNanos, Value: e.lastConsumedValues[aggTypeIdx]}
- curr := transformation.Datapoint{TimeNanos: timeNanos, Value: value}
- res := fn(prev, curr)
+
+ case isBinaryOp:
+ lastTimeNanos := e.lastConsumedAtNanos
+ prev := transformation.Datapoint{
+ TimeNanos: lastTimeNanos,
+ Value: e.lastConsumedValues[aggTypeIdx].Value,
+ }
+
+ currTimeNanos := timeNanos
+ curr := transformation.Datapoint{
+ TimeNanos: currTimeNanos,
+ Value: value,
+ }
+
+ res := binaryOp.Evaluate(prev, curr)
+
// NB: we only need to record the value needed for derivative transformations.
// We currently only support first-order derivative transformations so we only
// need to keep one value. In the future if we need to support higher-order
// derivative transformations, we need to store an array of values here.
- e.lastConsumedValues[aggTypeIdx] = value
+ if !math.IsNaN(curr.Value) {
+ e.lastConsumedValues[aggTypeIdx] = curr
+ }
+
value = res.Value
+
}
}
+
if discardNaNValues && math.IsNaN(value) {
continue
}
+
if !e.parsedPipeline.HasRollup {
switch e.idPrefixSuffixType {
case NoPrefixNoSuffix:
diff --git a/src/aggregator/aggregator/generic_elem.go b/src/aggregator/aggregator/generic_elem.go
index a0f61ccab3..f5c57e2a54 100644
--- a/src/aggregator/aggregator/generic_elem.go
+++ b/src/aggregator/aggregator/generic_elem.go
@@ -43,14 +43,17 @@ type typeSpecificAggregation interface {
generic.Type
// Add adds a new metric value.
- Add(value float64)
+ Add(t time.Time, value float64)
// AddUnion adds a new metric value union.
- AddUnion(mu unaggregated.MetricUnion)
+ AddUnion(t time.Time, mu unaggregated.MetricUnion)
// ValueOf returns the value for the given aggregation type.
ValueOf(aggType maggregation.Type) float64
+	// LastAt returns the time of the last received value.
+ LastAt() time.Time
+
// Close closes the aggregation object.
Close()
}
@@ -117,10 +120,10 @@ type GenericElem struct {
elemBase
typeSpecificElemBase
- values []timedAggregation // metric aggregations sorted by time in ascending order
- toConsume []timedAggregation // small buffer to avoid memory allocations during consumption
- lastConsumedAtNanos int64 // last consumed at in Unix nanoseconds
- lastConsumedValues []float64 // last consumed values
+ values []timedAggregation // metric aggregations sorted by time in ascending order
+ toConsume []timedAggregation // small buffer to avoid memory allocations during consumption
+ lastConsumedAtNanos int64 // last consumed at in Unix nanoseconds
+ lastConsumedValues []transformation.Datapoint // last consumed values
}
// NewGenericElem creates a new element for the given metric type.
@@ -186,11 +189,11 @@ func (e *GenericElem) ResetSetData(
}
numAggTypes := len(e.aggTypes)
if cap(e.lastConsumedValues) < numAggTypes {
- e.lastConsumedValues = make([]float64, numAggTypes)
+ e.lastConsumedValues = make([]transformation.Datapoint, numAggTypes)
}
e.lastConsumedValues = e.lastConsumedValues[:numAggTypes]
for i := 0; i < len(e.lastConsumedValues); i++ {
- e.lastConsumedValues[i] = nan
+ e.lastConsumedValues[i] = transformation.Datapoint{Value: nan}
}
return nil
}
@@ -207,7 +210,7 @@ func (e *GenericElem) AddUnion(timestamp time.Time, mu unaggregated.MetricUnion)
lockedAgg.Unlock()
return errAggregationClosed
}
- lockedAgg.aggregation.AddUnion(mu)
+ lockedAgg.aggregation.AddUnion(timestamp, mu)
lockedAgg.Unlock()
return nil
}
@@ -224,7 +227,7 @@ func (e *GenericElem) AddValue(timestamp time.Time, value float64) error {
lockedAgg.Unlock()
return errAggregationClosed
}
- lockedAgg.aggregation.Add(value)
+ lockedAgg.aggregation.Add(timestamp, value)
lockedAgg.Unlock()
return nil
}
@@ -250,7 +253,7 @@ func (e *GenericElem) AddUnique(timestamp time.Time, values []float64, sourceID
}
lockedAgg.sourcesSeen.Set(source)
for _, v := range values {
- lockedAgg.aggregation.Add(v)
+ lockedAgg.aggregation.Add(timestamp, v)
}
lockedAgg.Unlock()
return nil
@@ -466,28 +469,52 @@ func (e *GenericElem) processValueWithAggregationLock(
)
for aggTypeIdx, aggType := range e.aggTypes {
value := lockedAgg.aggregation.ValueOf(aggType)
- for i := 0; i < transformations.Len(); i++ {
- transformType := transformations.At(i).Transformation.Type
- if transformType.IsUnaryTransform() {
- fn := transformType.MustUnaryTransform()
- res := fn(transformation.Datapoint{TimeNanos: timeNanos, Value: value})
+ for _, transformOp := range transformations {
+ unaryOp, isUnaryOp := transformOp.UnaryTransform()
+ binaryOp, isBinaryOp := transformOp.BinaryTransform()
+ switch {
+ case isUnaryOp:
+ curr := transformation.Datapoint{
+ TimeNanos: timeNanos,
+ Value: value,
+ }
+
+ res := unaryOp.Evaluate(curr)
+
value = res.Value
- } else {
- fn := transformType.MustBinaryTransform()
- prev := transformation.Datapoint{TimeNanos: e.lastConsumedAtNanos, Value: e.lastConsumedValues[aggTypeIdx]}
- curr := transformation.Datapoint{TimeNanos: timeNanos, Value: value}
- res := fn(prev, curr)
+
+ case isBinaryOp:
+ lastTimeNanos := e.lastConsumedAtNanos
+ prev := transformation.Datapoint{
+ TimeNanos: lastTimeNanos,
+ Value: e.lastConsumedValues[aggTypeIdx].Value,
+ }
+
+ currTimeNanos := timeNanos
+ curr := transformation.Datapoint{
+ TimeNanos: currTimeNanos,
+ Value: value,
+ }
+
+ res := binaryOp.Evaluate(prev, curr)
+
// NB: we only need to record the value needed for derivative transformations.
// We currently only support first-order derivative transformations so we only
// need to keep one value. In the future if we need to support higher-order
// derivative transformations, we need to store an array of values here.
- e.lastConsumedValues[aggTypeIdx] = value
+ if !math.IsNaN(curr.Value) {
+ e.lastConsumedValues[aggTypeIdx] = curr
+ }
+
value = res.Value
+
}
}
+
if discardNaNValues && math.IsNaN(value) {
continue
}
+
if !e.parsedPipeline.HasRollup {
switch e.idPrefixSuffixType {
case NoPrefixNoSuffix:
diff --git a/src/aggregator/aggregator/handler/config.go b/src/aggregator/aggregator/handler/config.go
index d631e68e5f..f929ae4942 100644
--- a/src/aggregator/aggregator/handler/config.go
+++ b/src/aggregator/aggregator/handler/config.go
@@ -33,6 +33,7 @@ import (
"github.com/m3db/m3/src/msg/producer"
"github.com/m3db/m3/src/msg/producer/config"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
"go.uber.org/zap"
@@ -53,6 +54,7 @@ type FlushHandlerConfiguration struct {
func (c FlushHandlerConfiguration) NewHandler(
cs client.Client,
instrumentOpts instrument.Options,
+ rwOpts xio.Options,
) (Handler, error) {
if len(c.Handlers) == 0 {
return nil, errNoHandlerConfiguration
@@ -61,7 +63,7 @@ func (c FlushHandlerConfiguration) NewHandler(
handlers = make([]Handler, 0, len(c.Handlers))
)
for _, hc := range c.Handlers {
- handler, err := hc.newHandler(cs, instrumentOpts)
+ handler, err := hc.newHandler(cs, instrumentOpts, rwOpts)
if err != nil {
return nil, err
}
@@ -110,6 +112,7 @@ type flushHandlerConfiguration struct {
func (c flushHandlerConfiguration) newHandler(
cs client.Client,
instrumentOpts instrument.Options,
+ rwOpts xio.Options,
) (Handler, error) {
if err := c.Validate(); err != nil {
return nil, err
@@ -118,6 +121,7 @@ func (c flushHandlerConfiguration) newHandler(
return c.DynamicBackend.newProtobufHandler(
cs,
instrumentOpts,
+ rwOpts,
)
}
switch c.StaticBackend.Type {
@@ -163,13 +167,14 @@ type dynamicBackendConfiguration struct {
func (c *dynamicBackendConfiguration) newProtobufHandler(
cs client.Client,
instrumentOpts instrument.Options,
+ rwOpts xio.Options,
) (Handler, error) {
scope := instrumentOpts.MetricsScope().Tagged(map[string]string{
"backend": c.Name,
"component": "producer",
})
instrumentOpts = instrumentOpts.SetMetricsScope(scope)
- p, err := c.Producer.NewProducer(cs, instrumentOpts)
+ p, err := c.Producer.NewProducer(cs, instrumentOpts, rwOpts)
if err != nil {
return nil, err
}
diff --git a/src/aggregator/aggregator/handler/writer/sharded.go b/src/aggregator/aggregator/handler/writer/sharded.go
new file mode 100644
index 0000000000..6bfbf58963
--- /dev/null
+++ b/src/aggregator/aggregator/handler/writer/sharded.go
@@ -0,0 +1,154 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package writer
+
+import (
+ "sync"
+
+ "github.com/m3db/m3/src/aggregator/sharding"
+ "github.com/m3db/m3/src/metrics/metric/aggregated"
+ xerrors "github.com/m3db/m3/src/x/errors"
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/pkg/errors"
+)
+
+var (
+ errShardedWriterNoWriters = errors.New("no backing writers provided")
+ errShardedWriterClosed = errors.New("sharded writer closed")
+)
+
+type shardedWriter struct {
+ mutex sync.RWMutex
+ closed bool
+ writers []*threadsafeWriter
+ shardFn sharding.AggregatedShardFn
+ numShards int
+}
+
+var _ Writer = &shardedWriter{}
+
+// NewShardedWriter shards writes to the provided writers with the given sharding fn.
+func NewShardedWriter(
+ writers []Writer,
+ shardFn sharding.AggregatedShardFn,
+ iOpts instrument.Options,
+) (Writer, error) {
+ if len(writers) == 0 {
+ return nil, errShardedWriterNoWriters
+ }
+
+ threadsafeWriters := make([]*threadsafeWriter, 0, len(writers))
+ for _, w := range writers {
+ threadsafeWriters = append(threadsafeWriters, &threadsafeWriter{
+ writer: w,
+ })
+ }
+
+ return &shardedWriter{
+ numShards: len(writers),
+ writers: threadsafeWriters,
+ shardFn: shardFn,
+ }, nil
+}
+
+func (w *shardedWriter) Write(mp aggregated.ChunkedMetricWithStoragePolicy) error {
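+	// Hold the read lock for the duration of the write: concurrent writes to
+	// different shards proceed in parallel, while Close takes the write lock.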
+ w.mutex.RLock()
+ if w.closed {
+ w.mutex.RUnlock()
+ return errShardedWriterClosed
+ }
+
+ shardID := w.shardFn(mp.ChunkedID, w.numShards)
+ writerErr := w.writers[shardID].Write(mp)
+ w.mutex.RUnlock()
+
+ return writerErr
+}
+
+func (w *shardedWriter) Flush() error {
+ w.mutex.RLock()
+ defer w.mutex.RUnlock()
+
+ if w.closed {
+ return errShardedWriterClosed
+ }
+
+ var multiErr xerrors.MultiError
+ for i := 0; i < w.numShards; i++ {
+ multiErr = multiErr.Add(w.writers[i].Flush())
+ }
+
+ if multiErr.Empty() {
+ return nil
+ }
+
+ return errors.WithMessage(multiErr.FinalError(), "failed to flush sharded writer")
+}
+
+func (w *shardedWriter) Close() error {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+
+ if w.closed {
+ return errShardedWriterClosed
+ }
+ w.closed = true
+
+ var multiErr xerrors.MultiError
+ for i := 0; i < w.numShards; i++ {
+ multiErr = multiErr.Add(w.writers[i].Close())
+ }
+
+ if multiErr.Empty() {
+ return nil
+ }
+
+ return errors.WithMessage(multiErr.FinalError(), "failed to close sharded writer")
+}
+
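+// threadsafeWriter serializes all calls to a single backing writer, since
+// concurrent writes routed to the same shard would otherwise race on it.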
+type threadsafeWriter struct {
+ mutex sync.Mutex
+ writer Writer
+}
+
+var _ Writer = &threadsafeWriter{}
+
+func (w *threadsafeWriter) Write(mp aggregated.ChunkedMetricWithStoragePolicy) error {
+ w.mutex.Lock()
+ err := w.writer.Write(mp)
+ w.mutex.Unlock()
+ return err
+}
+
+func (w *threadsafeWriter) Flush() error {
+ w.mutex.Lock()
+ err := w.writer.Flush()
+ w.mutex.Unlock()
+ return err
+}
+
+func (w *threadsafeWriter) Close() error {
+ w.mutex.Lock()
+ err := w.writer.Close()
+ w.mutex.Unlock()
+ return err
+}
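
Wiring the sharded writer together takes only a few lines; a minimal sketch, assuming blackhole writers as stand-in backends and an illustrative (non-production) shard function in place of a hash-based sharding.AggregatedShardFn:

    package main

    import (
        "github.com/m3db/m3/src/aggregator/aggregator/handler/writer"
        "github.com/m3db/m3/src/metrics/metric/id"
        "github.com/m3db/m3/src/x/instrument"
    )

    func main() {
        // Blackhole writers stand in for real per-backend writers here.
        writers := []writer.Writer{writer.NewBlackholeWriter(), writer.NewBlackholeWriter()}

        // Stand-in shard fn; real deployments would use a hash-based
        // sharding.AggregatedShardFn rather than this length modulo.
        shardFn := func(cid id.ChunkedID, numShards int) uint32 {
            return uint32(len(cid.Data)) % uint32(numShards)
        }

        w, err := writer.NewShardedWriter(writers, shardFn, instrument.NewOptions())
        if err != nil {
            panic(err)
        }
        defer w.Close() // nolint: errcheck
    }

The test that follows exercises the same wiring with gomock writers.
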
diff --git a/src/aggregator/aggregator/handler/writer/sharded_test.go b/src/aggregator/aggregator/handler/writer/sharded_test.go
new file mode 100644
index 0000000000..a07e4d288d
--- /dev/null
+++ b/src/aggregator/aggregator/handler/writer/sharded_test.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package writer
+
+import (
+ "testing"
+
+ "github.com/m3db/m3/src/metrics/metric/aggregated"
+ "github.com/m3db/m3/src/metrics/metric/id"
+ "github.com/m3db/m3/src/x/instrument"
+ xtest "github.com/m3db/m3/src/x/test"
+
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewShardedWriter(t *testing.T) {
+ ctrl := gomock.NewController(xtest.Reporter{t})
+ defer ctrl.Finish()
+
+ w1, w2 := NewMockWriter(ctrl), NewMockWriter(ctrl)
+ writers := []Writer{w1, w2}
+
+ shardFn := func(_ id.ChunkedID, _ int) uint32 {
+ return 0
+ }
+
+ w, err := NewShardedWriter(writers, shardFn, instrument.NewOptions())
+ require.NoError(t, err)
+
+ metric := aggregated.ChunkedMetricWithStoragePolicy{
+ ChunkedMetric: aggregated.ChunkedMetric{
+ ChunkedID: id.ChunkedID{
+ Data: []byte("some-random-id"),
+ },
+ },
+ }
+
+ w1.EXPECT().Write(metric).Return(nil)
+ require.NoError(t, w.Write(metric))
+
+ w1.EXPECT().Flush().Return(nil)
+ w2.EXPECT().Flush().Return(nil)
+ require.NoError(t, w.Flush())
+
+ w1.EXPECT().Close().Return(nil)
+ w2.EXPECT().Close().Return(nil)
+ require.NoError(t, w.Close())
+}
diff --git a/src/aggregator/aggregator/map.go b/src/aggregator/aggregator/map.go
index cc5d0f3877..130c20d680 100644
--- a/src/aggregator/aggregator/map.go
+++ b/src/aggregator/aggregator/map.go
@@ -178,6 +178,24 @@ func (m *metricMap) AddTimed(
return err
}
+func (m *metricMap) AddTimedWithStagedMetadatas(
+ metric aggregated.Metric,
+ metas metadata.StagedMetadatas,
+) error {
+ key := entryKey{
+ metricCategory: timedMetric,
+ metricType: metric.Type,
+ idHash: hash.Murmur3Hash128(metric.ID),
+ }
+ entry, err := m.findOrCreate(key)
+ if err != nil {
+ return err
+ }
+ err = entry.AddTimedWithStagedMetadatas(metric, metas)
+ entry.DecWriter()
+ return err
+}
+
func (m *metricMap) AddForwarded(
metric aggregated.ForwardedMetric,
metadata metadata.ForwardMetadata,
diff --git a/src/aggregator/aggregator/options.go b/src/aggregator/aggregator/options.go
index c511fb0bce..006801f365 100644
--- a/src/aggregator/aggregator/options.go
+++ b/src/aggregator/aggregator/options.go
@@ -26,6 +26,7 @@ import (
"github.com/m3db/m3/src/aggregator/aggregation/quantile/cm"
"github.com/m3db/m3/src/aggregator/aggregator/handler"
+ "github.com/m3db/m3/src/aggregator/aggregator/handler/writer"
"github.com/m3db/m3/src/aggregator/client"
"github.com/m3db/m3/src/aggregator/runtime"
"github.com/m3db/m3/src/aggregator/sharding"
@@ -199,6 +200,12 @@ type Options interface {
// FlushHandler returns the handler that flushes buffered encoders.
FlushHandler() handler.Handler
+ // SetPassthroughWriter sets the writer for passthrough metrics.
+ SetPassthroughWriter(value writer.Writer) Options
+
+ // PassthroughWriter returns the writer for passthrough metrics.
+ PassthroughWriter() writer.Writer
+
// SetEntryTTL sets the ttl for expiring stale entries.
SetEntryTTL(value time.Duration) Options
@@ -328,6 +335,7 @@ type options struct {
bufferDurationAfterShardCutoff time.Duration
flushManager FlushManager
flushHandler handler.Handler
+ passthroughWriter writer.Writer
entryTTL time.Duration
entryCheckInterval time.Duration
entryCheckBatchPercent float64
@@ -374,6 +382,7 @@ func NewOptions() Options {
shardFn: sharding.Murmur32Hash.MustShardFn(),
bufferDurationBeforeShardCutover: defaultBufferDurationBeforeShardCutover,
bufferDurationAfterShardCutoff: defaultBufferDurationAfterShardCutoff,
+ passthroughWriter: writer.NewBlackholeWriter(),
entryTTL: defaultEntryTTL,
entryCheckInterval: defaultEntryCheckInterval,
entryCheckBatchPercent: defaultEntryCheckBatchPercent,
@@ -591,6 +600,16 @@ func (o *options) FlushHandler() handler.Handler {
return o.flushHandler
}
+func (o *options) SetPassthroughWriter(value writer.Writer) Options {
+ opts := *o
+ opts.passthroughWriter = value
+ return &opts
+}
+
+func (o *options) PassthroughWriter() writer.Writer {
+ return o.passthroughWriter
+}
+
func (o *options) SetEntryTTL(value time.Duration) Options {
opts := *o
opts.entryTTL = value
diff --git a/src/aggregator/aggregator/options_test.go b/src/aggregator/aggregator/options_test.go
index 75c5545a71..3bb5d446d4 100644
--- a/src/aggregator/aggregator/options_test.go
+++ b/src/aggregator/aggregator/options_test.go
@@ -27,6 +27,7 @@ import (
"github.com/m3db/m3/src/aggregator/aggregation/quantile/cm"
"github.com/m3db/m3/src/aggregator/aggregator/handler"
+ "github.com/m3db/m3/src/aggregator/aggregator/handler/writer"
"github.com/m3db/m3/src/aggregator/client"
"github.com/m3db/m3/src/aggregator/runtime"
"github.com/m3db/m3/src/x/clock"
@@ -123,7 +124,10 @@ func TestSetStreamOptions(t *testing.T) {
}
func TestSetAdminClient(t *testing.T) {
- value := client.NewClient(client.NewOptions()).(client.AdminClient)
+ c, err := client.NewClient(client.NewOptions())
+ require.NoError(t, err)
+ value, ok := c.(client.AdminClient)
+ require.True(t, ok)
o := NewOptions().SetAdminClient(value)
require.True(t, value == o.AdminClient())
}
@@ -149,6 +153,15 @@ func TestSetFlushHandler(t *testing.T) {
require.Equal(t, h, o.FlushHandler())
}
+func TestSetPassthroughWriter(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ w := writer.NewMockWriter(ctrl)
+ o := NewOptions().SetPassthroughWriter(w)
+ require.Equal(t, w, o.PassthroughWriter())
+}
+
func TestSetEntryTTL(t *testing.T) {
value := time.Minute
o := NewOptions().SetEntryTTL(value)
diff --git a/src/aggregator/aggregator/shard.go b/src/aggregator/aggregator/shard.go
index 1acef7f3f8..712f717568 100644
--- a/src/aggregator/aggregator/shard.go
+++ b/src/aggregator/aggregator/shard.go
@@ -50,6 +50,11 @@ type addTimedFn func(
metadata metadata.TimedMetadata,
) error
+type addTimedWithStagedMetadatasFn func(
+ metric aggregated.Metric,
+ metas metadata.StagedMetadatas,
+) error
+
type addForwardedFn func(
metric aggregated.ForwardedMetric,
metadata metadata.ForwardMetadata,
@@ -80,12 +85,13 @@ type aggregatorShard struct {
earliestWritableNanos int64
latestWriteableNanos int64
- closed bool
- metricMap *metricMap
- metrics aggregatorShardMetrics
- addUntimedFn addUntimedFn
- addTimedFn addTimedFn
- addForwardedFn addForwardedFn
+ closed bool
+ metricMap *metricMap
+ metrics aggregatorShardMetrics
+ addUntimedFn addUntimedFn
+ addTimedFn addTimedFn
+ addTimedWithStagedMetadatasFn addTimedWithStagedMetadatasFn
+ addForwardedFn addForwardedFn
}
func newAggregatorShard(shard uint32, opts Options) *aggregatorShard {
@@ -97,8 +103,8 @@ func newAggregatorShard(shard uint32, opts Options) *aggregatorShard {
map[string]string{"shard": strconv.Itoa(int(shard))},
)
s := &aggregatorShard{
- shard: shard,
- nowFn: opts.ClockOptions().NowFn(),
+ shard: shard,
+ nowFn: opts.ClockOptions().NowFn(),
bufferDurationBeforeShardCutover: opts.BufferDurationBeforeShardCutover(),
bufferDurationAfterShardCutoff: opts.BufferDurationAfterShardCutoff(),
metricMap: newMetricMap(shard, opts),
@@ -106,6 +112,7 @@ func newAggregatorShard(shard uint32, opts Options) *aggregatorShard {
}
s.addUntimedFn = s.metricMap.AddUntimed
s.addTimedFn = s.metricMap.AddTimed
+ s.addTimedWithStagedMetadatasFn = s.metricMap.AddTimedWithStagedMetadatas
s.addForwardedFn = s.metricMap.AddForwarded
return s
}
@@ -201,6 +208,29 @@ func (s *aggregatorShard) AddTimed(
return nil
}
+func (s *aggregatorShard) AddTimedWithStagedMetadatas(
+ metric aggregated.Metric,
+ metas metadata.StagedMetadatas,
+) error {
+ s.RLock()
+ if s.closed {
+ s.RUnlock()
+ return errAggregatorShardClosed
+ }
+ if !s.isWritableWithLock() {
+ s.RUnlock()
+ s.metrics.notWriteableErrors.Inc(1)
+ return errAggregatorShardNotWriteable
+ }
+ err := s.addTimedWithStagedMetadatasFn(metric, metas)
+ s.RUnlock()
+ if err != nil {
+ return err
+ }
+ s.metrics.writeSucccess.Inc(1)
+ return nil
+}
+
func (s *aggregatorShard) AddForwarded(
metric aggregated.ForwardedMetric,
metadata metadata.ForwardMetadata,
diff --git a/src/aggregator/aggregator/timer_elem_gen.go b/src/aggregator/aggregator/timer_elem_gen.go
index e8e28014f5..a40370f3ac 100644
--- a/src/aggregator/aggregator/timer_elem_gen.go
+++ b/src/aggregator/aggregator/timer_elem_gen.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -63,10 +63,10 @@ type TimerElem struct {
elemBase
timerElemBase
- values []timedTimer // metric aggregations sorted by time in ascending order
- toConsume []timedTimer // small buffer to avoid memory allocations during consumption
- lastConsumedAtNanos int64 // last consumed at in Unix nanoseconds
- lastConsumedValues []float64 // last consumed values
+ values []timedTimer // metric aggregations sorted by time in ascending order
+ toConsume []timedTimer // small buffer to avoid memory allocations during consumption
+ lastConsumedAtNanos int64 // last consumed at in Unix nanoseconds
+ lastConsumedValues []transformation.Datapoint // last consumed values
}
// NewTimerElem creates a new element for the given metric type.
@@ -132,11 +132,11 @@ func (e *TimerElem) ResetSetData(
}
numAggTypes := len(e.aggTypes)
if cap(e.lastConsumedValues) < numAggTypes {
- e.lastConsumedValues = make([]float64, numAggTypes)
+ e.lastConsumedValues = make([]transformation.Datapoint, numAggTypes)
}
e.lastConsumedValues = e.lastConsumedValues[:numAggTypes]
for i := 0; i < len(e.lastConsumedValues); i++ {
- e.lastConsumedValues[i] = nan
+ e.lastConsumedValues[i] = transformation.Datapoint{Value: nan}
}
return nil
}
@@ -153,7 +153,7 @@ func (e *TimerElem) AddUnion(timestamp time.Time, mu unaggregated.MetricUnion) e
lockedAgg.Unlock()
return errAggregationClosed
}
- lockedAgg.aggregation.AddUnion(mu)
+ lockedAgg.aggregation.AddUnion(timestamp, mu)
lockedAgg.Unlock()
return nil
}
@@ -170,7 +170,7 @@ func (e *TimerElem) AddValue(timestamp time.Time, value float64) error {
lockedAgg.Unlock()
return errAggregationClosed
}
- lockedAgg.aggregation.Add(value)
+ lockedAgg.aggregation.Add(timestamp, value)
lockedAgg.Unlock()
return nil
}
@@ -196,7 +196,7 @@ func (e *TimerElem) AddUnique(timestamp time.Time, values []float64, sourceID ui
}
lockedAgg.sourcesSeen.Set(source)
for _, v := range values {
- lockedAgg.aggregation.Add(v)
+ lockedAgg.aggregation.Add(timestamp, v)
}
lockedAgg.Unlock()
return nil
@@ -412,28 +412,52 @@ func (e *TimerElem) processValueWithAggregationLock(
)
for aggTypeIdx, aggType := range e.aggTypes {
value := lockedAgg.aggregation.ValueOf(aggType)
- for i := 0; i < transformations.Len(); i++ {
- transformType := transformations.At(i).Transformation.Type
- if transformType.IsUnaryTransform() {
- fn := transformType.MustUnaryTransform()
- res := fn(transformation.Datapoint{TimeNanos: timeNanos, Value: value})
+ for _, transformOp := range transformations {
+ unaryOp, isUnaryOp := transformOp.UnaryTransform()
+ binaryOp, isBinaryOp := transformOp.BinaryTransform()
+ switch {
+ case isUnaryOp:
+ curr := transformation.Datapoint{
+ TimeNanos: timeNanos,
+ Value: value,
+ }
+
+ res := unaryOp.Evaluate(curr)
+
value = res.Value
- } else {
- fn := transformType.MustBinaryTransform()
- prev := transformation.Datapoint{TimeNanos: e.lastConsumedAtNanos, Value: e.lastConsumedValues[aggTypeIdx]}
- curr := transformation.Datapoint{TimeNanos: timeNanos, Value: value}
- res := fn(prev, curr)
+
+ case isBinaryOp:
+ lastTimeNanos := e.lastConsumedAtNanos
+ prev := transformation.Datapoint{
+ TimeNanos: lastTimeNanos,
+ Value: e.lastConsumedValues[aggTypeIdx].Value,
+ }
+
+ currTimeNanos := timeNanos
+ curr := transformation.Datapoint{
+ TimeNanos: currTimeNanos,
+ Value: value,
+ }
+
+ res := binaryOp.Evaluate(prev, curr)
+
// NB: we only need to record the value needed for derivative transformations.
// We currently only support first-order derivative transformations so we only
// need to keep one value. In the future if we need to support higher-order
// derivative transformations, we need to store an array of values here.
- e.lastConsumedValues[aggTypeIdx] = value
+ if !math.IsNaN(curr.Value) {
+ e.lastConsumedValues[aggTypeIdx] = curr
+ }
+
value = res.Value
+
}
}
+
if discardNaNValues && math.IsNaN(value) {
continue
}
+
if !e.parsedPipeline.HasRollup {
switch e.idPrefixSuffixType {
case NoPrefixNoSuffix:
diff --git a/src/aggregator/client/client.go b/src/aggregator/client/client.go
index 8900ec505c..34293e664a 100644
--- a/src/aggregator/client/client.go
+++ b/src/aggregator/client/client.go
@@ -30,10 +30,14 @@ import (
"github.com/m3db/m3/src/aggregator/sharding"
"github.com/m3db/m3/src/cluster/placement"
"github.com/m3db/m3/src/cluster/shard"
+ "github.com/m3db/m3/src/metrics/generated/proto/metricpb"
"github.com/m3db/m3/src/metrics/metadata"
+ "github.com/m3db/m3/src/metrics/metric"
"github.com/m3db/m3/src/metrics/metric/aggregated"
"github.com/m3db/m3/src/metrics/metric/id"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
+ "github.com/m3db/m3/src/metrics/policy"
+ "github.com/m3db/m3/src/msg/producer"
"github.com/m3db/m3/src/x/clock"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/instrument"
@@ -75,6 +79,18 @@ type Client interface {
metadata metadata.TimedMetadata,
) error
+ // WritePassthrough writes passthrough metrics.
+ WritePassthrough(
+ metric aggregated.Metric,
+ storagePolicy policy.StoragePolicy,
+ ) error
+
+ // WriteTimedWithStagedMetadatas writes timed metrics with staged metadatas.
+ WriteTimedWithStagedMetadatas(
+ metric aggregated.Metric,
+ metadatas metadata.StagedMetadatas,
+ ) error
+
// Flush flushes any remaining data buffered by the client.
Flush() error
@@ -107,19 +123,24 @@ type clientMetrics struct {
writeUntimedCounter instrument.MethodMetrics
writeUntimedBatchTimer instrument.MethodMetrics
writeUntimedGauge instrument.MethodMetrics
+ writePassthrough instrument.MethodMetrics
writeForwarded instrument.MethodMetrics
flush instrument.MethodMetrics
shardNotOwned tally.Counter
shardNotWriteable tally.Counter
}
-func newClientMetrics(scope tally.Scope, sampleRate float64) clientMetrics {
+func newClientMetrics(
+ scope tally.Scope,
+ opts instrument.TimerOptions,
+) clientMetrics {
return clientMetrics{
- writeUntimedCounter: instrument.NewMethodMetrics(scope, "writeUntimedCounter", sampleRate),
- writeUntimedBatchTimer: instrument.NewMethodMetrics(scope, "writeUntimedBatchTimer", sampleRate),
- writeUntimedGauge: instrument.NewMethodMetrics(scope, "writeUntimedGauge", sampleRate),
- writeForwarded: instrument.NewMethodMetrics(scope, "writeForwarded", sampleRate),
- flush: instrument.NewMethodMetrics(scope, "flush", sampleRate),
+ writeUntimedCounter: instrument.NewMethodMetrics(scope, "writeUntimedCounter", opts),
+ writeUntimedBatchTimer: instrument.NewMethodMetrics(scope, "writeUntimedBatchTimer", opts),
+ writeUntimedGauge: instrument.NewMethodMetrics(scope, "writeUntimedGauge", opts),
+ writePassthrough: instrument.NewMethodMetrics(scope, "writePassthrough", opts),
+ writeForwarded: instrument.NewMethodMetrics(scope, "writeForwarded", opts),
+ flush: instrument.NewMethodMetrics(scope, "flush", opts),
shardNotOwned: scope.Counter("shard-not-owned"),
shardNotWriteable: scope.Counter("shard-not-writeable"),
}
@@ -129,52 +150,96 @@ func newClientMetrics(scope tally.Scope, sampleRate float64) clientMetrics {
type client struct {
sync.RWMutex
- opts Options
+ opts Options
+ aggregatorClientType AggregatorClientType
+ state clientState
+
+ m3msg m3msgClient
+
nowFn clock.NowFn
shardCutoverWarmupDuration time.Duration
shardCutoffLingerDuration time.Duration
writerMgr instanceWriterManager
shardFn sharding.ShardFn
placementWatcher placement.StagedPlacementWatcher
- state clientState
- metrics clientMetrics
+
+ metrics clientMetrics
+}
+
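+// m3msgClient bundles the producer-side state used when the client runs in
+// m3msg mode: the producer itself, its shard count, and a message pool.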
+type m3msgClient struct {
+ producer producer.Producer
+ numShards uint32
+ messagePool *messagePool
}
// NewClient creates a new client.
-func NewClient(opts Options) Client {
+func NewClient(opts Options) (Client, error) {
+ if err := opts.Validate(); err != nil {
+ return nil, err
+ }
+
var (
- instrumentOpts = opts.InstrumentOptions()
- writerMgrScope = instrumentOpts.MetricsScope().SubScope("writer-manager")
- writerMgrOpts = opts.SetInstrumentOptions(instrumentOpts.SetMetricsScope(writerMgrScope))
- writerMgr = newInstanceWriterManager(writerMgrOpts)
+ clientType = opts.AggregatorClientType()
+ instrumentOpts = opts.InstrumentOptions()
+ msgClient m3msgClient
+ writerMgr instanceWriterManager
+ placementWatcher placement.StagedPlacementWatcher
)
- onPlacementsAddedFn := func(placements []placement.Placement) {
- for _, placement := range placements {
- writerMgr.AddInstances(placement.Instances()) // nolint: errcheck
+ switch clientType {
+ case M3MsgAggregatorClient:
+ m3msgOpts := opts.M3MsgOptions()
+ if err := m3msgOpts.Validate(); err != nil {
+ return nil, err
}
- }
- onPlacementsRemovedFn := func(placements []placement.Placement) {
- for _, placement := range placements {
- writerMgr.RemoveInstances(placement.Instances()) // nolint: errcheck
+
+ producer := m3msgOpts.Producer()
+ if err := producer.Init(); err != nil {
+ return nil, err
}
+
+ msgClient = m3msgClient{
+ producer: producer,
+ numShards: producer.NumShards(),
+ messagePool: newMessagePool(),
+ }
+ case LegacyAggregatorClient:
+ writerMgrScope := instrumentOpts.MetricsScope().SubScope("writer-manager")
+ writerMgrOpts := opts.SetInstrumentOptions(instrumentOpts.SetMetricsScope(writerMgrScope))
+ writerMgr = newInstanceWriterManager(writerMgrOpts)
+ onPlacementsAddedFn := func(placements []placement.Placement) {
+ for _, placement := range placements {
+ writerMgr.AddInstances(placement.Instances()) // nolint: errcheck
+ }
+ }
+ onPlacementsRemovedFn := func(placements []placement.Placement) {
+ for _, placement := range placements {
+ writerMgr.RemoveInstances(placement.Instances()) // nolint: errcheck
+ }
+ }
+ activeStagedPlacementOpts := placement.NewActiveStagedPlacementOptions().
+ SetClockOptions(opts.ClockOptions()).
+ SetOnPlacementsAddedFn(onPlacementsAddedFn).
+ SetOnPlacementsRemovedFn(onPlacementsRemovedFn)
+ placementWatcherOpts := opts.StagedPlacementWatcherOptions().
+ SetActiveStagedPlacementOptions(activeStagedPlacementOpts)
+ placementWatcher = placement.NewStagedPlacementWatcher(placementWatcherOpts)
+ default:
+ return nil, fmt.Errorf("unrecognized client type: %v", clientType)
}
- activeStagedPlacementOpts := placement.NewActiveStagedPlacementOptions().
- SetClockOptions(opts.ClockOptions()).
- SetOnPlacementsAddedFn(onPlacementsAddedFn).
- SetOnPlacementsRemovedFn(onPlacementsRemovedFn)
- placementWatcherOpts := opts.StagedPlacementWatcherOptions().SetActiveStagedPlacementOptions(activeStagedPlacementOpts)
- placementWatcher := placement.NewStagedPlacementWatcher(placementWatcherOpts)
return &client{
- opts: opts,
- nowFn: opts.ClockOptions().NowFn(),
+ aggregatorClientType: clientType,
+ m3msg: msgClient,
+ opts: opts,
+ nowFn: opts.ClockOptions().NowFn(),
shardCutoverWarmupDuration: opts.ShardCutoverWarmupDuration(),
shardCutoffLingerDuration: opts.ShardCutoffLingerDuration(),
writerMgr: writerMgr,
shardFn: opts.ShardFn(),
placementWatcher: placementWatcher,
- metrics: newClientMetrics(instrumentOpts.MetricsScope(), instrumentOpts.MetricsSamplingRate()),
- }
+ metrics: newClientMetrics(instrumentOpts.MetricsScope(),
+ instrumentOpts.TimerOptions()),
+ }, nil
}
func (c *client) Init() error {
@@ -184,8 +249,20 @@ func (c *client) Init() error {
if c.state != clientUninitialized {
return errClientIsInitializedOrClosed
}
+
+ switch c.aggregatorClientType {
+ case M3MsgAggregatorClient:
+ // Nothing more to do.
+ case LegacyAggregatorClient:
+ if err := c.placementWatcher.Watch(); err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("unrecognized client type: %v", c.aggregatorClientType)
+ }
+
c.state = clientInitialized
- return c.placementWatcher.Watch()
+ return nil
}
func (c *client) WriteUntimedCounter(
@@ -256,6 +333,40 @@ func (c *client) WriteTimed(
return err
}
+func (c *client) WritePassthrough(
+ metric aggregated.Metric,
+ storagePolicy policy.StoragePolicy,
+) error {
+ callStart := c.nowFn()
+ payload := payloadUnion{
+ payloadType: passthroughType,
+ passthrough: passthroughPayload{
+ metric: metric,
+ storagePolicy: storagePolicy,
+ },
+ }
+ err := c.write(metric.ID, metric.TimeNanos, payload)
+ c.metrics.writePassthrough.ReportSuccessOrError(err, c.nowFn().Sub(callStart))
+ return err
+}
+
+func (c *client) WriteTimedWithStagedMetadatas(
+ metric aggregated.Metric,
+ metadatas metadata.StagedMetadatas,
+) error {
+ callStart := c.nowFn()
+ payload := payloadUnion{
+ payloadType: timedWithStagedMetadatasType,
+ timedWithStagedMetadatas: timedWithStagedMetadatas{
+ metric: metric,
+ metadatas: metadatas,
+ },
+ }
+ err := c.write(metric.ID, metric.TimeNanos, payload)
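+	// NB: this path is reported under the writeForwarded method metrics; it
+	// has no dedicated method metric of its own.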
+ c.metrics.writeForwarded.ReportSuccessOrError(err, c.nowFn().Sub(callStart))
+ return err
+}
+
func (c *client) WriteForwarded(
metric aggregated.ForwardedMetric,
metadata metadata.ForwardMetadata,
@@ -274,15 +385,23 @@ func (c *client) WriteForwarded(
}
func (c *client) Flush() error {
- callStart := c.nowFn()
+ var (
+ callStart = c.nowFn()
+ err error
+ )
c.RLock()
+ defer c.RUnlock()
+
if c.state != clientInitialized {
- c.RUnlock()
return errClientIsUninitializedOrClosed
}
- err := c.writerMgr.Flush()
- c.RUnlock()
- c.metrics.flush.ReportSuccessOrError(err, c.nowFn().Sub(callStart))
+
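+	// Only the legacy client buffers writes locally; the m3msg producer
+	// manages its own flushing, so there is nothing to flush for it here.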
+ switch c.aggregatorClientType {
+ case LegacyAggregatorClient:
+ err = c.writerMgr.Flush()
+ c.metrics.flush.ReportSuccessOrError(err, c.nowFn().Sub(callStart))
+ }
+
return err
}
@@ -293,12 +412,35 @@ func (c *client) Close() error {
if c.state != clientInitialized {
return errClientIsUninitializedOrClosed
}
+
+ var err error
+ switch c.aggregatorClientType {
+ case M3MsgAggregatorClient:
+ c.m3msg.producer.Close(producer.WaitForConsumption)
+ case LegacyAggregatorClient:
+ c.placementWatcher.Unwatch() // nolint: errcheck
+ err = c.writerMgr.Close()
+ default:
+ return fmt.Errorf("unrecognized client type: %v", c.aggregatorClientType)
+ }
+
c.state = clientClosed
- c.placementWatcher.Unwatch() // nolint: errcheck
- return c.writerMgr.Close()
+
+ return err
}
func (c *client) write(metricID id.RawID, timeNanos int64, payload payloadUnion) error {
+ switch c.aggregatorClientType {
+ case LegacyAggregatorClient:
+ return c.writeLegacy(metricID, timeNanos, payload)
+ case M3MsgAggregatorClient:
+ return c.writeM3Msg(metricID, timeNanos, payload)
+ default:
+ return fmt.Errorf("unrecognized client type: %v", c.aggregatorClientType)
+ }
+}
+
+func (c *client) writeLegacy(metricID id.RawID, timeNanos int64, payload payloadUnion) error {
c.RLock()
if c.state != clientInitialized {
c.RUnlock()
@@ -338,12 +480,30 @@ func (c *client) write(metricID id.RawID, timeNanos int64, payload payloadUnion)
multiErr = multiErr.Add(err)
}
}
+
onPlacementDoneFn()
onStagedPlacementDoneFn()
c.RUnlock()
return multiErr.FinalError()
}
+func (c *client) writeM3Msg(metricID id.RawID, timeNanos int64, payload payloadUnion) error {
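+	// Route by shard, encode into a pooled message, and hand it to the
+	// producer; on failure the message is finalized as dropped, returning
+	// it to the pool.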
+ shard := c.shardFn(metricID, c.m3msg.numShards)
+
+ msg := c.m3msg.messagePool.Get()
+ if err := msg.Encode(shard, payload); err != nil {
+ msg.Finalize(producer.Dropped)
+ return err
+ }
+
+ if err := c.m3msg.producer.Produce(msg); err != nil {
+ msg.Finalize(producer.Dropped)
+ return err
+ }
+
+ return nil
+}
+
func (c *client) shouldWriteForShard(nowNanos int64, shard shard.Shard) bool {
writeEarliestNanos, writeLatestNanos := c.writeTimeRangeFor(shard)
return nowNanos >= writeEarliestNanos && nowNanos <= writeLatestNanos
@@ -364,4 +524,176 @@ func (c *client) writeTimeRangeFor(shard shard.Shard) (int64, int64) {
return earliestNanos, latestNanos
}
-func (c *client) nowNanos() int64 { return c.nowFn().UnixNano() }
+func (c *client) nowNanos() int64 {
+ return c.nowFn().UnixNano()
+}
+
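+// messagePool reuses message objects (and their protobuf scratch structs and
+// byte buffers) across writes to avoid per-write allocations.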
+type messagePool struct {
+ pool sync.Pool
+}
+
+func newMessagePool() *messagePool {
+ p := &messagePool{}
+ p.pool.New = func() interface{} {
+ return newMessage(p)
+ }
+ return p
+}
+
+func (m *messagePool) Get() *message {
+ return m.pool.Get().(*message)
+}
+
+func (m *messagePool) Put(msg *message) {
+ m.pool.Put(msg)
+}
+
+// Ensure message implements the m3msg producer.Message interface.
+var _ producer.Message = (*message)(nil)
+
+type message struct {
+ pool *messagePool
+ shard uint32
+
+ metric metricpb.MetricWithMetadatas
+ cm metricpb.CounterWithMetadatas
+ bm metricpb.BatchTimerWithMetadatas
+ gm metricpb.GaugeWithMetadatas
+ fm metricpb.ForwardedMetricWithMetadata
+ tm metricpb.TimedMetricWithMetadata
+ tms metricpb.TimedMetricWithMetadatas
+
+ buf []byte
+}
+
+func newMessage(pool *messagePool) *message {
+ return &message{
+ pool: pool,
+ }
+}
+
+func (m *message) Encode(
+ shard uint32,
+ payload payloadUnion,
+) error {
+ m.shard = shard
+
+ switch payload.payloadType {
+ case untimedType:
+ switch payload.untimed.metric.Type {
+ case metric.CounterType:
+ value := unaggregated.CounterWithMetadatas{
+ Counter: payload.untimed.metric.Counter(),
+ StagedMetadatas: payload.untimed.metadatas,
+ }
+ if err := value.ToProto(&m.cm); err != nil {
+ return err
+ }
+
+ m.metric = metricpb.MetricWithMetadatas{
+ Type: metricpb.MetricWithMetadatas_COUNTER_WITH_METADATAS,
+ CounterWithMetadatas: &m.cm,
+ }
+ case metric.TimerType:
+ value := unaggregated.BatchTimerWithMetadatas{
+ BatchTimer: payload.untimed.metric.BatchTimer(),
+ StagedMetadatas: payload.untimed.metadatas,
+ }
+ if err := value.ToProto(&m.bm); err != nil {
+ return err
+ }
+
+ m.metric = metricpb.MetricWithMetadatas{
+ Type: metricpb.MetricWithMetadatas_BATCH_TIMER_WITH_METADATAS,
+ BatchTimerWithMetadatas: &m.bm,
+ }
+ case metric.GaugeType:
+ value := unaggregated.GaugeWithMetadatas{
+ Gauge: payload.untimed.metric.Gauge(),
+ StagedMetadatas: payload.untimed.metadatas,
+ }
+ if err := value.ToProto(&m.gm); err != nil {
+ return err
+ }
+
+ m.metric = metricpb.MetricWithMetadatas{
+ Type: metricpb.MetricWithMetadatas_GAUGE_WITH_METADATAS,
+ GaugeWithMetadatas: &m.gm,
+ }
+ default:
+ return fmt.Errorf("unrecognized metric type: %v",
+ payload.untimed.metric.Type)
+ }
+ case forwardedType:
+ value := aggregated.ForwardedMetricWithMetadata{
+ ForwardedMetric: payload.forwarded.metric,
+ ForwardMetadata: payload.forwarded.metadata,
+ }
+ if err := value.ToProto(&m.fm); err != nil {
+ return err
+ }
+
+ m.metric = metricpb.MetricWithMetadatas{
+ Type: metricpb.MetricWithMetadatas_FORWARDED_METRIC_WITH_METADATA,
+ ForwardedMetricWithMetadata: &m.fm,
+ }
+ case timedType:
+ value := aggregated.TimedMetricWithMetadata{
+ Metric: payload.timed.metric,
+ TimedMetadata: payload.timed.metadata,
+ }
+ if err := value.ToProto(&m.tm); err != nil {
+ return err
+ }
+
+ m.metric = metricpb.MetricWithMetadatas{
+ Type: metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_METADATA,
+ TimedMetricWithMetadata: &m.tm,
+ }
+ case timedWithStagedMetadatasType:
+ value := aggregated.TimedMetricWithMetadatas{
+ Metric: payload.timedWithStagedMetadatas.metric,
+ StagedMetadatas: payload.timedWithStagedMetadatas.metadatas,
+ }
+ if err := value.ToProto(&m.tms); err != nil {
+ return err
+ }
+
+ m.metric = metricpb.MetricWithMetadatas{
+ Type: metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_METADATAS,
+ TimedMetricWithMetadatas: &m.tms,
+ }
+ default:
+ return fmt.Errorf("unrecognized payload type: %v",
+ payload.payloadType)
+ }
+
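+	// Grow the reusable buffer geometrically when it is too small so that
+	// steady-state encodes avoid reallocating for similarly sized metrics.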
+ size := m.metric.Size()
+ if size > cap(m.buf) {
+ const growthFactor = 2
+ m.buf = make([]byte, int(growthFactor*float64(size)))
+ }
+
+	// Resize the buffer to exactly the size needed for marshalling.
+ m.buf = m.buf[:size]
+
+ _, err := m.metric.MarshalTo(m.buf)
+ return err
+}
+
+func (m *message) Shard() uint32 {
+ return m.shard
+}
+
+func (m *message) Bytes() []byte {
+ return m.buf
+}
+
+func (m *message) Size() int {
+ return len(m.buf)
+}
+
+func (m *message) Finalize(reason producer.FinalizeReason) {
+ // Return to pool.
+ m.pool.Put(m)
+}
diff --git a/src/aggregator/client/client_mock.go b/src/aggregator/client/client_mock.go
index edadd51a6f..e63c8ab47c 100644
--- a/src/aggregator/client/client_mock.go
+++ b/src/aggregator/client/client_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/aggregator/client (interfaces: Client,AdminClient)
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -30,6 +30,7 @@ import (
"github.com/m3db/m3/src/metrics/metadata"
"github.com/m3db/m3/src/metrics/metric/aggregated"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
+ "github.com/m3db/m3/src/metrics/policy"
"github.com/golang/mock/gomock"
)
@@ -99,6 +100,20 @@ func (mr *MockClientMockRecorder) Init() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockClient)(nil).Init))
}
+// WritePassthrough mocks base method
+func (m *MockClient) WritePassthrough(arg0 aggregated.Metric, arg1 policy.StoragePolicy) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WritePassthrough", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WritePassthrough indicates an expected call of WritePassthrough
+func (mr *MockClientMockRecorder) WritePassthrough(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WritePassthrough", reflect.TypeOf((*MockClient)(nil).WritePassthrough), arg0, arg1)
+}
+
// WriteTimed mocks base method
func (m *MockClient) WriteTimed(arg0 aggregated.Metric, arg1 metadata.TimedMetadata) error {
m.ctrl.T.Helper()
@@ -113,6 +128,20 @@ func (mr *MockClientMockRecorder) WriteTimed(arg0, arg1 interface{}) *gomock.Cal
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTimed", reflect.TypeOf((*MockClient)(nil).WriteTimed), arg0, arg1)
}
+// WriteTimedWithStagedMetadatas mocks base method
+func (m *MockClient) WriteTimedWithStagedMetadatas(arg0 aggregated.Metric, arg1 metadata.StagedMetadatas) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WriteTimedWithStagedMetadatas", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WriteTimedWithStagedMetadatas indicates an expected call of WriteTimedWithStagedMetadatas
+func (mr *MockClientMockRecorder) WriteTimedWithStagedMetadatas(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTimedWithStagedMetadatas", reflect.TypeOf((*MockClient)(nil).WriteTimedWithStagedMetadatas), arg0, arg1)
+}
+
// WriteUntimedBatchTimer mocks base method
func (m *MockClient) WriteUntimedBatchTimer(arg0 unaggregated.BatchTimer, arg1 metadata.StagedMetadatas) error {
m.ctrl.T.Helper()
@@ -234,6 +263,20 @@ func (mr *MockAdminClientMockRecorder) WriteForwarded(arg0, arg1 interface{}) *g
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteForwarded", reflect.TypeOf((*MockAdminClient)(nil).WriteForwarded), arg0, arg1)
}
+// WritePassthrough mocks base method
+func (m *MockAdminClient) WritePassthrough(arg0 aggregated.Metric, arg1 policy.StoragePolicy) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WritePassthrough", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WritePassthrough indicates an expected call of WritePassthrough
+func (mr *MockAdminClientMockRecorder) WritePassthrough(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WritePassthrough", reflect.TypeOf((*MockAdminClient)(nil).WritePassthrough), arg0, arg1)
+}
+
// WriteTimed mocks base method
func (m *MockAdminClient) WriteTimed(arg0 aggregated.Metric, arg1 metadata.TimedMetadata) error {
m.ctrl.T.Helper()
@@ -248,6 +291,20 @@ func (mr *MockAdminClientMockRecorder) WriteTimed(arg0, arg1 interface{}) *gomoc
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTimed", reflect.TypeOf((*MockAdminClient)(nil).WriteTimed), arg0, arg1)
}
+// WriteTimedWithStagedMetadatas mocks base method
+func (m *MockAdminClient) WriteTimedWithStagedMetadatas(arg0 aggregated.Metric, arg1 metadata.StagedMetadatas) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WriteTimedWithStagedMetadatas", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WriteTimedWithStagedMetadatas indicates an expected call of WriteTimedWithStagedMetadatas
+func (mr *MockAdminClientMockRecorder) WriteTimedWithStagedMetadatas(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTimedWithStagedMetadatas", reflect.TypeOf((*MockAdminClient)(nil).WriteTimedWithStagedMetadatas), arg0, arg1)
+}
+
// WriteUntimedBatchTimer mocks base method
func (m *MockAdminClient) WriteUntimedBatchTimer(arg0 unaggregated.BatchTimer, arg1 metadata.StagedMetadatas) error {
m.ctrl.T.Helper()
diff --git a/src/aggregator/client/client_test.go b/src/aggregator/client/client_test.go
index bb024a9f5e..e486742635 100644
--- a/src/aggregator/client/client_test.go
+++ b/src/aggregator/client/client_test.go
@@ -66,7 +66,7 @@ var (
}
testTimed = aggregated.Metric{
Type: metric.CounterType,
- ID: []byte("testForwarded"),
+ ID: []byte("testTimed"),
TimeNanos: 1234,
Value: 178,
}
@@ -76,6 +76,12 @@ var (
TimeNanos: 1234,
Values: []float64{34567, 256, 178},
}
+ testPassthrough = aggregated.Metric{
+ Type: metric.CounterType,
+ ID: []byte("testPassthrough"),
+ TimeNanos: 12345,
+ Value: 123,
+ }
testStagedMetadatas = metadata.StagedMetadatas{
{
CutoverNanos: 100,
@@ -127,7 +133,8 @@ var (
SourceID: 1234,
NumForwardedTimes: 3,
}
- testPlacementInstances = []placement.Instance{
+ testPassthroughMetadata = policy.NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour)
+ testPlacementInstances = []placement.Instance{
placement.NewInstance().
SetID("instance1").
SetEndpoint("instance1_endpoint").
@@ -188,8 +195,16 @@ var (
SetInstances(testPlacementInstances)
)
+func mustNewTestClient(t *testing.T, opts Options) *client {
+ c, err := NewClient(opts)
+ require.NoError(t, err)
+ value, ok := c.(*client)
+ require.True(t, ok)
+ return value
+}
+
func TestClientInitUninitializedOrClosed(t *testing.T) {
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientInitialized
require.Equal(t, errClientIsInitializedOrClosed, c.Init())
@@ -205,7 +220,7 @@ func TestClientInitWatcherWatchError(t *testing.T) {
errTestWatcherWatch := errors.New("error watching")
watcher := placement.NewMockStagedPlacementWatcher(ctrl)
watcher.EXPECT().Watch().Return(errTestWatcherWatch)
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.placementWatcher = watcher
require.Equal(t, errTestWatcherWatch, c.Init())
}
@@ -216,14 +231,14 @@ func TestClientInitSuccess(t *testing.T) {
watcher := placement.NewMockStagedPlacementWatcher(ctrl)
watcher.EXPECT().Watch().Return(nil)
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.placementWatcher = watcher
require.NoError(t, c.Init())
require.Equal(t, clientInitialized, c.state)
}
func TestClientWriteUntimedMetricClosed(t *testing.T) {
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientUninitialized
for _, input := range []unaggregated.MetricUnion{testCounter, testBatchTimer, testGauge} {
var err error
@@ -246,7 +261,7 @@ func TestClientWriteUntimedMetricActiveStagedPlacementError(t *testing.T) {
errActiveStagedPlacementError := errors.New("error active staged placement")
watcher := placement.NewMockStagedPlacementWatcher(ctrl)
watcher.EXPECT().ActiveStagedPlacement().Return(nil, nil, errActiveStagedPlacementError).MinTimes(1)
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientInitialized
c.placementWatcher = watcher
@@ -273,7 +288,7 @@ func TestClientWriteUntimedMetricActivePlacementError(t *testing.T) {
stagedPlacement.EXPECT().ActivePlacement().Return(nil, nil, errActivePlacementError).MinTimes(1)
watcher := placement.NewMockStagedPlacementWatcher(ctrl)
watcher.EXPECT().ActiveStagedPlacement().Return(stagedPlacement, func() {}, nil).MinTimes(1)
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientInitialized
c.placementWatcher = watcher
@@ -318,7 +333,7 @@ func TestClientWriteUntimedMetricSuccess(t *testing.T) {
stagedPlacement.EXPECT().ActivePlacement().Return(testPlacement, func() {}, nil).MinTimes(1)
watcher := placement.NewMockStagedPlacementWatcher(ctrl)
watcher.EXPECT().ActiveStagedPlacement().Return(stagedPlacement, func() {}, nil).MinTimes(1)
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientInitialized
c.nowFn = func() time.Time { return time.Unix(0, testNowNanos) }
c.writerMgr = writerMgr
@@ -384,7 +399,7 @@ func TestClientWriteUntimedMetricPartialError(t *testing.T) {
stagedPlacement.EXPECT().ActivePlacement().Return(testPlacement, func() {}, nil).MinTimes(1)
watcher := placement.NewMockStagedPlacementWatcher(ctrl)
watcher.EXPECT().ActiveStagedPlacement().Return(stagedPlacement, func() {}, nil).MinTimes(1)
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientInitialized
c.nowFn = func() time.Time { return time.Unix(0, testNowNanos) }
c.writerMgr = writerMgr
@@ -412,7 +427,7 @@ func TestClientWriteUntimedMetricBeforeShardCutover(t *testing.T) {
stagedPlacement.EXPECT().ActivePlacement().Return(testPlacement, func() {}, nil).MinTimes(1)
watcher := placement.NewMockStagedPlacementWatcher(ctrl)
watcher.EXPECT().ActiveStagedPlacement().Return(stagedPlacement, func() {}, nil).MinTimes(1)
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.shardCutoverWarmupDuration = time.Second
c.state = clientInitialized
c.nowFn = func() time.Time { return time.Unix(0, testCutoverNanos-1).Add(-time.Second) }
@@ -433,7 +448,7 @@ func TestClientWriteUntimedMetricAfterShardCutoff(t *testing.T) {
stagedPlacement.EXPECT().ActivePlacement().Return(testPlacement, func() {}, nil).MinTimes(1)
watcher := placement.NewMockStagedPlacementWatcher(ctrl)
watcher.EXPECT().ActiveStagedPlacement().Return(stagedPlacement, func() {}, nil).MinTimes(1)
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.shardCutoffLingerDuration = time.Second
c.state = clientInitialized
c.nowFn = func() time.Time { return time.Unix(0, testCutoffNanos+1).Add(time.Second) }
@@ -472,7 +487,7 @@ func TestClientWriteTimedMetricSuccess(t *testing.T) {
stagedPlacement.EXPECT().ActivePlacement().Return(testPlacement, func() {}, nil).MinTimes(1)
watcher := placement.NewMockStagedPlacementWatcher(ctrl)
watcher.EXPECT().ActiveStagedPlacement().Return(stagedPlacement, func() {}, nil).MinTimes(1)
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientInitialized
c.nowFn = func() time.Time { return time.Unix(0, testNowNanos) }
c.writerMgr = writerMgr
@@ -524,7 +539,7 @@ func TestClientWriteTimedMetricPartialError(t *testing.T) {
stagedPlacement.EXPECT().ActivePlacement().Return(testPlacement, func() {}, nil).MinTimes(1)
watcher := placement.NewMockStagedPlacementWatcher(ctrl)
watcher.EXPECT().ActiveStagedPlacement().Return(stagedPlacement, func() {}, nil).MinTimes(1)
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientInitialized
c.nowFn = func() time.Time { return time.Unix(0, testNowNanos) }
c.writerMgr = writerMgr
@@ -572,7 +587,7 @@ func TestClientWriteForwardedMetricSuccess(t *testing.T) {
stagedPlacement.EXPECT().ActivePlacement().Return(testPlacement, func() {}, nil).MinTimes(1)
watcher := placement.NewMockStagedPlacementWatcher(ctrl)
watcher.EXPECT().ActiveStagedPlacement().Return(stagedPlacement, func() {}, nil).MinTimes(1)
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientInitialized
c.nowFn = func() time.Time { return time.Unix(0, testNowNanos) }
c.writerMgr = writerMgr
@@ -624,7 +639,7 @@ func TestClientWriteForwardedMetricPartialError(t *testing.T) {
stagedPlacement.EXPECT().ActivePlacement().Return(testPlacement, func() {}, nil).MinTimes(1)
watcher := placement.NewMockStagedPlacementWatcher(ctrl)
watcher.EXPECT().ActiveStagedPlacement().Return(stagedPlacement, func() {}, nil).MinTimes(1)
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientInitialized
c.nowFn = func() time.Time { return time.Unix(0, testNowNanos) }
c.writerMgr = writerMgr
@@ -645,8 +660,108 @@ func TestClientWriteForwardedMetricPartialError(t *testing.T) {
require.Equal(t, testForwardMetadata, payloadRes.forwarded.metadata)
}
+func TestClientWritePassthroughMetricSuccess(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ var (
+ instancesRes []placement.Instance
+ shardRes uint32
+ payloadRes payloadUnion
+ )
+ writerMgr := NewMockinstanceWriterManager(ctrl)
+ writerMgr.EXPECT().
+ Write(gomock.Any(), gomock.Any(), gomock.Any()).
+ DoAndReturn(func(
+ instance placement.Instance,
+ shardID uint32,
+ payload payloadUnion,
+ ) error {
+ instancesRes = append(instancesRes, instance)
+ shardRes = shardID
+ payloadRes = payload
+ return nil
+ }).
+ MinTimes(1)
+ stagedPlacement := placement.NewMockActiveStagedPlacement(ctrl)
+ stagedPlacement.EXPECT().ActivePlacement().Return(testPlacement, func() {}, nil).MinTimes(1)
+ watcher := placement.NewMockStagedPlacementWatcher(ctrl)
+ watcher.EXPECT().ActiveStagedPlacement().Return(stagedPlacement, func() {}, nil).MinTimes(1)
+ c := mustNewTestClient(t, testOptions())
+ c.state = clientInitialized
+ c.nowFn = func() time.Time { return time.Unix(0, testNowNanos) }
+ c.writerMgr = writerMgr
+ c.placementWatcher = watcher
+
+ expectedInstances := []placement.Instance{
+ testPlacementInstances[0],
+ testPlacementInstances[2],
+ }
+ testMetric := testPassthrough
+ testMetric.TimeNanos = testNowNanos
+ err := c.WritePassthrough(testMetric, testPassthroughMetadata)
+ require.NoError(t, err)
+ require.Equal(t, expectedInstances, instancesRes)
+ require.Equal(t, uint32(1), shardRes)
+ require.Equal(t, passthroughType, payloadRes.payloadType)
+ require.Equal(t, testMetric, payloadRes.passthrough.metric)
+ require.Equal(t, testPassthroughMetadata, payloadRes.passthrough.storagePolicy)
+}
+
+func TestClientWritePassthroughMetricPartialError(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ var (
+ instancesRes []placement.Instance
+ shardRes uint32
+ payloadRes payloadUnion
+ errInstanceWrite = errors.New("instance write error")
+ )
+ writerMgr := NewMockinstanceWriterManager(ctrl)
+ writerMgr.EXPECT().
+ Write(gomock.Any(), gomock.Any(), gomock.Any()).
+ DoAndReturn(func(
+ instance placement.Instance,
+ shardID uint32,
+ payload payloadUnion,
+ ) error {
+ if instance.ID() == testPlacementInstances[0].ID() {
+ return errInstanceWrite
+ }
+ instancesRes = append(instancesRes, instance)
+ shardRes = shardID
+ payloadRes = payload
+ return nil
+ }).
+ MinTimes(1)
+ stagedPlacement := placement.NewMockActiveStagedPlacement(ctrl)
+ stagedPlacement.EXPECT().ActivePlacement().Return(testPlacement, func() {}, nil).MinTimes(1)
+ watcher := placement.NewMockStagedPlacementWatcher(ctrl)
+ watcher.EXPECT().ActiveStagedPlacement().Return(stagedPlacement, func() {}, nil).MinTimes(1)
+ c := mustNewTestClient(t, testOptions())
+ c.state = clientInitialized
+ c.nowFn = func() time.Time { return time.Unix(0, testNowNanos) }
+ c.writerMgr = writerMgr
+ c.placementWatcher = watcher
+
+ expectedInstances := []placement.Instance{
+ testPlacementInstances[2],
+ }
+ testMetric := testPassthrough
+ testMetric.TimeNanos = testNowNanos
+ err := c.WritePassthrough(testMetric, testPassthroughMetadata)
+ require.Error(t, err)
+ require.True(t, strings.Contains(err.Error(), errInstanceWrite.Error()))
+ require.Equal(t, expectedInstances, instancesRes)
+ require.Equal(t, uint32(1), shardRes)
+ require.Equal(t, passthroughType, payloadRes.payloadType)
+ require.Equal(t, testMetric, payloadRes.passthrough.metric)
+ require.Equal(t, testPassthroughMetadata, payloadRes.passthrough.storagePolicy)
+}
+
func TestClientFlushClosed(t *testing.T) {
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientClosed
require.Equal(t, errClientIsUninitializedOrClosed, c.Flush())
}
@@ -658,8 +773,7 @@ func TestClientFlushError(t *testing.T) {
errTestFlush := errors.New("test flush error")
writerMgr := NewMockinstanceWriterManager(ctrl)
writerMgr.EXPECT().Flush().Return(errTestFlush).MinTimes(1)
- opts := testOptions()
- c := NewClient(opts).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientInitialized
c.writerMgr = writerMgr
require.Equal(t, errTestFlush, c.Flush())
@@ -671,15 +785,14 @@ func TestClientFlushSuccess(t *testing.T) {
writerMgr := NewMockinstanceWriterManager(ctrl)
writerMgr.EXPECT().Flush().Return(nil).MinTimes(1)
- opts := testOptions()
- c := NewClient(opts).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientInitialized
c.writerMgr = writerMgr
require.NoError(t, c.Flush())
}
func TestClientCloseUninitializedOrClosed(t *testing.T) {
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientUninitialized
require.Equal(t, errClientIsUninitializedOrClosed, c.Close())
@@ -689,13 +802,13 @@ func TestClientCloseUninitializedOrClosed(t *testing.T) {
}
func TestClientCloseSuccess(t *testing.T) {
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
c.state = clientInitialized
require.NoError(t, c.Close())
}
func TestClientWriteTimeRangeFor(t *testing.T) {
- c := NewClient(testOptions()).(*client)
+ c := mustNewTestClient(t, testOptions())
testShard := shard.NewShard(0).SetState(shard.Initializing)
for _, input := range []struct {
cutoverNanos int64
diff --git a/src/aggregator/client/config.go b/src/aggregator/client/config.go
index d99a553862..cb6d732d4c 100644
--- a/src/aggregator/client/config.go
+++ b/src/aggregator/client/config.go
@@ -21,6 +21,8 @@
package client
import (
+ "errors"
+ "fmt"
"time"
"github.com/m3db/m3/src/aggregator/sharding"
@@ -28,28 +30,37 @@ import (
"github.com/m3db/m3/src/cluster/kv"
"github.com/m3db/m3/src/cluster/placement"
"github.com/m3db/m3/src/metrics/encoding/protobuf"
+ producerconfig "github.com/m3db/m3/src/msg/producer/config"
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/retry"
"github.com/uber-go/tally"
)
+var (
+ errNoM3MsgOptions = errors.New("m3msg aggregator client: missing m3msg options")
+)
+
// Configuration contains client configuration.
type Configuration struct {
- PlacementKV kv.OverrideConfiguration `yaml:"placementKV" validate:"nonzero"`
- PlacementWatcher placement.WatcherConfiguration `yaml:"placementWatcher"`
- HashType *sharding.HashType `yaml:"hashType"`
- ShardCutoverWarmupDuration *time.Duration `yaml:"shardCutoverWarmupDuration"`
- ShardCutoffLingerDuration *time.Duration `yaml:"shardCutoffLingerDuration"`
- Encoder EncoderConfiguration `yaml:"encoder"`
- FlushSize int `yaml:"flushSize"`
- MaxBatchSize int `yaml:"maxBatchSize"`
- MaxTimerBatchSize int `yaml:"maxTimerBatchSize"`
- QueueSize int `yaml:"queueSize"`
- QueueDropType *DropType `yaml:"queueDropType"`
- Connection ConnectionConfiguration `yaml:"connection"`
+ Type AggregatorClientType `yaml:"type"`
+ M3Msg *M3MsgConfiguration `yaml:"m3msg"`
+ PlacementKV *kv.OverrideConfiguration `yaml:"placementKV"`
+ PlacementWatcher *placement.WatcherConfiguration `yaml:"placementWatcher"`
+ HashType *sharding.HashType `yaml:"hashType"`
+ ShardCutoverWarmupDuration *time.Duration `yaml:"shardCutoverWarmupDuration"`
+ ShardCutoffLingerDuration *time.Duration `yaml:"shardCutoffLingerDuration"`
+ Encoder EncoderConfiguration `yaml:"encoder"`
+ FlushSize int `yaml:"flushSize"`
+ MaxBatchSize int `yaml:"maxBatchSize"`
+ MaxTimerBatchSize int `yaml:"maxTimerBatchSize"`
+ BatchFlushDeadline time.Duration `yaml:"batchFlushDeadline"`
+ QueueSize int `yaml:"queueSize"`
+ QueueDropType *DropType `yaml:"queueDropType"`
+ Connection ConnectionConfiguration `yaml:"connection"`
}
// NewAdminClient creates a new admin client.
@@ -57,8 +68,9 @@ func (c *Configuration) NewAdminClient(
kvClient m3clusterclient.Client,
clockOpts clock.Options,
instrumentOpts instrument.Options,
+ rwOpts xio.Options,
) (AdminClient, error) {
- client, err := c.NewClient(kvClient, clockOpts, instrumentOpts)
+ client, err := c.NewClient(kvClient, clockOpts, instrumentOpts, rwOpts)
if err != nil {
return nil, err
}
@@ -70,75 +82,126 @@ func (c *Configuration) NewClient(
kvClient m3clusterclient.Client,
clockOpts clock.Options,
instrumentOpts instrument.Options,
+ rwOpts xio.Options,
) (Client, error) {
- opts, err := c.newClientOptions(kvClient, clockOpts, instrumentOpts)
+ opts, err := c.newClientOptions(kvClient, clockOpts, instrumentOpts, rwOpts)
if err != nil {
return nil, err
}
- return NewClient(opts), nil
+
+ return NewClient(opts)
}
+var (
+ errLegacyClientNoPlacementKVConfig = errors.New("no placement KV config set")
+ errLegacyClientNoPlacementWatcherConfig = errors.New("no placement watcher config set")
+)
+
func (c *Configuration) newClientOptions(
kvClient m3clusterclient.Client,
clockOpts clock.Options,
instrumentOpts instrument.Options,
+ rwOpts xio.Options,
) (Options, error) {
- scope := instrumentOpts.MetricsScope()
- connectionOpts := c.Connection.NewConnectionOptions(scope.SubScope("connection"))
- kvOpts, err := c.PlacementKV.NewOverrideOptions()
- if err != nil {
- return nil, err
- }
+ opts := NewOptions().
+ SetAggregatorClientType(c.Type).
+ SetClockOptions(clockOpts).
+ SetInstrumentOptions(instrumentOpts).
+ SetRWOptions(rwOpts)
- placementStore, err := kvClient.Store(kvOpts)
- if err != nil {
- return nil, err
- }
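+ // Configure options specific to the selected client type.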
+ switch c.Type {
+ case M3MsgAggregatorClient:
+ m3msgCfg := c.M3Msg
+ if m3msgCfg == nil {
+ return nil, errNoM3MsgOptions
+ }
- iOpts := instrumentOpts.SetMetricsScope(scope.SubScope("encoder"))
- encoderOpts := c.Encoder.NewEncoderOptions(iOpts)
+ m3msgOpts, err := m3msgCfg.NewM3MsgOptions(kvClient, instrumentOpts, rwOpts)
+ if err != nil {
+ return nil, err
+ }
- iOpts = instrumentOpts.SetMetricsScope(scope.SubScope("placement-watcher"))
- watcherOpts := c.PlacementWatcher.NewOptions(placementStore, iOpts)
+ // Allow the M3Msg options to override the timer options on the instrument options.
+ opts = opts.SetInstrumentOptions(
+ opts.InstrumentOptions().SetTimerOptions(m3msgOpts.TimerOptions()))
- // Get the shard fn.
- hashType := sharding.DefaultHash
- if c.HashType != nil {
- hashType = *c.HashType
- }
- shardFn, err := hashType.ShardFn()
- if err != nil {
- return nil, err
- }
+ // Set the M3Msg options configured.
+ opts = opts.SetM3MsgOptions(m3msgOpts)
+ case LegacyAggregatorClient:
+ placementKV := c.PlacementKV
+ if placementKV == nil {
+ return nil, errLegacyClientNoPlacementKVConfig
+ }
- opts := NewOptions().
- SetClockOptions(clockOpts).
- SetInstrumentOptions(instrumentOpts).
- SetStagedPlacementWatcherOptions(watcherOpts).
- SetShardFn(shardFn).
- SetEncoderOptions(encoderOpts).
- SetConnectionOptions(connectionOpts)
+ placementWatcher := c.PlacementWatcher
+ if placementWatcher == nil {
+ return nil, errLegacyClientNoPlacementWatcherConfig
+ }
- if c.ShardCutoverWarmupDuration != nil {
- opts = opts.SetShardCutoverWarmupDuration(*c.ShardCutoverWarmupDuration)
- }
- if c.ShardCutoffLingerDuration != nil {
- opts = opts.SetShardCutoffLingerDuration(*c.ShardCutoffLingerDuration)
- }
- if c.FlushSize != 0 {
- opts = opts.SetFlushSize(c.FlushSize)
- }
- if c.MaxBatchSize != 0 {
- opts = opts.SetMaxBatchSize(c.MaxBatchSize)
- }
- if c.MaxTimerBatchSize != 0 {
- opts = opts.SetMaxTimerBatchSize(c.MaxTimerBatchSize)
- }
- if c.QueueSize != 0 {
- opts = opts.SetInstanceQueueSize(c.QueueSize)
+ scope := instrumentOpts.MetricsScope()
+ connectionOpts := c.Connection.NewConnectionOptions(scope.SubScope("connection"))
+ kvOpts, err := placementKV.NewOverrideOptions()
+ if err != nil {
+ return nil, err
+ }
+
+ placementStore, err := kvClient.Store(kvOpts)
+ if err != nil {
+ return nil, err
+ }
+
+ iOpts := instrumentOpts.SetMetricsScope(scope.SubScope("encoder"))
+ encoderOpts := c.Encoder.NewEncoderOptions(iOpts)
+
+ iOpts = instrumentOpts.SetMetricsScope(scope.SubScope("placement-watcher"))
+ watcherOpts := placementWatcher.NewOptions(placementStore, iOpts)
+
+ // Get the shard fn.
+ hashType := sharding.DefaultHash
+ if c.HashType != nil {
+ hashType = *c.HashType
+ }
+ shardFn, err := hashType.ShardFn()
+ if err != nil {
+ return nil, err
+ }
+
+ opts = opts.SetStagedPlacementWatcherOptions(watcherOpts).
+ SetShardFn(shardFn).
+ SetEncoderOptions(encoderOpts).
+ SetConnectionOptions(connectionOpts)
+
+ if c.ShardCutoverWarmupDuration != nil {
+ opts = opts.SetShardCutoverWarmupDuration(*c.ShardCutoverWarmupDuration)
+ }
+ if c.ShardCutoffLingerDuration != nil {
+ opts = opts.SetShardCutoffLingerDuration(*c.ShardCutoffLingerDuration)
+ }
+ if c.FlushSize != 0 {
+ opts = opts.SetFlushSize(c.FlushSize)
+ }
+ if c.MaxBatchSize != 0 {
+ opts = opts.SetMaxBatchSize(c.MaxBatchSize)
+ }
+ if c.MaxTimerBatchSize != 0 {
+ opts = opts.SetMaxTimerBatchSize(c.MaxTimerBatchSize)
+ }
+ if c.BatchFlushDeadline != 0 {
+ opts = opts.SetBatchFlushDeadline(c.BatchFlushDeadline)
+ }
+ if c.QueueSize != 0 {
+ opts = opts.SetInstanceQueueSize(c.QueueSize)
+ }
+ if c.QueueDropType != nil {
+ opts = opts.SetQueueDropType(*c.QueueDropType)
+ }
+ default:
+ return nil, fmt.Errorf("unknown client type: %v", c.Type)
}
- if c.QueueDropType != nil {
- opts = opts.SetQueueDropType(*c.QueueDropType)
+
+ // Validate the options.
+ if err := opts.Validate(); err != nil {
+ return nil, err
}
return opts, nil
}
@@ -213,3 +276,36 @@ func (c *EncoderConfiguration) NewEncoderOptions(
}
return opts
}
+
+// M3MsgConfiguration contains the M3Msg client configuration, required
+// when using the M3Msg client type.
+type M3MsgConfiguration struct {
+ Producer producerconfig.ProducerConfiguration `yaml:"producer"`
+}
+
+// NewM3MsgOptions returns new M3Msg options from configuration.
+func (c *M3MsgConfiguration) NewM3MsgOptions(
+ kvClient m3clusterclient.Client,
+ instrumentOpts instrument.Options,
+ rwOpts xio.Options,
+) (M3MsgOptions, error) {
+ opts := NewM3MsgOptions()
+
+ // For M3Msg clients, use the timer options defined by the default
+ // M3Msg options so that low-overhead timers are used.
+ instrumentOpts = instrumentOpts.SetTimerOptions(opts.TimerOptions())
+
+ producer, err := c.Producer.NewProducer(kvClient, instrumentOpts, rwOpts)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = opts.SetProducer(producer)
+
+ // Validate the options.
+ if err := opts.Validate(); err != nil {
+ return nil, err
+ }
+ return opts, nil
+}
diff --git a/src/aggregator/client/config_test.go b/src/aggregator/client/config_test.go
index 34c794fe40..6f91127b3a 100644
--- a/src/aggregator/client/config_test.go
+++ b/src/aggregator/client/config_test.go
@@ -30,6 +30,7 @@ import (
"github.com/m3db/m3/src/cluster/kv/mem"
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
"github.com/golang/mock/gomock"
@@ -62,7 +63,9 @@ encoder:
low: 0.001
high: 0.01
flushSize: 1440
+maxBatchSize: 42
maxTimerBatchSize: 140
+batchFlushDeadline: 123ms
queueSize: 1000
queueDropType: oldest
connection:
@@ -103,6 +106,8 @@ func TestConfigUnmarshal(t *testing.T) {
require.Equal(t, 0.01, cfg.Encoder.BytesPool.Watermark.RefillHighWatermark)
require.Equal(t, 1440, cfg.FlushSize)
require.Equal(t, 140, cfg.MaxTimerBatchSize)
+ require.Equal(t, 42, cfg.MaxBatchSize)
+ require.Equal(t, 123*time.Millisecond, cfg.BatchFlushDeadline)
require.Equal(t, 1000, cfg.QueueSize)
require.Equal(t, DropOldest, *cfg.QueueDropType)
require.Equal(t, time.Second, cfg.Connection.ConnectionTimeout)
@@ -136,7 +141,8 @@ func TestNewClientOptions(t *testing.T) {
kvClient.EXPECT().Store(expectedKvOpts).Return(store, nil)
clockOpts := clock.NewOptions()
instrumentOpts := instrument.NewOptions()
- opts, err := cfg.newClientOptions(kvClient, clockOpts, instrumentOpts)
+ rwOpts := xio.NewOptions()
+ opts, err := cfg.newClientOptions(kvClient, clockOpts, instrumentOpts, rwOpts)
require.NoError(t, err)
// Verify the constructed options match expectations.
@@ -151,7 +157,8 @@ func TestNewClientOptions(t *testing.T) {
require.Equal(t, time.Minute, opts.ShardCutoffLingerDuration())
require.Equal(t, 1440, opts.FlushSize())
require.Equal(t, 140, opts.MaxTimerBatchSize())
- require.Equal(t, 140, opts.MaxTimerBatchSize())
+ require.Equal(t, 42, opts.MaxBatchSize())
+ require.Equal(t, 123*time.Millisecond, opts.BatchFlushDeadline())
require.Equal(t, DropOldest, opts.QueueDropType())
require.Equal(t, time.Second, opts.ConnectionOptions().ConnectionTimeout())
require.Equal(t, true, opts.ConnectionOptions().ConnectionKeepAlive())
diff --git a/src/aggregator/client/conn.go b/src/aggregator/client/conn.go
index 0f63765c00..f37d23cc96 100644
--- a/src/aggregator/client/conn.go
+++ b/src/aggregator/client/conn.go
@@ -28,6 +28,7 @@ import (
"time"
"github.com/m3db/m3/src/x/clock"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/retry"
"github.com/uber-go/tally"
@@ -39,6 +40,8 @@ const (
var (
errNoActiveConnection = errors.New("no active connection")
+ errInvalidConnection = errors.New("connection is invalid")
+ uninitWriter uninitializedWriter
)
type sleepFn func(time.Duration)
@@ -62,6 +65,7 @@ type connection struct {
rngFn retry.RngFn
conn *net.TCPConn
+ writer xio.ResettableWriter
numFailures int
threshold int
lastConnectAttemptNanos int64
@@ -90,7 +94,11 @@ func newConnection(addr string, opts ConnectionOptions) *connection {
nowFn: opts.ClockOptions().NowFn(),
sleepFn: time.Sleep,
threshold: opts.InitReconnectThreshold(),
- metrics: newConnectionMetrics(opts.InstrumentOptions().MetricsScope()),
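+ // Construct the writer over a placeholder; it is reset to wrap the real TCP connection on connect.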
+ writer: opts.RWOptions().ResettableWriterFn()(
+ uninitWriter,
+ xio.ResettableWriterOptions{WriteBufferSize: 0},
+ ),
+ metrics: newConnectionMetrics(opts.InstrumentOptions().MetricsScope()),
}
c.connectWithLockFn = c.connectWithLock
c.writeWithLockFn = c.writeWithLock
@@ -181,7 +189,9 @@ func (c *connection) connectWithLock() error {
if c.conn != nil {
c.conn.Close() // nolint: errcheck
}
+
c.conn = tcpConn
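+ // Point the resettable writer at the newly established connection.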
+ c.writer.Reset(tcpConn)
return nil
}
@@ -215,7 +225,11 @@ func (c *connection) writeWithLock(data []byte) error {
if err := c.conn.SetWriteDeadline(c.nowFn().Add(c.writeTimeout)); err != nil {
c.metrics.setWriteDeadlineError.Inc(1)
}
- if _, err := c.conn.Write(data); err != nil {
+ if _, err := c.writer.Write(data); err != nil {
+ c.metrics.writeError.Inc(1)
+ return err
+ }
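+ // Flush the buffered writer so the payload reaches the connection immediately.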
+ if err := c.writer.Flush(); err != nil {
c.metrics.writeError.Inc(1)
return err
}
@@ -260,3 +274,8 @@ func newConnectionMetrics(scope tally.Scope) connectionMetrics {
Counter(errorMetric),
}
}
+
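+// uninitializedWriter is a placeholder that fails all writes until the
+// writer is reset with a live connection.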
+type uninitializedWriter struct{}
+
+func (u uninitializedWriter) Write(p []byte) (int, error) { return 0, errInvalidConnection }
+func (u uninitializedWriter) Close() error { return nil }
diff --git a/src/aggregator/client/conn_options.go b/src/aggregator/client/conn_options.go
index 1a2485a94d..7a85b67581 100644
--- a/src/aggregator/client/conn_options.go
+++ b/src/aggregator/client/conn_options.go
@@ -26,13 +26,14 @@ import (
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/retry"
)
const (
defaultConnectionTimeout = 2 * time.Second
defaultConnectionKeepAlive = true
- defaultWriteTimeout = 100 * time.Millisecond
+ defaultWriteTimeout = time.Duration(0)
defaultInitReconnectThreshold = 2
defaultMaxReconnectThreshold = 5000
defaultReconnectThresholdMultiplier = 2
@@ -105,6 +106,12 @@ type ConnectionOptions interface {
// WriteRetryOptions returns the retry options for retrying failed writes.
WriteRetryOptions() retry.Options
+
+ // SetRWOptions sets RW options.
+ SetRWOptions(value xio.Options) ConnectionOptions
+
+ // RWOptions returns the RW options.
+ RWOptions() xio.Options
}
type connectionOptions struct {
@@ -118,6 +125,7 @@ type connectionOptions struct {
multiplier int
maxDuration time.Duration
writeRetryOpts retry.Options
+ rwOpts xio.Options
}
// NewConnectionOptions create a new set of connection options.
@@ -139,6 +147,7 @@ func NewConnectionOptions() ConnectionOptions {
multiplier: defaultReconnectThresholdMultiplier,
maxDuration: defaultMaxReconnectDuration,
writeRetryOpts: defaultWriteRetryOpts,
+ rwOpts: xio.NewOptions(),
}
}
@@ -241,3 +250,13 @@ func (o *connectionOptions) SetWriteRetryOptions(value retry.Options) Connection
func (o *connectionOptions) WriteRetryOptions() retry.Options {
return o.writeRetryOpts
}
+
+func (o *connectionOptions) SetRWOptions(value xio.Options) ConnectionOptions {
+ opts := *o
+ opts.rwOpts = value
+ return &opts
+}
+
+func (o *connectionOptions) RWOptions() xio.Options {
+ return o.rwOpts
+}
diff --git a/src/aggregator/client/m3msg_options.go b/src/aggregator/client/m3msg_options.go
new file mode 100644
index 0000000000..ca163190ae
--- /dev/null
+++ b/src/aggregator/client/m3msg_options.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package client
+
+import (
+ "errors"
+
+ "github.com/m3db/m3/src/msg/producer"
+ "github.com/m3db/m3/src/x/instrument"
+)
+
+var (
+ errM3MsgOptionsNoProducer = errors.New("no producer set")
+
+ // defaultM3MsgTimerOptions ensures M3Msg clients use low-overhead
+ // histogram timers by default (this can't be done for legacy clients
+ // since users depend on those stats being non-histograms).
+ defaultM3MsgTimerOptions = instrument.TimerOptions{
+ Type: instrument.HistogramTimerType,
+ HistogramBuckets: instrument.DefaultHistogramTimerHistogramBuckets(),
+ }
+)
+
+// M3MsgOptions is a set of M3Msg client options.
+type M3MsgOptions interface {
+ // Validate validates the M3Msg client options.
+ Validate() error
+
+ // SetProducer sets the producer.
+ SetProducer(value producer.Producer) M3MsgOptions
+
+ // Producer gets the producer.
+ Producer() producer.Producer
+
+ // SetTimerOptions sets the instrument timer options.
+ SetTimerOptions(value instrument.TimerOptions) M3MsgOptions
+
+ // TimerOptions gets the instrument timer options.
+ TimerOptions() instrument.TimerOptions
+}
+
+type m3msgOptions struct {
+ producer producer.Producer
+ timerOptions instrument.TimerOptions
+}
+
+// NewM3MsgOptions returns a new set of M3Msg options.
+func NewM3MsgOptions() M3MsgOptions {
+ return &m3msgOptions{
+ timerOptions: defaultM3MsgTimerOptions,
+ }
+}
+
+func (o *m3msgOptions) Validate() error {
+ if o.producer == nil {
+ return errM3MsgOptionsNoProducer
+ }
+ return nil
+}
+
+func (o *m3msgOptions) SetProducer(value producer.Producer) M3MsgOptions {
+ opts := *o
+ opts.producer = value
+ return &opts
+}
+
+func (o *m3msgOptions) Producer() producer.Producer {
+ return o.producer
+}
+
+func (o *m3msgOptions) SetTimerOptions(value instrument.TimerOptions) M3MsgOptions {
+ opts := *o
+ opts.timerOptions = value
+ return &opts
+}
+
+func (o *m3msgOptions) TimerOptions() instrument.TimerOptions {
+ return o.timerOptions
+}
diff --git a/src/aggregator/client/options.go b/src/aggregator/client/options.go
index 14c338bc45..73d91d9ca1 100644
--- a/src/aggregator/client/options.go
+++ b/src/aggregator/client/options.go
@@ -21,6 +21,8 @@
package client
import (
+ "errors"
+ "fmt"
"time"
"github.com/m3db/m3/src/aggregator/sharding"
@@ -28,9 +30,22 @@ import (
"github.com/m3db/m3/src/metrics/encoding/protobuf"
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
)
+// AggregatorClientType determines the aggregator client type.
+type AggregatorClientType int
+
const (
+ // LegacyAggregatorClient is the legacy aggregator client type that uses its
+ // own TCP negotiation, load balancing and data transmission protocol.
+ LegacyAggregatorClient AggregatorClientType = iota
+ // M3MsgAggregatorClient is the M3Msg aggregator client type that uses M3Msg
+ // to publish to an M3Msg topic that the aggregator consumes from.
+ M3MsgAggregatorClient
+
+ defaultAggregatorClient = LegacyAggregatorClient
+
defaultFlushSize = 1440
// defaultMaxTimerBatchSize is the default maximum timer batch size.
@@ -60,8 +75,64 @@ const (
defaultBatchFlushDeadline = 100 * time.Millisecond
)
+var (
+ validAggregatorClientTypes = []AggregatorClientType{
+ LegacyAggregatorClient,
+ M3MsgAggregatorClient,
+ }
+
+ errLegacyClientNoWatcherOptions = errors.New("legacy client: no watcher options set")
+ errM3MsgClientNoOptions = errors.New("m3msg aggregator client: no m3msg options set")
+ errNoRWOpts = errors.New("no rw opts set for aggregator")
+)
+
+func (t AggregatorClientType) String() string {
+ switch t {
+ case LegacyAggregatorClient:
+ return "legacy"
+ case M3MsgAggregatorClient:
+ return "m3msg"
+ }
+ return "unknown"
+}
+
+// UnmarshalYAML unmarshals an AggregatorClientType from its string representation,
+// validating it against the known types.
+func (t *AggregatorClientType) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var str string
+ if err := unmarshal(&str); err != nil {
+ return err
+ }
+ if str == "" {
+ *t = defaultAggregatorClient
+ return nil
+ }
+ for _, valid := range validAggregatorClientTypes {
+ if str == valid.String() {
+ *t = valid
+ return nil
+ }
+ }
+ return fmt.Errorf("invalid AggregatorClientType: value=%s, valid=%v",
+ str, validAggregatorClientTypes)
+}
+
// Options provide a set of client options.
type Options interface {
+ // Validate validates the client options.
+ Validate() error
+
+ // SetAggregatorClientType sets the client type.
+ SetAggregatorClientType(value AggregatorClientType) Options
+
+ // AggregatorClientType returns the client type.
+ AggregatorClientType() AggregatorClientType
+
+ // SetM3MsgOptions sets the M3Msg aggregator client options.
+ SetM3MsgOptions(value M3MsgOptions) Options
+
+ // M3MsgOptions returns the M3Msg aggregator client options.
+ M3MsgOptions() M3MsgOptions
+
// SetClockOptions sets the clock options.
SetClockOptions(value clock.Options) Options
@@ -147,9 +218,16 @@ type Options interface {
// BatchFlushDeadline returns the deadline that triggers a write of queued buffers.
BatchFlushDeadline() time.Duration
+
+ // SetRWOptions sets RW options.
+ SetRWOptions(value xio.Options) Options
+
+ // RWOptions returns the RW options.
+ RWOptions() xio.Options
}
type options struct {
+ aggregatorClientType AggregatorClientType
clockOpts clock.Options
instrumentOpts instrument.Options
encoderOpts protobuf.UnaggregatedOptions
@@ -164,6 +242,8 @@ type options struct {
dropType DropType
maxBatchSize int
batchFlushDeadline time.Duration
+ m3msgOptions M3MsgOptions
+ rwOpts xio.Options
}
// NewOptions creates a new set of client options.
@@ -183,9 +263,51 @@ func NewOptions() Options {
dropType: defaultDropType,
maxBatchSize: defaultMaxBatchSize,
batchFlushDeadline: defaultBatchFlushDeadline,
+ rwOpts: xio.NewOptions(),
}
}
+func (o *options) Validate() error {
+ if o.rwOpts == nil {
+ return errNoRWOpts
+ }
+ switch o.aggregatorClientType {
+ case M3MsgAggregatorClient:
+ opts := o.m3msgOptions
+ if opts == nil {
+ return errM3MsgClientNoOptions
+ }
+ return opts.Validate()
+ case LegacyAggregatorClient:
+ if o.watcherOpts == nil {
+ return errLegacyClientNoWatcherOptions
+ }
+ return nil
+ default:
+ return fmt.Errorf("unknown client type: %v", o.aggregatorClientType)
+ }
+}
+
+func (o *options) SetAggregatorClientType(value AggregatorClientType) Options {
+ opts := *o
+ opts.aggregatorClientType = value
+ return &opts
+}
+
+func (o *options) AggregatorClientType() AggregatorClientType {
+ return o.aggregatorClientType
+}
+
+func (o *options) SetM3MsgOptions(value M3MsgOptions) Options {
+ opts := *o
+ opts.m3msgOptions = value
+ return &opts
+}
+
+func (o *options) M3MsgOptions() M3MsgOptions {
+ return o.m3msgOptions
+}
+
func (o *options) SetClockOptions(value clock.Options) Options {
opts := *o
opts.clockOpts = value
@@ -332,3 +454,13 @@ func (o *options) SetBatchFlushDeadline(value time.Duration) Options {
func (o *options) BatchFlushDeadline() time.Duration {
return o.batchFlushDeadline
}
+
+func (o *options) SetRWOptions(value xio.Options) Options {
+ opts := *o
+ opts.rwOpts = value
+ return &opts
+}
+
+func (o *options) RWOptions() xio.Options {
+ return o.rwOpts
+}
diff --git a/src/aggregator/client/payload.go b/src/aggregator/client/payload.go
index dc66b37172..ffc2722450 100644
--- a/src/aggregator/client/payload.go
+++ b/src/aggregator/client/payload.go
@@ -24,6 +24,7 @@ import (
"github.com/m3db/m3/src/metrics/metadata"
"github.com/m3db/m3/src/metrics/metric/aggregated"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
+ "github.com/m3db/m3/src/metrics/policy"
)
type payloadType int
@@ -34,6 +35,8 @@ const (
untimedType
forwardedType
timedType
+ timedWithStagedMetadatasType
+ passthroughType
)
type untimedPayload struct {
@@ -51,9 +54,21 @@ type timedPayload struct {
metadata metadata.TimedMetadata
}
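+// timedWithStagedMetadatas is a timed metric paired with staged metadatas.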
+type timedWithStagedMetadatas struct {
+ metric aggregated.Metric
+ metadatas metadata.StagedMetadatas
+}
+
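+// passthroughPayload is a metric written through with its storage policy,
+// bypassing aggregation.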
+type passthroughPayload struct {
+ metric aggregated.Metric
+ storagePolicy policy.StoragePolicy
+}
+
type payloadUnion struct {
- payloadType payloadType
- untimed untimedPayload
- forwarded forwardedPayload
- timed timedPayload
+ payloadType payloadType
+ untimed untimedPayload
+ forwarded forwardedPayload
+ timed timedPayload
+ timedWithStagedMetadatas timedWithStagedMetadatas
+ passthrough passthroughPayload
}
diff --git a/src/aggregator/client/queue.go b/src/aggregator/client/queue.go
index 4fbd43c7c1..355c16d937 100644
--- a/src/aggregator/client/queue.go
+++ b/src/aggregator/client/queue.go
@@ -131,12 +131,14 @@ func newInstanceQueue(instance placement.Instance, opts Options) instanceQueue {
instrumentOpts = opts.InstrumentOptions()
scope = instrumentOpts.MetricsScope()
connInstrumentOpts = instrumentOpts.SetMetricsScope(scope.SubScope("connection"))
- connOpts = opts.ConnectionOptions().SetInstrumentOptions(connInstrumentOpts)
- conn = newConnection(instance.Endpoint(), connOpts)
- iOpts = opts.InstrumentOptions()
- queueSize = opts.InstanceQueueSize()
- maxBatchSize = opts.MaxBatchSize()
- writeInterval = opts.BatchFlushDeadline()
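+ // Propagate RW options so the connection uses the configured resettable writer.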
+ connOpts = opts.ConnectionOptions().
+ SetInstrumentOptions(connInstrumentOpts).
+ SetRWOptions(opts.RWOptions())
+ conn = newConnection(instance.Endpoint(), connOpts)
+ iOpts = opts.InstrumentOptions()
+ queueSize = opts.InstanceQueueSize()
+ maxBatchSize = opts.MaxBatchSize()
+ writeInterval = opts.BatchFlushDeadline()
)
q := &queue{
dropType: opts.QueueDropType(),
diff --git a/src/aggregator/client/writer.go b/src/aggregator/client/writer.go
index 279158af6b..4d94a69319 100644
--- a/src/aggregator/client/writer.go
+++ b/src/aggregator/client/writer.go
@@ -32,6 +32,7 @@ import (
"github.com/m3db/m3/src/metrics/metric"
"github.com/m3db/m3/src/metrics/metric/aggregated"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
+ "github.com/m3db/m3/src/metrics/policy"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/uber-go/tally"
@@ -164,6 +165,11 @@ func (w *writer) encodeWithLock(
return w.encodeForwardedWithLock(encoder, payload.forwarded.metric, payload.forwarded.metadata)
case timedType:
return w.encodeTimedWithLock(encoder, payload.timed.metric, payload.timed.metadata)
+ case timedWithStagedMetadatasType:
+ elem := payload.timedWithStagedMetadatas
+ return w.encodeTimedWithStagedMetadatasWithLock(encoder, elem.metric, elem.metadatas)
+ case passthroughType:
+ return w.encodePassthroughWithLock(encoder, payload.passthrough.metric, payload.passthrough.storagePolicy)
default:
return fmt.Errorf("unknown payload type: %v", payload.payloadType)
}
@@ -374,6 +380,84 @@ func (w *writer) encodeTimedWithLock(
return w.enqueueBuffer(buffer)
}
+func (w *writer) encodeTimedWithStagedMetadatasWithLock(
+ encoder *lockedEncoder,
+ metric aggregated.Metric,
+ metadatas metadata.StagedMetadatas,
+) error {
+ encoder.Lock()
+
+ sizeBefore := encoder.Len()
+ msg := encoding.UnaggregatedMessageUnion{
+ Type: encoding.TimedMetricWithMetadatasType,
+ TimedMetricWithMetadatas: aggregated.TimedMetricWithMetadatas{
+ Metric: metric,
+ StagedMetadatas: metadatas,
+ }}
+ if err := encoder.EncodeMessage(msg); err != nil {
+ w.log.Error("encode timed metric error",
+ zap.Any("metric", metric),
+ zap.Any("metadatas", metadatas),
+ zap.Error(err),
+ )
+ // Rewind buffer and clear out the encoder error.
+ encoder.Truncate(sizeBefore)
+ encoder.Unlock()
+ w.metrics.encodeErrors.Inc(1)
+ return err
+ }
+
+ // If the buffer hasn't reached the flush size yet, do nothing.
+ if sizeAfter := encoder.Len(); sizeAfter < w.flushSize {
+ encoder.Unlock()
+ return nil
+ }
+
+ // Otherwise we enqueue the current buffer.
+ buffer := w.prepareEnqueueBufferWithLock(encoder, sizeBefore)
+ encoder.Unlock()
+ return w.enqueueBuffer(buffer)
+}
+
+func (w *writer) encodePassthroughWithLock(
+ encoder *lockedEncoder,
+ metric aggregated.Metric,
+ storagePolicy policy.StoragePolicy,
+) error {
+ encoder.Lock()
+
+ sizeBefore := encoder.Len()
+ msg := encoding.UnaggregatedMessageUnion{
+ Type: encoding.PassthroughMetricWithMetadataType,
+ PassthroughMetricWithMetadata: aggregated.PassthroughMetricWithMetadata{
+ Metric: metric,
+ StoragePolicy: storagePolicy,
+ }}
+ if err := encoder.EncodeMessage(msg); err != nil {
+ w.log.Error("encode passthrough metric error",
+ zap.Any("metric", metric),
+ zap.Any("storagepolicy", storagePolicy),
+ zap.Error(err),
+ )
+ // Rewind buffer and clear out the encoder error.
+ encoder.Truncate(sizeBefore)
+ encoder.Unlock()
+ w.metrics.encodeErrors.Inc(1)
+ return err
+ }
+
+ // If the buffer hasn't reached the flush size yet, do nothing.
+ if sizeAfter := encoder.Len(); sizeAfter < w.flushSize {
+ encoder.Unlock()
+ return nil
+ }
+
+ // Otherwise we enqueue the current buffer.
+ buffer := w.prepareEnqueueBufferWithLock(encoder, sizeBefore)
+ encoder.Unlock()
+ return w.enqueueBuffer(buffer)
+}
+
// prepareEnqueueBufferWithLock prepares the writer to enqueue a
// buffer onto its instance queue. It gets a new buffer from pool,
// copies the bytes exceeding sizeBefore to it, resets the encoder
diff --git a/src/aggregator/client/writer_test.go b/src/aggregator/client/writer_test.go
index d673f2a4c4..1dcd471508 100644
--- a/src/aggregator/client/writer_test.go
+++ b/src/aggregator/client/writer_test.go
@@ -802,14 +802,15 @@ func testWriterConcurrentWriteStress(
defer ctrl.Finish()
var (
- numIter = 3000
- shard = uint32(0)
- counters = make([]unaggregated.Counter, numIter)
- timers = make([]unaggregated.BatchTimer, numIter)
- gauges = make([]unaggregated.Gauge, numIter)
- forwarded = make([]aggregated.ForwardedMetric, numIter)
- resultsLock sync.Mutex
- results [][]byte
+ numIter = 3000
+ shard = uint32(0)
+ counters = make([]unaggregated.Counter, numIter)
+ timers = make([]unaggregated.BatchTimer, numIter)
+ gauges = make([]unaggregated.Gauge, numIter)
+ forwarded = make([]aggregated.ForwardedMetric, numIter)
+ passthroughed = make([]aggregated.Metric, numIter)
+ resultsLock sync.Mutex
+ results [][]byte
)
// Construct metrics input.
@@ -841,6 +842,12 @@ func testWriterConcurrentWriteStress(
TimeNanos: int64(i),
Values: forwardedVals,
}
+ passthroughed[i] = aggregated.Metric{
+ Type: metric.GaugeType,
+ ID: []byte(fmt.Sprintf("passthroughed%d", i)),
+ TimeNanos: int64(i),
+ Value: float64(i),
+ }
}
queue := NewMockinstanceQueue(ctrl)
@@ -863,7 +870,7 @@ func testWriterConcurrentWriteStress(
w.queue = queue
var wg sync.WaitGroup
- wg.Add(4)
+ wg.Add(5)
go func() {
defer wg.Done()
@@ -940,14 +947,30 @@ func testWriterConcurrentWriteStress(
}
}()
+ go func() {
+ defer wg.Done()
+
+ for i := 0; i < numIter; i++ {
+ payload := payloadUnion{
+ payloadType: passthroughType,
+ passthrough: passthroughPayload{
+ metric: passthroughed[i],
+ storagePolicy: testPassthroughMetadata,
+ },
+ }
+ require.NoError(t, w.Write(shard, payload))
+ }
+ }()
+
wg.Wait()
w.Flush()
var (
- resCounters = make([]unaggregated.Counter, 0, numIter)
- resTimers = make([]unaggregated.BatchTimer, 0, numIter)
- resGauges = make([]unaggregated.Gauge, 0, numIter)
- resForwarded = make([]aggregated.ForwardedMetric, 0, numIter)
+ resCounters = make([]unaggregated.Counter, 0, numIter)
+ resTimers = make([]unaggregated.BatchTimer, 0, numIter)
+ resGauges = make([]unaggregated.Gauge, 0, numIter)
+ resForwarded = make([]aggregated.ForwardedMetric, 0, numIter)
+ resPassthroughed = make([]aggregated.Metric, 0, numIter)
)
for i := 0; i < len(results); i++ {
buf := bytes.NewBuffer(results[i])
@@ -971,6 +994,10 @@ func testWriterConcurrentWriteStress(
require.Equal(t, testForwardMetadata, msgResult.ForwardedMetricWithMetadata.ForwardMetadata)
metric := cloneForwardedMetric(msgResult.ForwardedMetricWithMetadata.ForwardedMetric)
resForwarded = append(resForwarded, metric)
+ case encoding.PassthroughMetricWithMetadataType:
+ require.Equal(t, testPassthroughMetadata, msgResult.PassthroughMetricWithMetadata.StoragePolicy)
+ metric := clonePassthroughedMetric(msgResult.PassthroughMetricWithMetadata.Metric)
+ resPassthroughed = append(resPassthroughed, metric)
default:
require.Fail(t, "unrecognized message type %v", msgResult.Type)
}
@@ -1078,3 +1105,10 @@ func cloneForwardedMetric(m aggregated.ForwardedMetric) aggregated.ForwardedMetr
cloned.Values = append([]float64(nil), m.Values...)
return cloned
}
+
+func clonePassthroughedMetric(m aggregated.Metric) aggregated.Metric {
+ cloned := m
+ cloned.ID = append([]byte(nil), m.ID...)
+ cloned.Value = m.Value
+ return cloned
+}
diff --git a/src/aggregator/config/m3aggregator.yml b/src/aggregator/config/m3aggregator.yml
index 92105e8db5..9cdd5eb0e2 100644
--- a/src/aggregator/config/m3aggregator.yml
+++ b/src/aggregator/config/m3aggregator.yml
@@ -7,6 +7,8 @@ metrics:
prometheus:
onError: none
handlerPath: /metrics
+ listenAddress: 0.0.0.0:6002
+ timerType: histogram
sanitization: prometheus
samplingRate: 1.0
extended: none
@@ -262,7 +264,7 @@ aggregator:
readBufferSize: 256
forwarding:
maxSingleDelay: 5s
- entryTTL: 6h
+ entryTTL: 1h
entryCheckInterval: 10m
maxTimerBatchSizePerWrite: 140
defaultStoragePolicies:
diff --git a/src/aggregator/generated/mocks/generate.go b/src/aggregator/generated/mocks/generate.go
index f733f62adc..1b16756579 100644
--- a/src/aggregator/generated/mocks/generate.go
+++ b/src/aggregator/generated/mocks/generate.go
@@ -19,7 +19,7 @@
// THE SOFTWARE.
// mockgen rules for generating mocks for exported interfaces (reflection mode).
-//go:generate sh -c "mockgen -package=aggregator github.com/m3db/m3/src/aggregator/aggregator ElectionManager,FlushTimesManager,PlacementManager | genclean -pkg github.com/m3db/m3/src/aggregator/aggregator -out $GOPATH/src/github.com/m3db/m3/src/aggregator/aggregator/aggregator_mock.go"
+//go:generate sh -c "mockgen -package=aggregator github.com/m3db/m3/src/aggregator/aggregator Aggregator,ElectionManager,FlushTimesManager,PlacementManager | genclean -pkg github.com/m3db/m3/src/aggregator/aggregator -out $GOPATH/src/github.com/m3db/m3/src/aggregator/aggregator/aggregator_mock.go"
//go:generate sh -c "mockgen -package=client github.com/m3db/m3/src/aggregator/client Client,AdminClient | genclean -pkg github.com/m3db/m3/src/aggregator/client -out $GOPATH/src/github.com/m3db/m3/src/aggregator/client/client_mock.go"
//go:generate sh -c "mockgen -package=handler github.com/m3db/m3/src/aggregator/aggregator/handler Handler | genclean -pkg github.com/m3db/m3/src/aggregator/aggregator/handler -out $GOPATH/src/github.com/m3db/m3/src/aggregator/aggregator/handler/handler_mock.go"
//go:generate sh -c "mockgen -package=runtime github.com/m3db/m3/src/aggregator/runtime OptionsWatcher | genclean -pkg github.com/m3db/m3/src/aggregator/runtime -out $GOPATH/src/github.com/m3db/m3/src/aggregator/runtime/runtime_mock.go"
diff --git a/src/aggregator/hash/hash.go b/src/aggregator/hash/hash.go
index c78a0dceb9..6bd798a227 100644
--- a/src/aggregator/hash/hash.go
+++ b/src/aggregator/hash/hash.go
@@ -24,7 +24,7 @@
// were added.
package hash
-import "github.com/spaolacci/murmur3"
+import "github.com/m3db/stackmurmur3/v2"
// Hash128 is a 128-bit hash of an ID consisting of two unsigned 64-bit ints.
type Hash128 [2]uint64
diff --git a/src/aggregator/integration/client.go b/src/aggregator/integration/client.go
index 7551a0cc4a..b83394a813 100644
--- a/src/aggregator/integration/client.go
+++ b/src/aggregator/integration/client.go
@@ -187,6 +187,20 @@ func (c *client) writeForwardedMetricWithMetadata(
return c.writeUnaggregatedMessage(msg)
}
+func (c *client) writePassthroughMetricWithMetadata(
+ metric aggregated.Metric,
+ storagePolicy policy.StoragePolicy,
+) error {
+ msg := encoding.UnaggregatedMessageUnion{
+ Type: encoding.PassthroughMetricWithMetadataType,
+ PassthroughMetricWithMetadata: aggregated.PassthroughMetricWithMetadata{
+ Metric: metric,
+ StoragePolicy: storagePolicy,
+ },
+ }
+ return c.writeUnaggregatedMessage(msg)
+}
+
func (c *client) writeUnaggregatedMessage(
msg encoding.UnaggregatedMessageUnion,
) error {
diff --git a/src/aggregator/integration/integration_data.go b/src/aggregator/integration/integration_data.go
index 3bfa3763e5..220f8119e4 100644
--- a/src/aggregator/integration/integration_data.go
+++ b/src/aggregator/integration/integration_data.go
@@ -210,6 +210,8 @@ func generateTestDataset(opts datasetGenOpts) (testDataset, error) {
mu = generateTestForwardedMetric(metricType, opts.ids[i], timestamp.UnixNano(), intervalIdx, i, opts.valueGenOpts.forwarded)
case timedMetric:
mu = generateTestTimedMetric(metricType, opts.ids[i], timestamp.UnixNano(), intervalIdx, i, opts.valueGenOpts.timed)
+ case passthroughMetric:
+ mu = generateTestPassthroughMetric(metricType, opts.ids[i], timestamp.UnixNano(), intervalIdx, i, opts.valueGenOpts.passthrough)
default:
return nil, fmt.Errorf("unrecognized metric category: %v", opts.category)
}
@@ -277,6 +279,24 @@ func generateTestTimedMetric(
}
}
+func generateTestPassthroughMetric(
+ metricType metric.Type,
+ id string,
+ timeNanos int64,
+ intervalIdx, idIdx int,
+ valueGenOpts passthroughValueGenOpts,
+) metricUnion {
+ return metricUnion{
+ category: passthroughMetric,
+ passthrough: aggregated.Metric{
+ Type: metricType,
+ ID: metricid.RawID(id),
+ TimeNanos: timeNanos,
+ Value: valueGenOpts.passthroughValueGenFn(intervalIdx, idIdx),
+ },
+ }
+}
+
func generateTestForwardedMetric(
metricType metric.Type,
id string,
@@ -367,7 +387,7 @@ func computeExpectedAggregationBuckets(
var (
aggTypeOpts = opts.AggregationTypesOptions()
aggTypes = maggregation.NewIDDecompressor().MustDecompress(bucket.key.aggregationID)
- aggregationOpts = aggregation.NewOptions()
+ aggregationOpts = aggregation.NewOptions(opts.InstrumentOptions())
)
switch mu.Type() {
case metric.CounterType:
@@ -402,6 +422,9 @@ func computeExpectedAggregationBuckets(
values, err = addForwardedMetricToAggregation(values, mu.forwarded)
case timedMetric:
values, err = addTimedMetricToAggregation(values, mu.timed)
+ case passthroughMetric:
+ // Passthrough metrics need no aggregation.
+ err = nil
default:
err = fmt.Errorf("unrecognized metric category: %v", mu.category)
}
@@ -423,15 +446,15 @@ func addUntimedMetricToAggregation(
switch mu.Type {
case metric.CounterType:
v := values.(aggregation.Counter)
- v.Update(mu.CounterVal)
+ v.Update(time.Now(), mu.CounterVal)
return v, nil
case metric.TimerType:
v := values.(aggregation.Timer)
- v.AddBatch(mu.BatchTimerVal)
+ v.AddBatch(time.Now(), mu.BatchTimerVal)
return v, nil
case metric.GaugeType:
v := values.(aggregation.Gauge)
- v.Update(mu.GaugeVal)
+ v.Update(time.Now(), mu.GaugeVal)
return v, nil
default:
return nil, fmt.Errorf("unrecognized untimed metric type %v", mu.Type)
@@ -445,15 +468,15 @@ func addTimedMetricToAggregation(
switch mu.Type {
case metric.CounterType:
v := values.(aggregation.Counter)
- v.Update(int64(mu.Value))
+ v.Update(time.Now(), int64(mu.Value))
return v, nil
case metric.TimerType:
v := values.(aggregation.Timer)
- v.AddBatch([]float64{mu.Value})
+ v.AddBatch(time.Now(), []float64{mu.Value})
return v, nil
case metric.GaugeType:
v := values.(aggregation.Gauge)
- v.Update(mu.Value)
+ v.Update(time.Now(), mu.Value)
return v, nil
default:
return nil, fmt.Errorf("unrecognized timed metric type %v", mu.Type)
@@ -468,17 +491,17 @@ func addForwardedMetricToAggregation(
case metric.CounterType:
v := values.(aggregation.Counter)
for _, val := range mu.Values {
- v.Update(int64(val))
+ v.Update(time.Now(), int64(val))
}
return v, nil
case metric.TimerType:
v := values.(aggregation.Timer)
- v.AddBatch(mu.Values)
+ v.AddBatch(time.Now(), mu.Values)
return v, nil
case metric.GaugeType:
v := values.(aggregation.Gauge)
for _, val := range mu.Values {
- v.Update(val)
+ v.Update(time.Now(), val)
}
return v, nil
default:
@@ -691,6 +714,7 @@ const (
untimedMetric metricCategory = iota
forwardedMetric
timedMetric
+ passthroughMetric
)
func (c metricCategory) TimestampNanosFn() timestampNanosFn {
@@ -707,16 +731,21 @@ func (c metricCategory) TimestampNanosFn() timestampNanosFn {
return func(windowStartAtNanos int64, resolution time.Duration) int64 {
return windowStartAtNanos + resolution.Nanoseconds()
}
+ case passthroughMetric:
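+ // Passthrough metrics keep their original timestamps.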
+ return func(windowStartAtNanos int64, _ time.Duration) int64 {
+ return windowStartAtNanos
+ }
default:
panic(fmt.Errorf("unknown category type: %v", c))
}
}
type metricUnion struct {
- category metricCategory
- untimed unaggregated.MetricUnion
- forwarded aggregated.ForwardedMetric
- timed aggregated.Metric
+ category metricCategory
+ untimed unaggregated.MetricUnion
+ forwarded aggregated.ForwardedMetric
+ timed aggregated.Metric
+ passthrough aggregated.Metric
}
func (mu metricUnion) Type() metric.Type {
@@ -727,6 +756,8 @@ func (mu metricUnion) Type() metric.Type {
return mu.forwarded.Type
case timedMetric:
return mu.timed.Type
+ case passthroughMetric:
+ return mu.passthrough.Type
default:
panic(fmt.Errorf("unknown category type: %v", mu.category))
}
@@ -740,6 +771,8 @@ func (mu metricUnion) ID() metricid.RawID {
return mu.forwarded.ID
case timedMetric:
return mu.timed.ID
+ case passthroughMetric:
+ return mu.passthrough.ID
default:
panic(fmt.Errorf("unknown category type: %v", mu.category))
}
@@ -752,16 +785,18 @@ const (
stagedMetadatasType
forwardMetadataType
timedMetadataType
+ passthroughMetadataType
)
type metadataFn func(idx int) metadataUnion
type metadataUnion struct {
- mType metadataType
- policiesList policy.PoliciesList
- stagedMetadatas metadata.StagedMetadatas
- forwardMetadata metadata.ForwardMetadata
- timedMetadata metadata.TimedMetadata
+ mType metadataType
+ policiesList policy.PoliciesList
+ stagedMetadatas metadata.StagedMetadatas
+ forwardMetadata metadata.ForwardMetadata
+ timedMetadata metadata.TimedMetadata
+ passthroughMetadata policy.StoragePolicy
}
func (mu metadataUnion) expectedAggregationKeys(
@@ -777,6 +812,8 @@ func (mu metadataUnion) expectedAggregationKeys(
return computeExpectedAggregationKeysFromForwardMetadata(mu.forwardMetadata), nil
case timedMetadataType:
return computeExpectedAggregationKeysFromTimedMetadata(mu.timedMetadata), nil
+ case passthroughMetadataType:
+ return computeExpectedAggregationKeysFromPassthroughMetadata(mu.passthroughMetadata), nil
default:
return nil, fmt.Errorf("unexpected metadata type: %v", mu.mType)
}
@@ -873,6 +910,17 @@ func computeExpectedAggregationKeysFromTimedMetadata(
}
}
+func computeExpectedAggregationKeysFromPassthroughMetadata(
+ metadata policy.StoragePolicy,
+) aggregationKeys {
+ return aggregationKeys{
+ {
+ aggregationID: maggregation.DefaultID,
+ storagePolicy: metadata,
+ },
+ }
+}
+
func computeExpectedAggregationKeysFromForwardMetadata(
metadata metadata.ForwardMetadata,
) aggregationKeys {
@@ -947,6 +995,21 @@ var defaultTimedValueGenOpts = timedValueGenOpts{
timedValueGenFn: defaultTimedValueGenFn,
}
+type passthroughValueGenFn func(intervalIdx, idIdx int) float64
+
+func defaultPassthroughValueGenFn(intervalIdx, _ int) float64 {
+ testVal := 123.456
+ return testVal + float64(intervalIdx)
+}
+
+type passthroughValueGenOpts struct {
+ passthroughValueGenFn passthroughValueGenFn
+}
+
+var defaultPassthroughValueGenOpts = passthroughValueGenOpts{
+ passthroughValueGenFn: defaultPassthroughValueGenFn,
+}
+
type forwardedValueGenFn func(intervalIdx, idIdx int) []float64
func defaultForwardedValueGenFn(intervalIdx, _ int) []float64 {
@@ -967,15 +1030,17 @@ var defaultForwardedValueGenOpts = forwardedValueGenOpts{
}
type valueGenOpts struct {
- untimed untimedValueGenOpts
- timed timedValueGenOpts
- forwarded forwardedValueGenOpts
+ untimed untimedValueGenOpts
+ timed timedValueGenOpts
+ forwarded forwardedValueGenOpts
+ passthrough passthroughValueGenOpts
}
var defaultValueGenOpts = valueGenOpts{
- untimed: defaultUntimedValueGenOpts,
- timed: defaultTimedValueGenOpts,
- forwarded: defaultForwardedValueGenOpts,
+ untimed: defaultUntimedValueGenOpts,
+ timed: defaultTimedValueGenOpts,
+ forwarded: defaultForwardedValueGenOpts,
+ passthrough: defaultPassthroughValueGenOpts,
}
type datasetGenOpts struct {
diff --git a/src/aggregator/integration/multi_server_forwarding_pipeline_test.go b/src/aggregator/integration/multi_server_forwarding_pipeline_test.go
index 85221ce080..69e2357c88 100644
--- a/src/aggregator/integration/multi_server_forwarding_pipeline_test.go
+++ b/src/aggregator/integration/multi_server_forwarding_pipeline_test.go
@@ -158,7 +158,8 @@ func testMultiServerForwardingPipeline(t *testing.T, discardNaNAggregatedValues
connectionOpts := aggclient.NewConnectionOptions().
SetInitReconnectThreshold(1).
SetMaxReconnectThreshold(1).
- SetMaxReconnectDuration(2 * time.Second)
+ SetMaxReconnectDuration(2 * time.Second).
+ SetWriteTimeout(time.Second)
// Create servers.
servers := make([]*testServerSetup, 0, len(multiServerSetup))
@@ -397,8 +398,9 @@ func testMultiServerForwardingPipeline(t *testing.T, discardNaNAggregatedValues
continue
}
currTime := start.Add(time.Duration(i+1) * storagePolicy.Resolution().Window)
- agg := aggregation.NewGauge(aggregation.NewOptions())
- agg.Update(expectedValuesList[spIdx][i])
+ instrumentOpts := aggregatorOpts.InstrumentOptions()
+ agg := aggregation.NewGauge(aggregation.NewOptions(instrumentOpts))
+ agg.Update(time.Now(), expectedValuesList[spIdx][i])
expectedValuesByTimeList[spIdx][currTime.UnixNano()] = agg
}
}
diff --git a/src/aggregator/integration/one_client_passthru_test.go b/src/aggregator/integration/one_client_passthru_test.go
new file mode 100644
index 0000000000..c63134d66e
--- /dev/null
+++ b/src/aggregator/integration/one_client_passthru_test.go
@@ -0,0 +1,183 @@
+// +build integration
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package integration
+
+import (
+ "reflect"
+ "sort"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/cluster/placement"
+ "github.com/m3db/m3/src/metrics/metric"
+ "github.com/m3db/m3/src/metrics/metric/aggregated"
+ "github.com/m3db/m3/src/metrics/policy"
+ "github.com/m3db/m3/src/x/clock"
+ xtime "github.com/m3db/m3/src/x/time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestOneClientPassthroughMetrics(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow()
+ }
+
+ serverOpts := newTestServerOptions()
+
+ // Clock setup.
+ var lock sync.RWMutex
+ now := time.Now().Truncate(time.Hour)
+ getNowFn := func() time.Time {
+ lock.RLock()
+ t := now
+ lock.RUnlock()
+ return t
+ }
+ setNowFn := func(t time.Time) {
+ lock.Lock()
+ now = t
+ lock.Unlock()
+ }
+ clockOpts := clock.NewOptions().SetNowFn(getNowFn)
+ serverOpts = serverOpts.SetClockOptions(clockOpts)
+
+ // Placement setup.
+ numShards := 1024
+ cfg := placementInstanceConfig{
+ instanceID: serverOpts.InstanceID(),
+ shardSetID: serverOpts.ShardSetID(),
+ shardStartInclusive: 0,
+ shardEndExclusive: uint32(numShards),
+ }
+ instance := cfg.newPlacementInstance()
+ placement := newPlacement(numShards, []placement.Instance{instance})
+ placementKey := serverOpts.PlacementKVKey()
+ placementStore := serverOpts.KVStore()
+ require.NoError(t, setPlacement(placementKey, placementStore, placement))
+
+ // Create server.
+ testServer := newTestServerSetup(t, serverOpts)
+ defer testServer.close()
+
+ // Start the server.
+ log := testServer.aggregatorOpts.InstrumentOptions().Logger()
+ log.Info("test one client sending of passthrough metrics")
+ require.NoError(t, testServer.startServer())
+ log.Info("server is now up")
+ require.NoError(t, testServer.waitUntilLeader())
+ log.Info("server is now the leader")
+
+ var (
+ idPrefix = "full.passthru.id"
+ numIDs = 10
+ start = getNowFn()
+ stop = start.Add(10 * time.Second)
+ interval = 2 * time.Second
+ )
+ client := testServer.newClient()
+ require.NoError(t, client.connect())
+ defer client.close()
+
+ ids := generateTestIDs(idPrefix, numIDs)
+ metadataFn := func(idx int) metadataUnion {
+ return metadataUnion{
+ mType: passthroughMetadataType,
+ passthroughMetadata: policy.NewStoragePolicy(2*time.Second, xtime.Second, time.Hour),
+ }
+ }
+ dataset := mustGenerateTestDataset(t, datasetGenOpts{
+ start: start,
+ stop: stop,
+ interval: interval,
+ ids: ids,
+ category: passthroughMetric,
+ typeFn: constantMetricTypeFnFactory(metric.GaugeType),
+ valueGenOpts: defaultValueGenOpts,
+ metadataFn: metadataFn,
+ })
+
+ for _, data := range dataset {
+ setNowFn(data.timestamp)
+ for _, mm := range data.metricWithMetadatas {
+ require.NoError(t, client.writePassthroughMetricWithMetadata(mm.metric.passthrough, mm.metadata.passthroughMetadata))
+ }
+ require.NoError(t, client.flush())
+
+ // Give server some time to process the incoming packets.
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ // Move time forward and wait for flushing to happen.
+ finalTime := stop.Add(time.Minute + 2*time.Second)
+ setNowFn(finalTime)
+ time.Sleep(2 * time.Second)
+
+ // Stop the server.
+ require.NoError(t, testServer.stopServer())
+ log.Info("server is now down")
+
+ // Validate results.
+ expected := computeExpectedPassthroughResults(t, dataset)
+ actual := testServer.sortedResults()
+ require.Equal(t, dedupResults(expected), dedupResults(actual))
+}
+
+func computeExpectedPassthroughResults(
+ t *testing.T,
+ dataset testDataset,
+) []aggregated.MetricWithStoragePolicy {
+ var expected []aggregated.MetricWithStoragePolicy
+ for _, testData := range dataset {
+ for _, metricWithMetadata := range testData.metricWithMetadatas {
+ require.Equal(t, passthroughMetric, metricWithMetadata.metric.category)
+
+ expectedPassthrough := aggregated.MetricWithStoragePolicy{
+ Metric: metricWithMetadata.metric.passthrough,
+ StoragePolicy: metricWithMetadata.metadata.passthroughMetadata,
+ }
+
+ // The capturingWriter writes ChunkedMetricWithStoragePolicy which has no metric type defined.
+ expectedPassthrough.Metric.Type = metric.UnknownType
+ expected = append(expected, expectedPassthrough)
+ }
+ }
+ // Sort the aggregated metrics.
+ sort.Sort(byTimeIDPolicyAscending(expected))
+ return expected
+}
+
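+// dedupResults removes consecutive duplicate metrics from the sorted results.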
+func dedupResults(
+ results []aggregated.MetricWithStoragePolicy,
+) []aggregated.MetricWithStoragePolicy {
+ var deduped []aggregated.MetricWithStoragePolicy
+ lenDeduped := 0
+ for _, m := range results {
+ if lenDeduped == 0 || !reflect.DeepEqual(deduped[lenDeduped-1], m) {
+ deduped = append(deduped, m)
+ lenDeduped++
+ }
+ }
+ return deduped
+}
diff --git a/src/aggregator/integration/setup.go b/src/aggregator/integration/setup.go
index 6ec6edebb7..e34262780b 100644
--- a/src/aggregator/integration/setup.go
+++ b/src/aggregator/integration/setup.go
@@ -34,6 +34,7 @@ import (
aggclient "github.com/m3db/m3/src/aggregator/client"
"github.com/m3db/m3/src/aggregator/runtime"
httpserver "github.com/m3db/m3/src/aggregator/server/http"
+ m3msgserver "github.com/m3db/m3/src/aggregator/server/m3msg"
rawtcpserver "github.com/m3db/m3/src/aggregator/server/rawtcp"
"github.com/m3db/m3/src/cluster/placement"
"github.com/m3db/m3/src/cluster/services"
@@ -43,6 +44,7 @@ import (
"github.com/m3db/m3/src/metrics/pipeline/applied"
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
xsync "github.com/m3db/m3/src/x/sync"
"github.com/stretchr/testify/require"
@@ -56,8 +58,10 @@ var (
type testServerSetup struct {
opts testServerOptions
+ m3msgAddr string
rawTCPAddr string
httpAddr string
+ m3msgServerOpts m3msgserver.Options
rawTCPServerOpts rawtcpserver.Options
httpServerOpts httpserver.Options
aggregator aggregator.Aggregator
@@ -81,12 +85,18 @@ func newTestServerSetup(t *testing.T, opts testServerOptions) *testServerSetup {
opts = newTestServerOptions()
}
+ // TODO: based on an environment variable, use the M3Msg aggregator as the
+ // default server and client, and run both legacy and M3Msg tests by
+ // setting different types in the Makefile.
+
// Set up worker pool.
workerPool := xsync.NewWorkerPool(opts.WorkerPoolSize())
workerPool.Init()
// Create the server options.
- rawTCPServerOpts := rawtcpserver.NewOptions()
+ rwOpts := xio.NewOptions()
+ rawTCPServerOpts := rawtcpserver.NewOptions().SetRWOptions(rwOpts)
+ m3msgServerOpts := m3msgserver.NewOptions()
httpServerOpts := httpserver.NewOptions()
// Creating the aggregator options.
@@ -159,8 +169,12 @@ func newTestServerSetup(t *testing.T, opts testServerOptions) *testServerSetup {
SetClockOptions(clockOpts).
SetConnectionOptions(opts.ClientConnectionOptions()).
SetShardFn(opts.ShardFn()).
- SetStagedPlacementWatcherOptions(placementWatcherOpts)
- adminClient := aggclient.NewClient(clientOpts).(aggclient.AdminClient)
+ SetStagedPlacementWatcherOptions(placementWatcherOpts).
+ SetRWOptions(rwOpts)
+ c, err := aggclient.NewClient(clientOpts)
+ require.NoError(t, err)
+ adminClient, ok := c.(aggclient.AdminClient)
+ require.True(t, ok)
require.NoError(t, adminClient.Init())
aggregatorOpts = aggregatorOpts.SetAdminClient(adminClient)
@@ -170,7 +184,11 @@ func newTestServerSetup(t *testing.T, opts testServerOptions) *testServerSetup {
resultLock sync.Mutex
)
handler := &capturingHandler{results: &results, resultLock: &resultLock}
- aggregatorOpts = aggregatorOpts.SetFlushHandler(handler)
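+ // Reuse the capturing handler's writer as the passthrough writer so
+ // passthrough metrics are captured as well.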
+ pw, err := handler.NewWriter(tally.NoopScope)
+ if err != nil {
+ panic(err.Error())
+ }
+ aggregatorOpts = aggregatorOpts.SetFlushHandler(handler).SetPassthroughWriter(pw)
// Set up entry pool.
runtimeOpts := runtime.NewOptions()
@@ -204,6 +222,7 @@ func newTestServerSetup(t *testing.T, opts testServerOptions) *testServerSetup {
rawTCPAddr: opts.RawTCPAddr(),
httpAddr: opts.HTTPAddr(),
rawTCPServerOpts: rawTCPServerOpts,
+ m3msgServerOpts: m3msgServerOpts,
httpServerOpts: httpServerOpts,
aggregatorOpts: aggregatorOpts,
handler: handler,
@@ -245,16 +264,20 @@ func (ts *testServerSetup) startServer() error {
}
instrumentOpts := instrument.NewOptions()
+ serverOpts := serve.NewOptions(instrumentOpts).
+ SetM3MsgAddr(ts.m3msgAddr).
+ SetM3MsgServerOpts(ts.m3msgServerOpts).
+ SetRawTCPAddr(ts.rawTCPAddr).
+ SetRawTCPServerOpts(ts.rawTCPServerOpts).
+ SetHTTPAddr(ts.httpAddr).
+ SetHTTPServerOpts(ts.httpServerOpts).
+ SetRWOptions(xio.NewOptions())
go func() {
if err := serve.Serve(
- ts.rawTCPAddr,
- ts.rawTCPServerOpts,
- ts.httpAddr,
- ts.httpServerOpts,
ts.aggregator,
ts.doneCh,
- instrumentOpts,
+ serverOpts,
); err != nil {
select {
case errCh <- err:
diff --git a/src/aggregator/server/http/server.go b/src/aggregator/server/http/server.go
index 37158db51e..11ce369830 100644
--- a/src/aggregator/server/http/server.go
+++ b/src/aggregator/server/http/server.go
@@ -108,4 +108,5 @@ func (s *server) Close() {
if s.listener != nil {
s.listener.Close()
}
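+ // Clear the listener so repeated Close calls are safe.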
+ s.listener = nil
}
diff --git a/src/aggregator/server/m3msg/options.go b/src/aggregator/server/m3msg/options.go
new file mode 100644
index 0000000000..7186f3a4f3
--- /dev/null
+++ b/src/aggregator/server/m3msg/options.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package m3msg
+
+import (
+ "errors"
+
+ "github.com/m3db/m3/src/msg/consumer"
+ "github.com/m3db/m3/src/x/instrument"
+ xserver "github.com/m3db/m3/src/x/server"
+)
+
+var (
+ errNoInstrumentOptions = errors.New("no instrument options")
+ errNoServerOptions = errors.New("no server options")
+ errNoConsumerOptions = errors.New("no consumer options")
+)
+
+// Options is a set of M3Msg options.
+type Options interface {
+ // Validate validates the options.
+ Validate() error
+
+ // SetInstrumentOptions sets the instrument options.
+ SetInstrumentOptions(value instrument.Options) Options
+
+ // InstrumentOptions returns the instrument options.
+ InstrumentOptions() instrument.Options
+
+ // SetServerOptions sets the server options.
+ SetServerOptions(value xserver.Options) Options
+
+ // ServerOptions returns the server options.
+ ServerOptions() xserver.Options
+
+ // SetConsumerOptions sets the consumer options.
+ SetConsumerOptions(value consumer.Options) Options
+
+ // ConsumerOptions returns the consumer options.
+ ConsumerOptions() consumer.Options
+}
+
+type options struct {
+ instrumentOpts instrument.Options
+ serverOpts xserver.Options
+ consumerOpts consumer.Options
+}
+
+// NewOptions returns a set of M3Msg options.
+func NewOptions() Options {
+ return &options{}
+}
+
+func (o *options) Validate() error {
+ if o.instrumentOpts == nil {
+ return errNoInstrumentOptions
+ }
+ if o.serverOpts == nil {
+ return errNoServerOptions
+ }
+ if o.consumerOpts == nil {
+ return errNoConsumerOptions
+ }
+ return nil
+}
+
+func (o *options) SetInstrumentOptions(value instrument.Options) Options {
+ opts := *o
+ opts.instrumentOpts = value
+ return &opts
+}
+
+func (o *options) InstrumentOptions() instrument.Options {
+ return o.instrumentOpts
+}
+
+func (o *options) SetServerOptions(value xserver.Options) Options {
+ opts := *o
+ opts.serverOpts = value
+ return &opts
+}
+
+func (o *options) ServerOptions() xserver.Options {
+ return o.serverOpts
+}
+
+func (o *options) SetConsumerOptions(value consumer.Options) Options {
+ opts := *o
+ opts.consumerOpts = value
+ return &opts
+}
+
+func (o *options) ConsumerOptions() consumer.Options {
+ return o.consumerOpts
+}
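A minimal sketch of exercising the new options type, assuming the NewOptions constructors on instrument, consumer and x/server that this diff already calls elsewhere; each setter copies the receiver, so a value can be built fluently and validated once at the end:

    package main

    import (
        "log"

        aggm3msg "github.com/m3db/m3/src/aggregator/server/m3msg"
        "github.com/m3db/m3/src/msg/consumer"
        "github.com/m3db/m3/src/x/instrument"
        xserver "github.com/m3db/m3/src/x/server"
    )

    func main() {
        // Validate reports a named error per missing dependency,
        // mirroring errNoInstrumentOptions et al. above.
        opts := aggm3msg.NewOptions().
            SetInstrumentOptions(instrument.NewOptions()).
            SetServerOptions(xserver.NewOptions()).
            SetConsumerOptions(consumer.NewOptions())
        if err := opts.Validate(); err != nil {
            log.Fatalf("invalid m3msg server options: %v", err)
        }
    }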
diff --git a/src/aggregator/server/m3msg/server.go b/src/aggregator/server/m3msg/server.go
new file mode 100644
index 0000000000..4e33279b0e
--- /dev/null
+++ b/src/aggregator/server/m3msg/server.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package m3msg
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/m3db/m3/src/aggregator/aggregator"
+ "github.com/m3db/m3/src/metrics/encoding"
+ "github.com/m3db/m3/src/metrics/encoding/protobuf"
+ "github.com/m3db/m3/src/metrics/generated/proto/metricpb"
+ "github.com/m3db/m3/src/msg/consumer"
+ xserver "github.com/m3db/m3/src/x/server"
+
+ "go.uber.org/zap"
+)
+
+type server struct {
+ aggregator aggregator.Aggregator
+ logger *zap.Logger
+}
+
+// NewServer creates a new M3Msg server.
+func NewServer(
+ address string,
+ aggregator aggregator.Aggregator,
+ opts Options,
+) (xserver.Server, error) {
+ if err := opts.Validate(); err != nil {
+ return nil, err
+ }
+
+ s := &server{
+ aggregator: aggregator,
+ logger: opts.InstrumentOptions().Logger(),
+ }
+
+ handler := consumer.NewConsumerHandler(s.Consume, opts.ConsumerOptions())
+ return xserver.NewServer(address, handler, opts.ServerOptions()), nil
+}
+
+func (s *server) Consume(c consumer.Consumer) {
+ var (
+ pb = &metricpb.MetricWithMetadatas{}
+ union = &encoding.UnaggregatedMessageUnion{}
+ msgErr error
+ msg consumer.Message
+ )
+ for {
+ msg, msgErr = c.Message()
+ if msgErr != nil {
+ break
+ }
+
+ err := s.handleMessage(pb, union, msg)
+ if err != nil {
+ s.logger.Error("could not process message", zap.Error(err))
+ }
+ }
+ if msgErr != nil && msgErr != io.EOF {
+ s.logger.Error("could not read message", zap.Error(msgErr))
+ }
+ c.Close()
+}
+
+func (s *server) handleMessage(
+ pb *metricpb.MetricWithMetadatas,
+ union *encoding.UnaggregatedMessageUnion,
+ msg consumer.Message,
+) error {
+ defer msg.Ack()
+
+ // Reset and reuse the protobuf message for unpacking.
+ protobuf.ReuseMetricWithMetadatasProto(pb)
+
+ // Unmarshal the message.
+ if err := pb.Unmarshal(msg.Bytes()); err != nil {
+ return err
+ }
+
+ switch pb.Type {
+ case metricpb.MetricWithMetadatas_COUNTER_WITH_METADATAS:
+ err := union.CounterWithMetadatas.FromProto(pb.CounterWithMetadatas)
+ if err != nil {
+ return err
+ }
+ return s.aggregator.AddUntimed(
+ union.CounterWithMetadatas.ToUnion(),
+ union.CounterWithMetadatas.StagedMetadatas)
+ case metricpb.MetricWithMetadatas_BATCH_TIMER_WITH_METADATAS:
+ err := union.BatchTimerWithMetadatas.FromProto(pb.BatchTimerWithMetadatas)
+ if err != nil {
+ return err
+ }
+ return s.aggregator.AddUntimed(
+ union.BatchTimerWithMetadatas.ToUnion(),
+ union.BatchTimerWithMetadatas.StagedMetadatas)
+ case metricpb.MetricWithMetadatas_GAUGE_WITH_METADATAS:
+ err := union.GaugeWithMetadatas.FromProto(pb.GaugeWithMetadatas)
+ if err != nil {
+ return err
+ }
+ return s.aggregator.AddUntimed(
+ union.GaugeWithMetadatas.ToUnion(),
+ union.GaugeWithMetadatas.StagedMetadatas)
+ case metricpb.MetricWithMetadatas_FORWARDED_METRIC_WITH_METADATA:
+ err := union.ForwardedMetricWithMetadata.FromProto(pb.ForwardedMetricWithMetadata)
+ if err != nil {
+ return err
+ }
+ return s.aggregator.AddForwarded(
+ union.ForwardedMetricWithMetadata.ForwardedMetric,
+ union.ForwardedMetricWithMetadata.ForwardMetadata)
+ case metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_METADATA:
+ err := union.TimedMetricWithMetadata.FromProto(pb.TimedMetricWithMetadata)
+ if err != nil {
+ return err
+ }
+ return s.aggregator.AddTimed(
+ union.TimedMetricWithMetadata.Metric,
+ union.TimedMetricWithMetadata.TimedMetadata)
+ case metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_METADATAS:
+ err := union.TimedMetricWithMetadatas.FromProto(pb.TimedMetricWithMetadatas)
+ if err != nil {
+ return err
+ }
+ return s.aggregator.AddTimedWithStagedMetadatas(
+ union.TimedMetricWithMetadatas.Metric,
+ union.TimedMetricWithMetadatas.StagedMetadatas)
+ default:
+ return fmt.Errorf("unrecognized message type: %v", pb.Type)
+ }
+}
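Wiring the server end to end is then short; the sketch below assumes an aggregator.Aggregator has already been built (as server/server.go later in this diff does), that the returned xserver.Server exposes the x/server package's usual ListenAndServe/Close, and that the listen address is purely illustrative:

    // Sketch: agg is a previously constructed aggregator.Aggregator and
    // opts is a validated m3msg.Options as in the previous example.
    srv, err := m3msg.NewServer("0.0.0.0:6000", agg, opts)
    if err != nil {
        log.Fatalf("could not create m3msg server: %v", err)
    }
    defer srv.Close()
    if err := srv.ListenAndServe(); err != nil {
        log.Fatalf("m3msg server exited: %v", err)
    }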
diff --git a/src/aggregator/server/rawtcp/options.go b/src/aggregator/server/rawtcp/options.go
index 1f2a0c8b4e..314c6c7a82 100644
--- a/src/aggregator/server/rawtcp/options.go
+++ b/src/aggregator/server/rawtcp/options.go
@@ -25,6 +25,7 @@ import (
"github.com/m3db/m3/src/metrics/encoding/protobuf"
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/server"
)
@@ -79,6 +80,12 @@ type Options interface {
// ErrorLogLimitPerSecond returns the error log limit per second.
ErrorLogLimitPerSecond() int64
+
+ // SetRWOptions sets RW options.
+ SetRWOptions(value xio.Options) Options
+
+ // RWOptions returns the RW options.
+ RWOptions() xio.Options
}
type options struct {
@@ -89,6 +96,7 @@ type options struct {
protobufItOpts protobuf.UnaggregatedOptions
readBufferSize int
errLogLimitPerSecond int64
+ rwOpts xio.Options
}
// NewOptions creates a new set of server options.
@@ -101,6 +109,7 @@ func NewOptions() Options {
protobufItOpts: protobuf.NewUnaggregatedOptions(),
readBufferSize: defaultReadBufferSize,
errLogLimitPerSecond: defaultErrorLogLimitPerSecond,
+ rwOpts: xio.NewOptions(),
}
}
@@ -173,3 +182,13 @@ func (o *options) SetErrorLogLimitPerSecond(value int64) Options {
func (o *options) ErrorLogLimitPerSecond() int64 {
return o.errLogLimitPerSecond
}
+
+func (o *options) SetRWOptions(value xio.Options) Options {
+ opts := *o
+ opts.rwOpts = value
+ return &opts
+}
+
+func (o *options) RWOptions() xio.Options {
+ return o.rwOpts
+}
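The new RW options make the rawtcp read path pluggable. A sketch of overriding the resettable reader, assuming x/io pairs the ResettableReaderFn getter used above with a matching setter and defines ResettableReader as io.Reader plus Reset:

    // passthroughReader is a stand-in; a real override might wrap the
    // connection in, say, a decompressing reader.
    type passthroughReader struct{ r io.Reader }

    func (p *passthroughReader) Read(b []byte) (int, error) { return p.r.Read(b) }
    func (p *passthroughReader) Reset(r io.Reader)          { p.r = r }

    func newRawTCPOptions() rawtcp.Options {
        rwOpts := xio.NewOptions().SetResettableReaderFn(
            func(r io.Reader, _ xio.ResettableReaderOptions) xio.ResettableReader {
                return &passthroughReader{r: r}
            })
        return rawtcp.NewOptions().SetRWOptions(rwOpts)
    }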
diff --git a/src/aggregator/server/rawtcp/server.go b/src/aggregator/server/rawtcp/server.go
index adca4872cd..22c2be6aa0 100644
--- a/src/aggregator/server/rawtcp/server.go
+++ b/src/aggregator/server/rawtcp/server.go
@@ -38,6 +38,8 @@ import (
"github.com/m3db/m3/src/metrics/metadata"
"github.com/m3db/m3/src/metrics/metric/aggregated"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
+ "github.com/m3db/m3/src/metrics/policy"
+ xio "github.com/m3db/m3/src/x/io"
xserver "github.com/m3db/m3/src/x/server"
"github.com/uber-go/tally"
@@ -61,6 +63,7 @@ type handlerMetrics struct {
addUntimedErrors tally.Counter
addTimedErrors tally.Counter
addForwardedErrors tally.Counter
+ addPassthroughErrors tally.Counter
unknownErrorTypeErrors tally.Counter
decodeErrors tally.Counter
errLogRateLimited tally.Counter
@@ -72,6 +75,7 @@ func newHandlerMetrics(scope tally.Scope) handlerMetrics {
addUntimedErrors: scope.Counter("add-untimed-errors"),
addTimedErrors: scope.Counter("add-timed-errors"),
addForwardedErrors: scope.Counter("add-forwarded-errors"),
+ addPassthroughErrors: scope.Counter("add-passthrough-errors"),
unknownErrorTypeErrors: scope.Counter("unknown-error-type-errors"),
decodeErrors: scope.Counter("decode-errors"),
errLogRateLimited: scope.Counter("error-log-rate-limited"),
@@ -90,6 +94,8 @@ type handler struct {
errLogRateLimiter *rate.Limiter
rand *rand.Rand
metrics handlerMetrics
+
+ opts Options
}
// NewHandler creates a new raw TCP handler.
@@ -109,6 +115,7 @@ func NewHandler(aggregator aggregator.Aggregator, opts Options) xserver.Handler
errLogRateLimiter: limiter,
rand: rand.New(rand.NewSource(nowFn().UnixNano())),
metrics: newHandlerMetrics(iOpts.MetricsScope()),
+ opts: opts,
}
}
@@ -118,19 +125,23 @@ func (s *handler) Handle(conn net.Conn) {
remoteAddress = remoteAddr.String()
}
- reader := bufio.NewReaderSize(conn, s.readBufferSize)
+ rOpts := xio.ResettableReaderOptions{ReadBufferSize: s.readBufferSize}
+ read := s.opts.RWOptions().ResettableReaderFn()(conn, rOpts)
+ reader := bufio.NewReaderSize(read, s.readBufferSize)
it := migration.NewUnaggregatedIterator(reader, s.msgpackItOpts, s.protobufItOpts)
defer it.Close()
// Iterate over the incoming metrics stream and queue up metrics.
var (
- untimedMetric unaggregated.MetricUnion
- stagedMetadatas metadata.StagedMetadatas
- forwardedMetric aggregated.ForwardedMetric
- forwardMetadata metadata.ForwardMetadata
- timedMetric aggregated.Metric
- timedMetadata metadata.TimedMetadata
- err error
+ untimedMetric unaggregated.MetricUnion
+ stagedMetadatas metadata.StagedMetadatas
+ forwardedMetric aggregated.ForwardedMetric
+ forwardMetadata metadata.ForwardMetadata
+ timedMetric aggregated.Metric
+ timedMetadata metadata.TimedMetadata
+ passthroughMetric aggregated.Metric
+ passthroughMetadata policy.StoragePolicy
+ err error
)
for it.Next() {
current := it.Current()
@@ -155,6 +166,14 @@ func (s *handler) Handle(conn net.Conn) {
timedMetric = current.TimedMetricWithMetadata.Metric
timedMetadata = current.TimedMetricWithMetadata.TimedMetadata
err = toAddTimedError(s.aggregator.AddTimed(timedMetric, timedMetadata))
+ case encoding.TimedMetricWithMetadatasType:
+ timedMetric = current.TimedMetricWithMetadatas.Metric
+ stagedMetadatas = current.TimedMetricWithMetadatas.StagedMetadatas
+ err = toAddTimedError(s.aggregator.AddTimedWithStagedMetadatas(timedMetric, stagedMetadatas))
+ case encoding.PassthroughMetricWithMetadataType:
+ passthroughMetric = current.PassthroughMetricWithMetadata.Metric
+ passthroughMetadata = current.PassthroughMetricWithMetadata.StoragePolicy
+ err = toAddPassthroughError(s.aggregator.AddPassthrough(passthroughMetric, passthroughMetadata))
default:
err = newUnknownMessageTypeError(current.Type)
}
@@ -203,6 +222,15 @@ func (s *handler) Handle(conn net.Conn) {
zap.Float64("value", timedMetric.Value),
zap.Error(err),
)
+ case addPassthroughError:
+ s.metrics.addPassthroughErrors.Inc(1)
+ s.log.Error("error adding passthrough metric",
+ zap.String("remoteAddress", remoteAddress),
+ zap.Stringer("id", passthroughMetric.ID),
+ zap.Time("timestamp", time.Unix(0, passthroughMetric.TimeNanos)),
+ zap.Float64("value", passthroughMetric.Value),
+ zap.Error(err),
+ )
default:
s.metrics.unknownErrorTypeErrors.Inc(1)
s.log.Error("unknown error type",
@@ -281,3 +309,16 @@ func toAddForwardedError(err error) error {
}
func (e addForwardedError) Error() string { return e.err.Error() }
+
+type addPassthroughError struct {
+ err error
+}
+
+func toAddPassthroughError(err error) error {
+ if err == nil {
+ return nil
+ }
+ return addPassthroughError{err: err}
+}
+
+func (e addPassthroughError) Error() string { return e.err.Error() }
diff --git a/src/aggregator/server/rawtcp/server_test.go b/src/aggregator/server/rawtcp/server_test.go
index bb257a1f74..4e16f2c365 100644
--- a/src/aggregator/server/rawtcp/server_test.go
+++ b/src/aggregator/server/rawtcp/server_test.go
@@ -70,7 +70,7 @@ var (
}
testTimed = aggregated.Metric{
Type: metric.CounterType,
- ID: []byte("testForwarded"),
+ ID: []byte("testTimed"),
TimeNanos: 12345,
Value: -13,
}
@@ -80,6 +80,12 @@ var (
TimeNanos: 12345,
Values: []float64{908, -13},
}
+ testPassthrough = aggregated.Metric{
+ Type: metric.CounterType,
+ ID: []byte("testPassthrough"),
+ TimeNanos: 12345,
+ Value: -13,
+ }
testDefaultPoliciesList = policy.DefaultPoliciesList
testCustomPoliciesList = policy.PoliciesList{
policy.NewStagedPolicies(
@@ -135,6 +141,7 @@ var (
SourceID: 1234,
NumForwardedTimes: 3,
}
+ testPassthroughStoragePolicy = policy.NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour)
testCounterWithPoliciesList = unaggregated.CounterWithPoliciesList{
Counter: testCounter.Counter(),
PoliciesList: testDefaultPoliciesList,
@@ -167,6 +174,10 @@ var (
ForwardedMetric: testForwarded,
ForwardMetadata: testForwardMetadata,
}
+ testPassthroughMetricWithMetadata = aggregated.PassthroughMetricWithMetadata{
+ Metric: testPassthrough,
+ StoragePolicy: testPassthroughStoragePolicy,
+ }
testCmpOpts = []cmp.Option{
cmpopts.EquateEmpty(),
cmp.AllowUnexported(policy.StoragePolicy{}),
@@ -224,6 +235,7 @@ func testRawTCPServerHandleUnaggregated(
protocol := protocolSelector(i)
if protocol == protobufEncoding {
expectedResult.TimedMetricWithMetadata = append(expectedResult.TimedMetricWithMetadata, testTimedMetricWithMetadata)
+ expectedResult.PassthroughMetricWithMetadata = append(expectedResult.PassthroughMetricWithMetadata, testPassthroughMetricWithMetadata)
expectedResult.ForwardedMetricsWithMetadata = append(expectedResult.ForwardedMetricsWithMetadata, testForwardedMetricWithMetadata)
expectedTotalMetrics += 5
} else {
@@ -262,6 +274,10 @@ func testRawTCPServerHandleUnaggregated(
Type: encoding.TimedMetricWithMetadataType,
TimedMetricWithMetadata: testTimedMetricWithMetadata,
}))
+ require.NoError(t, encoder.EncodeMessage(encoding.UnaggregatedMessageUnion{
+ Type: encoding.PassthroughMetricWithMetadataType,
+ PassthroughMetricWithMetadata: testPassthroughMetricWithMetadata,
+ }))
require.NoError(t, encoder.EncodeMessage(encoding.UnaggregatedMessageUnion{
Type: encoding.ForwardedMetricWithMetadataType,
ForwardedMetricWithMetadata: testForwardedMetricWithMetadata,
diff --git a/src/aggregator/server/server.go b/src/aggregator/server/server.go
new file mode 100644
index 0000000000..0bcb967a8c
--- /dev/null
+++ b/src/aggregator/server/server.go
@@ -0,0 +1,199 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package server
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+
+ m3aggregator "github.com/m3db/m3/src/aggregator/aggregator"
+ "github.com/m3db/m3/src/cmd/services/m3aggregator/config"
+ "github.com/m3db/m3/src/cmd/services/m3aggregator/serve"
+ xconfig "github.com/m3db/m3/src/x/config"
+ "github.com/m3db/m3/src/x/instrument"
+
+ "go.uber.org/zap"
+)
+
+const (
+ gracefulShutdownTimeout = 15 * time.Second
+)
+
+// RunOptions are the server options for running the aggregator server.
+type RunOptions struct {
+ // Config is the aggregator configuration.
+ Config config.Configuration
+
+ // AdminOptions are additional options to apply to the aggregator server.
+ AdminOptions []AdminOption
+}
+
+// AdminOption is an additional option to apply to the aggregator server.
+type AdminOption func(opts serve.Options) (serve.Options, error)
+
+// Run runs the aggregator server.
+func Run(opts RunOptions) {
+ cfg := opts.Config
+
+ // Create logger and metrics scope.
+ logger, err := cfg.Logging.BuildLogger()
+ if err != nil {
+ // NB(r): Use fmt.Fprintf(os.Stderr, ...) to avoid etcd.SetGlobals()
+ // sending the stdlib "log" output to a black hole. Don't remove without good reason.
+ fmt.Fprintf(os.Stderr, "error creating logger: %v\n", err)
+ os.Exit(1)
+ }
+ defer logger.Sync()
+
+ xconfig.WarnOnDeprecation(cfg, logger)
+
+ scope, closer, err := cfg.Metrics.NewRootScope()
+ if err != nil {
+ logger.Fatal("error creating metrics root scope", zap.Error(err))
+ }
+ defer closer.Close()
+ instrumentOpts := instrument.NewOptions().
+ SetLogger(logger).
+ SetMetricsScope(scope).
+ SetTimerOptions(instrument.TimerOptions{StandardSampleRate: cfg.Metrics.SampleRate()}).
+ SetReportInterval(cfg.Metrics.ReportInterval())
+
+ buildReporter := instrument.NewBuildReporter(instrumentOpts)
+ if err := buildReporter.Start(); err != nil {
+ logger.Fatal("could not start build reporter", zap.Error(err))
+ }
+
+ defer buildReporter.Stop()
+
+ serverOptions := serve.NewOptions(instrumentOpts)
+ if cfg.M3Msg != nil {
+ // Create the M3Msg server options.
+ m3msgInstrumentOpts := instrumentOpts.
+ SetMetricsScope(scope.
+ SubScope("m3msg-server").
+ Tagged(map[string]string{"server": "m3msg"}))
+ m3msgServerOpts, err := cfg.M3Msg.NewServerOptions(m3msgInstrumentOpts)
+ if err != nil {
+ logger.Fatal("could not create m3msg server options", zap.Error(err))
+ }
+
+ serverOptions = serverOptions.
+ SetM3MsgAddr(cfg.M3Msg.Server.ListenAddress).
+ SetM3MsgServerOpts(m3msgServerOpts)
+ }
+
+ if cfg.RawTCP != nil {
+ // Create the raw TCP server options.
+ rawTCPInstrumentOpts := instrumentOpts.
+ SetMetricsScope(scope.
+ SubScope("rawtcp-server").
+ Tagged(map[string]string{"server": "rawtcp"}))
+
+ serverOptions = serverOptions.
+ SetRawTCPAddr(cfg.RawTCP.ListenAddress).
+ SetRawTCPServerOpts(cfg.RawTCP.NewServerOptions(rawTCPInstrumentOpts))
+ }
+
+ if cfg.HTTP != nil {
+ // Create the HTTP server options.
+ serverOptions = serverOptions.
+ SetHTTPAddr(cfg.HTTP.ListenAddress).
+ SetHTTPServerOpts(cfg.HTTP.NewServerOptions())
+ }
+
+ for i, transform := range opts.AdminOptions {
+ if opts, err := transform(serverOptions); err != nil {
+ logger.Fatal("could not apply transform",
+ zap.Int("index", i), zap.Error(err))
+ } else {
+ serverOptions = opts
+ }
+ }
+
+ // Create the kv client.
+ client, err := cfg.KVClient.NewKVClient(instrumentOpts.
+ SetMetricsScope(scope.SubScope("kv-client")))
+ if err != nil {
+ logger.Fatal("error creating the kv client", zap.Error(err))
+ }
+
+ // Create the runtime options manager.
+ runtimeOptsManager := cfg.RuntimeOptions.NewRuntimeOptionsManager()
+
+ // Create the aggregator.
+ aggregatorOpts, err := cfg.Aggregator.NewAggregatorOptions(
+ serverOptions.RawTCPAddr(),
+ client, serverOptions, runtimeOptsManager,
+ instrumentOpts.SetMetricsScope(scope.SubScope("aggregator")))
+ if err != nil {
+ logger.Fatal("error creating aggregator options", zap.Error(err))
+ }
+ aggregator := m3aggregator.NewAggregator(aggregatorOpts)
+ if err := aggregator.Open(); err != nil {
+ logger.Fatal("error opening the aggregator", zap.Error(err))
+ }
+
+ // Watch runtime option changes after aggregator is open.
+ placementManager := aggregatorOpts.PlacementManager()
+ cfg.RuntimeOptions.WatchRuntimeOptionChanges(client, runtimeOptsManager, placementManager, logger)
+
+ doneCh := make(chan struct{})
+ closedCh := make(chan struct{})
+ go func() {
+ if err := serve.Serve(
+ aggregator,
+ doneCh,
+ serverOptions,
+ ); err != nil {
+ logger.Fatal("could not start serving traffic", zap.Error(err))
+ }
+ logger.Debug("server closed")
+ close(closedCh)
+ }()
+
+ // Handle interrupts.
+ sigC := make(chan os.Signal, 1)
+ signal.Notify(sigC, syscall.SIGINT, syscall.SIGTERM)
+
+ logger.Warn("interrupt", zap.Any("signal", fmt.Errorf("%s", <-sigC)))
+
+ if s := cfg.Aggregator.ShutdownWaitTimeout; s != 0 {
+ logger.Info("waiting intentional shutdown period", zap.Duration("waitTimeout", s))
+ select {
+ case sig := <-sigC:
+ logger.Info("second signal received, skipping shutdown wait", zap.String("signal", sig.String()))
+ case <-time.After(s):
+ logger.Info("shutdown period elapsed")
+ }
+ }
+
+ close(doneCh)
+
+ select {
+ case <-closedCh:
+ logger.Info("server closed clean")
+ case <-time.After(gracefulShutdownTimeout):
+ logger.Info("server closed due to timeout", zap.Duration("timeout", gracefulShutdownTimeout))
+ }
+}
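AdminOptions are the one extension point in Run: embedders receive the config-derived serve.Options last and may replace any of it. A sketch (listen address illustrative, cfg unmarshaled from YAML elsewhere):

    // overrideHTTPAddr forces the HTTP listen address regardless of
    // what the configuration file specified.
    overrideHTTPAddr := func(opts serve.Options) (serve.Options, error) {
        return opts.SetHTTPAddr("0.0.0.0:6002"), nil
    }

    server.Run(server.RunOptions{
        Config:       cfg,
        AdminOptions: []server.AdminOption{overrideHTTPAddr},
    })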
diff --git a/src/aggregator/sharding/hash.go b/src/aggregator/sharding/hash.go
index a7e0462a54..6172e6a6a4 100644
--- a/src/aggregator/sharding/hash.go
+++ b/src/aggregator/sharding/hash.go
@@ -26,7 +26,7 @@ import (
"github.com/m3db/m3/src/metrics/metric/id"
- "github.com/spaolacci/murmur3"
+ murmur3 "github.com/m3db/stackmurmur3/v2"
)
const (
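The import swap is intended to be behavior-preserving: m3db/stackmurmur3 is a fork kept API-compatible with spaolacci/murmur3 that avoids per-call heap allocations on the hashing hot path. Assuming the fork keeps upstream's Sum32 signature (the premise of a drop-in swap), shard assignment still reads:

    const totalShards = 1024
    id := []byte("stats.requests.count") // illustrative metric ID
    shard := murmur3.Sum32(id) % totalShards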
diff --git a/src/cluster/README.md b/src/cluster/README.md
index 0698193e2f..b5c81c7133 100644
--- a/src/cluster/README.md
+++ b/src/cluster/README.md
@@ -1,16 +1,5 @@
## WARNING: This is Alpha software and not intended for use until a stable release.
-# M3Cluster [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+# M3Cluster
-Cluster management interfaces used by M3 components
-
-
-
-This project is released under the [Apache License, Version 2.0](LICENSE).
-
-[doc-img]: https://godoc.org/github.com/m3db/m3cluster?status.svg
-[doc]: https://godoc.org/github.com/m3db/m3cluster
-[ci-img]: https://badge.buildkite.com/bf88d4e826bced29347a210e540cee0dfa78e2a109cdce2136.svg
-[ci]: https://buildkite.com/m3/m3cluster-ci
-[cov-img]: https://coveralls.io/repos/m3db/m3cluster/badge.svg?branch=master&service=github
-[cov]: https://coveralls.io/github/m3db/m3cluster?branch=master
+Cluster management interfaces used by M3 components.
diff --git a/src/cluster/generated/proto/placementpb/placement.pb.go b/src/cluster/generated/proto/placementpb/placement.pb.go
index 23111f6728..b90bbd8be7 100644
--- a/src/cluster/generated/proto/placementpb/placement.pb.go
+++ b/src/cluster/generated/proto/placementpb/placement.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/m3db/m3/src/cluster/generated/proto/placementpb/placement.proto
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -30,6 +30,7 @@
It has these top-level messages:
Placement
Instance
+ InstanceMetadata
Shard
PlacementSnapshots
*/
@@ -146,15 +147,16 @@ func (m *Placement) GetMaxShardSetId() uint32 {
}
type Instance struct {
- Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
- IsolationGroup string `protobuf:"bytes,2,opt,name=isolation_group,json=isolationGroup,proto3" json:"isolation_group,omitempty"`
- Zone string `protobuf:"bytes,3,opt,name=zone,proto3" json:"zone,omitempty"`
- Weight uint32 `protobuf:"varint,4,opt,name=weight,proto3" json:"weight,omitempty"`
- Endpoint string `protobuf:"bytes,5,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
- Shards []*Shard `protobuf:"bytes,6,rep,name=shards" json:"shards,omitempty"`
- ShardSetId uint32 `protobuf:"varint,7,opt,name=shard_set_id,json=shardSetId,proto3" json:"shard_set_id,omitempty"`
- Hostname string `protobuf:"bytes,8,opt,name=hostname,proto3" json:"hostname,omitempty"`
- Port uint32 `protobuf:"varint,9,opt,name=port,proto3" json:"port,omitempty"`
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ IsolationGroup string `protobuf:"bytes,2,opt,name=isolation_group,json=isolationGroup,proto3" json:"isolation_group,omitempty"`
+ Zone string `protobuf:"bytes,3,opt,name=zone,proto3" json:"zone,omitempty"`
+ Weight uint32 `protobuf:"varint,4,opt,name=weight,proto3" json:"weight,omitempty"`
+ Endpoint string `protobuf:"bytes,5,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
+ Shards []*Shard `protobuf:"bytes,6,rep,name=shards" json:"shards,omitempty"`
+ ShardSetId uint32 `protobuf:"varint,7,opt,name=shard_set_id,json=shardSetId,proto3" json:"shard_set_id,omitempty"`
+ Hostname string `protobuf:"bytes,8,opt,name=hostname,proto3" json:"hostname,omitempty"`
+ Port uint32 `protobuf:"varint,9,opt,name=port,proto3" json:"port,omitempty"`
+ Metadata *InstanceMetadata `protobuf:"bytes,10,opt,name=metadata" json:"metadata,omitempty"`
}
func (m *Instance) Reset() { *m = Instance{} }
@@ -225,6 +227,29 @@ func (m *Instance) GetPort() uint32 {
return 0
}
+func (m *Instance) GetMetadata() *InstanceMetadata {
+ if m != nil {
+ return m.Metadata
+ }
+ return nil
+}
+
+type InstanceMetadata struct {
+ DebugPort uint32 `protobuf:"varint,1,opt,name=debug_port,json=debugPort,proto3" json:"debug_port,omitempty"`
+}
+
+func (m *InstanceMetadata) Reset() { *m = InstanceMetadata{} }
+func (m *InstanceMetadata) String() string { return proto.CompactTextString(m) }
+func (*InstanceMetadata) ProtoMessage() {}
+func (*InstanceMetadata) Descriptor() ([]byte, []int) { return fileDescriptorPlacement, []int{2} }
+
+func (m *InstanceMetadata) GetDebugPort() uint32 {
+ if m != nil {
+ return m.DebugPort
+ }
+ return 0
+}
+
type Shard struct {
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
State ShardState `protobuf:"varint,2,opt,name=state,proto3,enum=placementpb.ShardState" json:"state,omitempty"`
@@ -241,7 +266,7 @@ type Shard struct {
func (m *Shard) Reset() { *m = Shard{} }
func (m *Shard) String() string { return proto.CompactTextString(m) }
func (*Shard) ProtoMessage() {}
-func (*Shard) Descriptor() ([]byte, []int) { return fileDescriptorPlacement, []int{2} }
+func (*Shard) Descriptor() ([]byte, []int) { return fileDescriptorPlacement, []int{3} }
func (m *Shard) GetId() uint32 {
if m != nil {
@@ -285,7 +310,7 @@ type PlacementSnapshots struct {
func (m *PlacementSnapshots) Reset() { *m = PlacementSnapshots{} }
func (m *PlacementSnapshots) String() string { return proto.CompactTextString(m) }
func (*PlacementSnapshots) ProtoMessage() {}
-func (*PlacementSnapshots) Descriptor() ([]byte, []int) { return fileDescriptorPlacement, []int{3} }
+func (*PlacementSnapshots) Descriptor() ([]byte, []int) { return fileDescriptorPlacement, []int{4} }
func (m *PlacementSnapshots) GetSnapshots() []*Placement {
if m != nil {
@@ -297,6 +322,7 @@ func (m *PlacementSnapshots) GetSnapshots() []*Placement {
func init() {
proto.RegisterType((*Placement)(nil), "placementpb.Placement")
proto.RegisterType((*Instance)(nil), "placementpb.Instance")
+ proto.RegisterType((*InstanceMetadata)(nil), "placementpb.InstanceMetadata")
proto.RegisterType((*Shard)(nil), "placementpb.Shard")
proto.RegisterType((*PlacementSnapshots)(nil), "placementpb.PlacementSnapshots")
proto.RegisterEnum("placementpb.ShardState", ShardState_name, ShardState_value)
@@ -459,6 +485,39 @@ func (m *Instance) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintPlacement(dAtA, i, uint64(m.Port))
}
+ if m.Metadata != nil {
+ dAtA[i] = 0x52
+ i++
+ i = encodeVarintPlacement(dAtA, i, uint64(m.Metadata.Size()))
+ n2, err := m.Metadata.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ return i, nil
+}
+
+func (m *InstanceMetadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *InstanceMetadata) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.DebugPort != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintPlacement(dAtA, i, uint64(m.DebugPort))
+ }
return i, nil
}
@@ -620,6 +679,19 @@ func (m *Instance) Size() (n int) {
if m.Port != 0 {
n += 1 + sovPlacement(uint64(m.Port))
}
+ if m.Metadata != nil {
+ l = m.Metadata.Size()
+ n += 1 + l + sovPlacement(uint64(l))
+ }
+ return n
+}
+
+func (m *InstanceMetadata) Size() (n int) {
+ var l int
+ _ = l
+ if m.DebugPort != 0 {
+ n += 1 + sovPlacement(uint64(m.DebugPort))
+ }
return n
}
@@ -1221,6 +1293,108 @@ func (m *Instance) Unmarshal(dAtA []byte) error {
break
}
}
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlacement
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthPlacement
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Metadata == nil {
+ m.Metadata = &InstanceMetadata{}
+ }
+ if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPlacement(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPlacement
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *InstanceMetadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlacement
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: InstanceMetadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: InstanceMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DebugPort", wireType)
+ }
+ m.DebugPort = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlacement
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DebugPort |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipPlacement(dAtA[iNdEx:])
@@ -1588,45 +1762,47 @@ func init() {
}
var fileDescriptorPlacement = []byte{
- // 628 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0xdf, 0x6e, 0xd3, 0x3e,
- 0x14, 0x5e, 0xd2, 0xb5, 0x6b, 0x4e, 0xd7, 0xfe, 0x2a, 0x4b, 0xbf, 0x11, 0x0d, 0x51, 0x4a, 0xd1,
- 0x44, 0x35, 0x44, 0x23, 0x6d, 0x5c, 0xa0, 0xdd, 0x75, 0x68, 0x4c, 0x99, 0xca, 0x84, 0xdc, 0x69,
- 0x17, 0xdc, 0x44, 0x6e, 0xe2, 0xb6, 0x16, 0x8d, 0x1d, 0xd9, 0xce, 0xd8, 0x78, 0x03, 0xee, 0x78,
- 0x0f, 0x5e, 0x84, 0x4b, 0x1e, 0x01, 0x8d, 0x87, 0xe0, 0x16, 0xc5, 0x49, 0xfa, 0x47, 0xec, 0xee,
- 0x9c, 0xef, 0x7c, 0xf6, 0xf9, 0xfc, 0xf9, 0x1c, 0xb8, 0x98, 0x31, 0x3d, 0x4f, 0x27, 0x83, 0x50,
- 0xc4, 0x5e, 0x7c, 0x1c, 0x4d, 0xbc, 0xf8, 0xd8, 0x53, 0x32, 0xf4, 0xc2, 0x45, 0xaa, 0x34, 0x95,
- 0xde, 0x8c, 0x72, 0x2a, 0x89, 0xa6, 0x91, 0x97, 0x48, 0xa1, 0x85, 0x97, 0x2c, 0x48, 0x48, 0x63,
- 0xca, 0x75, 0x32, 0x59, 0xc5, 0x03, 0x53, 0x43, 0x8d, 0xb5, 0x62, 0xef, 0x8f, 0x0d, 0xce, 0x87,
- 0x32, 0x47, 0x6f, 0xc1, 0x61, 0x5c, 0x69, 0xc2, 0x43, 0xaa, 0x5c, 0xab, 0x5b, 0xe9, 0x37, 0x8e,
- 0x0e, 0x06, 0x6b, 0xf4, 0xc1, 0x92, 0x3a, 0xf0, 0x4b, 0xde, 0x19, 0xd7, 0xf2, 0x0e, 0xaf, 0xce,
- 0xa1, 0x03, 0x68, 0x49, 0x9a, 0x2c, 0x58, 0x48, 0x82, 0x29, 0x09, 0xb5, 0x90, 0xae, 0xdd, 0xb5,
- 0xfa, 0x4d, 0xdc, 0x2c, 0xd0, 0x77, 0x06, 0x44, 0x4f, 0x00, 0x78, 0x1a, 0x07, 0x6a, 0x4e, 0x64,
- 0xa4, 0xdc, 0x8a, 0xa1, 0x38, 0x3c, 0x8d, 0xc7, 0x06, 0xc8, 0xca, 0x4c, 0xe5, 0x55, 0x1a, 0xb9,
- 0xdb, 0x5d, 0xab, 0x5f, 0xc7, 0x0e, 0x53, 0xe3, 0x1c, 0x40, 0xcf, 0x60, 0x37, 0x4c, 0xb5, 0xb8,
- 0xa1, 0x32, 0xd0, 0x2c, 0xa6, 0x6e, 0xb5, 0x6b, 0xf5, 0x2b, 0xb8, 0x51, 0x60, 0x57, 0x2c, 0xa6,
- 0xe8, 0x29, 0x34, 0x98, 0x0a, 0x62, 0x26, 0xa5, 0x90, 0x34, 0x72, 0x6b, 0xe6, 0x0a, 0x60, 0xea,
- 0x7d, 0x81, 0xa0, 0x17, 0xd0, 0x8e, 0xc9, 0x6d, 0xde, 0x23, 0x50, 0x54, 0x07, 0x2c, 0x72, 0x77,
- 0x72, 0xa9, 0x31, 0xb9, 0x35, 0x9d, 0xc6, 0x54, 0xfb, 0xd1, 0xfe, 0x18, 0x5a, 0x9b, 0xcf, 0x45,
- 0x6d, 0xa8, 0x7c, 0xa2, 0x77, 0xae, 0xd5, 0xb5, 0xfa, 0x0e, 0xce, 0x42, 0xf4, 0x12, 0xaa, 0x37,
- 0x64, 0x91, 0x52, 0xf3, 0xd8, 0xc6, 0xd1, 0xff, 0x1b, 0xb6, 0x95, 0xa7, 0x71, 0xce, 0x39, 0xb1,
- 0xdf, 0x58, 0xbd, 0xaf, 0x36, 0xd4, 0x4b, 0x1c, 0xb5, 0xc0, 0x66, 0x51, 0x71, 0x9d, 0xcd, 0x32,
- 0x69, 0xff, 0x31, 0x25, 0x16, 0x44, 0x33, 0xc1, 0x83, 0x99, 0x14, 0x69, 0x62, 0xee, 0x75, 0x70,
- 0x6b, 0x09, 0x9f, 0x67, 0x28, 0x42, 0xb0, 0xfd, 0x45, 0x70, 0x6a, 0xfc, 0x73, 0xb0, 0x89, 0xd1,
- 0x1e, 0xd4, 0x3e, 0x53, 0x36, 0x9b, 0x6b, 0x63, 0x5b, 0x13, 0x17, 0x19, 0xda, 0x87, 0x3a, 0xe5,
- 0x51, 0x22, 0x18, 0xd7, 0xc6, 0x2f, 0x07, 0x2f, 0x73, 0x74, 0x08, 0xb5, 0xe2, 0x27, 0x6a, 0xe6,
- 0xdb, 0xd1, 0x86, 0x7e, 0xe3, 0x05, 0x2e, 0x18, 0xa8, 0x0b, 0xbb, 0x0f, 0x78, 0x06, 0x6a, 0x69,
- 0x58, 0xd6, 0x69, 0x2e, 0x94, 0xe6, 0x24, 0xa6, 0x6e, 0x3d, 0xef, 0x54, 0xe6, 0x99, 0xe2, 0x44,
- 0x48, 0xed, 0x3a, 0xe6, 0x94, 0x89, 0x7b, 0xdf, 0x2d, 0xa8, 0x9a, 0x1e, 0x6b, 0x46, 0x34, 0x8d,
- 0x11, 0xaf, 0xa0, 0xaa, 0x34, 0xd1, 0xb9, 0xad, 0xad, 0xa3, 0x47, 0xff, 0xca, 0x1a, 0x67, 0x65,
- 0x9c, 0xb3, 0xd0, 0x63, 0x70, 0x94, 0x48, 0x65, 0x48, 0x33, 0x5d, 0xb9, 0x27, 0xf5, 0x1c, 0xf0,
- 0x23, 0xf4, 0x1c, 0x9a, 0xe5, 0xcc, 0x70, 0xc2, 0x85, 0x32, 0xf6, 0x54, 0x70, 0x39, 0x48, 0x97,
- 0x19, 0x56, 0x0e, 0xd6, 0x74, 0x5a, 0x70, 0xd6, 0x06, 0x6b, 0x3a, 0x35, 0x94, 0xde, 0x05, 0xa0,
- 0xe5, 0x1e, 0x8c, 0x39, 0x49, 0xd4, 0x5c, 0x68, 0x85, 0x5e, 0x83, 0xa3, 0xca, 0xa4, 0xd8, 0x9d,
- 0xbd, 0x87, 0x77, 0x07, 0xaf, 0x88, 0x87, 0x27, 0x00, 0xab, 0x57, 0xa0, 0x36, 0xec, 0xfa, 0x97,
- 0xfe, 0x95, 0x3f, 0x1c, 0xf9, 0x1f, 0xfd, 0xcb, 0xf3, 0xf6, 0x16, 0x6a, 0x82, 0x33, 0xbc, 0x1e,
- 0xfa, 0xa3, 0xe1, 0xe9, 0xe8, 0xac, 0x6d, 0xa1, 0x06, 0xec, 0x8c, 0xce, 0x86, 0xd7, 0x59, 0xcd,
- 0x3e, 0x6d, 0xff, 0xb8, 0xef, 0x58, 0x3f, 0xef, 0x3b, 0xd6, 0xaf, 0xfb, 0x8e, 0xf5, 0xed, 0x77,
- 0x67, 0x6b, 0x52, 0x33, 0x1b, 0x7e, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0x9b, 0x13, 0x65,
- 0x2f, 0x04, 0x00, 0x00,
+ // 672 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x54, 0xc1, 0x6e, 0xd3, 0x4c,
+ 0x10, 0xae, 0x93, 0x26, 0x8d, 0x27, 0x4d, 0xfe, 0x68, 0xa5, 0xbf, 0x58, 0x45, 0x0d, 0x21, 0xa8,
+ 0x22, 0x2a, 0x22, 0x16, 0x2d, 0x07, 0xe8, 0x2d, 0x45, 0xa5, 0x72, 0x95, 0x56, 0xd5, 0xa6, 0xea,
+ 0x81, 0x8b, 0xb5, 0xb1, 0x37, 0xc9, 0x8a, 0x78, 0xd7, 0xda, 0x5d, 0x97, 0x96, 0xa7, 0xe0, 0x3d,
+ 0x78, 0x0d, 0x0e, 0x1c, 0x79, 0x04, 0x54, 0x1e, 0x82, 0x2b, 0xf2, 0xda, 0x4e, 0x52, 0xe8, 0x6d,
+ 0xe6, 0x9b, 0x6f, 0x77, 0x66, 0xbe, 0x99, 0x5d, 0x38, 0x9d, 0x32, 0x3d, 0x4b, 0xc6, 0xfd, 0x40,
+ 0x44, 0x6e, 0x74, 0x10, 0x8e, 0xdd, 0xe8, 0xc0, 0x55, 0x32, 0x70, 0x83, 0x79, 0xa2, 0x34, 0x95,
+ 0xee, 0x94, 0x72, 0x2a, 0x89, 0xa6, 0xa1, 0x1b, 0x4b, 0xa1, 0x85, 0x1b, 0xcf, 0x49, 0x40, 0x23,
+ 0xca, 0x75, 0x3c, 0x5e, 0xda, 0x7d, 0x13, 0x43, 0xf5, 0x95, 0x60, 0xf7, 0x77, 0x09, 0xec, 0x8b,
+ 0xc2, 0x47, 0xef, 0xc0, 0x66, 0x5c, 0x69, 0xc2, 0x03, 0xaa, 0x1c, 0xab, 0x53, 0xee, 0xd5, 0xf7,
+ 0x77, 0xfb, 0x2b, 0xf4, 0xfe, 0x82, 0xda, 0xf7, 0x0a, 0xde, 0x31, 0xd7, 0xf2, 0x16, 0x2f, 0xcf,
+ 0xa1, 0x5d, 0x68, 0x4a, 0x1a, 0xcf, 0x59, 0x40, 0xfc, 0x09, 0x09, 0xb4, 0x90, 0x4e, 0xa9, 0x63,
+ 0xf5, 0x1a, 0xb8, 0x91, 0xa3, 0xef, 0x0d, 0x88, 0x76, 0x00, 0x78, 0x12, 0xf9, 0x6a, 0x46, 0x64,
+ 0xa8, 0x9c, 0xb2, 0xa1, 0xd8, 0x3c, 0x89, 0x46, 0x06, 0x48, 0xc3, 0x4c, 0x65, 0x51, 0x1a, 0x3a,
+ 0xeb, 0x1d, 0xab, 0x57, 0xc3, 0x36, 0x53, 0xa3, 0x0c, 0x40, 0x4f, 0x61, 0x33, 0x48, 0xb4, 0xb8,
+ 0xa6, 0xd2, 0xd7, 0x2c, 0xa2, 0x4e, 0xa5, 0x63, 0xf5, 0xca, 0xb8, 0x9e, 0x63, 0x97, 0x2c, 0xa2,
+ 0xe8, 0x09, 0xd4, 0x99, 0xf2, 0x23, 0x26, 0xa5, 0x90, 0x34, 0x74, 0xaa, 0xe6, 0x0a, 0x60, 0xea,
+ 0x2c, 0x47, 0xd0, 0x73, 0x68, 0x45, 0xe4, 0x26, 0xcb, 0xe1, 0x2b, 0xaa, 0x7d, 0x16, 0x3a, 0x1b,
+ 0x59, 0xa9, 0x11, 0xb9, 0x31, 0x99, 0x46, 0x54, 0x7b, 0xe1, 0xf6, 0x08, 0x9a, 0xf7, 0xdb, 0x45,
+ 0x2d, 0x28, 0x7f, 0xa4, 0xb7, 0x8e, 0xd5, 0xb1, 0x7a, 0x36, 0x4e, 0x4d, 0xf4, 0x02, 0x2a, 0xd7,
+ 0x64, 0x9e, 0x50, 0xd3, 0x6c, 0x7d, 0xff, 0xff, 0x7b, 0xb2, 0x15, 0xa7, 0x71, 0xc6, 0x39, 0x2c,
+ 0xbd, 0xb1, 0xba, 0xdf, 0x4a, 0x50, 0x2b, 0x70, 0xd4, 0x84, 0x12, 0x0b, 0xf3, 0xeb, 0x4a, 0x2c,
+ 0x2d, 0xed, 0x3f, 0xa6, 0xc4, 0x9c, 0x68, 0x26, 0xb8, 0x3f, 0x95, 0x22, 0x89, 0xcd, 0xbd, 0x36,
+ 0x6e, 0x2e, 0xe0, 0x93, 0x14, 0x45, 0x08, 0xd6, 0x3f, 0x0b, 0x4e, 0x8d, 0x7e, 0x36, 0x36, 0x36,
+ 0xda, 0x82, 0xea, 0x27, 0xca, 0xa6, 0x33, 0x6d, 0x64, 0x6b, 0xe0, 0xdc, 0x43, 0xdb, 0x50, 0xa3,
+ 0x3c, 0x8c, 0x05, 0xe3, 0xda, 0xe8, 0x65, 0xe3, 0x85, 0x8f, 0xf6, 0xa0, 0x9a, 0x4f, 0xa2, 0x6a,
+ 0xc6, 0x8e, 0xee, 0xd5, 0x6f, 0xb4, 0xc0, 0x39, 0x03, 0x75, 0x60, 0xf3, 0x01, 0xcd, 0x40, 0x2d,
+ 0x04, 0x4b, 0x33, 0xcd, 0x84, 0xd2, 0x9c, 0x44, 0xd4, 0xa9, 0x65, 0x99, 0x0a, 0x3f, 0xad, 0x38,
+ 0x16, 0x52, 0x3b, 0xb6, 0x39, 0x65, 0x6c, 0xf4, 0x16, 0x6a, 0x11, 0xd5, 0x24, 0x24, 0x9a, 0x38,
+ 0x60, 0xf4, 0xdb, 0x79, 0x50, 0xbf, 0xb3, 0x9c, 0x84, 0x17, 0xf4, 0xee, 0x2b, 0x68, 0xfd, 0x1d,
+ 0x4d, 0x77, 0x27, 0xa4, 0xe3, 0x64, 0xea, 0x9b, 0x44, 0x56, 0xb6, 0x5a, 0x06, 0xb9, 0x10, 0x52,
+ 0x77, 0xbf, 0x5a, 0x50, 0x31, 0x1d, 0xad, 0xc8, 0xde, 0x30, 0xb2, 0xbf, 0x84, 0x8a, 0xd2, 0x44,
+ 0x67, 0x43, 0x6c, 0xee, 0x3f, 0xfa, 0x57, 0x84, 0x51, 0x1a, 0xc6, 0x19, 0x0b, 0x3d, 0x06, 0x5b,
+ 0x89, 0x44, 0x06, 0x34, 0x55, 0x21, 0x9b, 0x40, 0x2d, 0x03, 0xbc, 0x10, 0x3d, 0x83, 0x46, 0xb1,
+ 0xa1, 0x9c, 0x70, 0xa1, 0xcc, 0x30, 0xca, 0xb8, 0x58, 0xdb, 0xf3, 0x14, 0x2b, 0xd6, 0x78, 0x32,
+ 0xc9, 0x39, 0x2b, 0x6b, 0x3c, 0x99, 0x18, 0x4a, 0xf7, 0x14, 0xd0, 0xe2, 0xd5, 0x8d, 0x38, 0x89,
+ 0xd5, 0x4c, 0x68, 0x85, 0x5e, 0x83, 0xad, 0x0a, 0x27, 0x7f, 0xa9, 0x5b, 0x0f, 0xbf, 0x54, 0xbc,
+ 0x24, 0xee, 0x1d, 0x02, 0x2c, 0xbb, 0x40, 0x2d, 0xd8, 0xf4, 0xce, 0xbd, 0x4b, 0x6f, 0x30, 0xf4,
+ 0x3e, 0x78, 0xe7, 0x27, 0xad, 0x35, 0xd4, 0x00, 0x7b, 0x70, 0x35, 0xf0, 0x86, 0x83, 0xa3, 0xe1,
+ 0x71, 0xcb, 0x42, 0x75, 0xd8, 0x18, 0x1e, 0x0f, 0xae, 0xd2, 0x58, 0xe9, 0xa8, 0xf5, 0xfd, 0xae,
+ 0x6d, 0xfd, 0xb8, 0x6b, 0x5b, 0x3f, 0xef, 0xda, 0xd6, 0x97, 0x5f, 0xed, 0xb5, 0x71, 0xd5, 0xfc,
+ 0x27, 0x07, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x13, 0xbd, 0x6b, 0xbf, 0x9d, 0x04, 0x00, 0x00,
}
diff --git a/src/cluster/generated/proto/placementpb/placement.proto b/src/cluster/generated/proto/placementpb/placement.proto
index 58588566df..16a4564fae 100644
--- a/src/cluster/generated/proto/placementpb/placement.proto
+++ b/src/cluster/generated/proto/placementpb/placement.proto
@@ -40,15 +40,20 @@ message Placement {
}
message Instance {
- string id = 1;
- string isolation_group = 2;
- string zone = 3;
- uint32 weight = 4;
- string endpoint = 5;
- repeated Shard shards = 6;
- uint32 shard_set_id = 7;
- string hostname = 8;
- uint32 port = 9;
+ string id = 1;
+ string isolation_group = 2;
+ string zone = 3;
+ uint32 weight = 4;
+ string endpoint = 5;
+ repeated Shard shards = 6;
+ uint32 shard_set_id = 7;
+ string hostname = 8;
+ uint32 port = 9;
+ InstanceMetadata metadata = 10;
+}
+
+message InstanceMetadata {
+ uint32 debug_port = 1;
}
message Shard {
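Because metadata is a new optional submessage at field 10, old readers skip it on the wire and new readers treat a missing message as DebugPort 0, so the change is compatible in both directions. Populating it from Go with the generated getters added above:

    inst := &placementpb.Instance{
        Id:       "i1",
        Hostname: "host1",
        Port:     9000, // illustrative values
        Metadata: &placementpb.InstanceMetadata{DebugPort: 9004},
    }
    fmt.Println(inst.GetMetadata().GetDebugPort()) // 9004; 0 when Metadata is nil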
diff --git a/src/cluster/integration/etcd/etcd.go b/src/cluster/integration/etcd/etcd.go
index 392e591ce4..be82e688d5 100644
--- a/src/cluster/integration/etcd/etcd.go
+++ b/src/cluster/integration/etcd/etcd.go
@@ -25,6 +25,7 @@ import (
"fmt"
"io/ioutil"
"net/http"
+ "net/url"
"os"
"strings"
"time"
@@ -52,6 +53,8 @@ func New(opts Options) (EmbeddedKV, error) {
}
cfg := embed.NewConfig()
cfg.Dir = dir
+
+ setRandomPorts(cfg)
e, err := embed.StartEtcd(cfg)
if err != nil {
return nil, fmt.Errorf("unable to start etcd, err: %v", err)
@@ -63,6 +66,20 @@ func New(opts Options) (EmbeddedKV, error) {
}, nil
}
+func setRandomPorts(cfg *embed.Config) {
+ randomPortURL, err := url.Parse("http://localhost:0")
+ if err != nil {
+ panic(err.Error())
+ }
+
+ cfg.LPUrls = []url.URL{*randomPortURL}
+ cfg.APUrls = []url.URL{*randomPortURL}
+ cfg.LCUrls = []url.URL{*randomPortURL}
+ cfg.ACUrls = []url.URL{*randomPortURL}
+
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+}
+
func (e *embeddedKV) Close() error {
var multi errors.MultiError
@@ -91,7 +108,7 @@ func (e *embeddedKV) Start() error {
}
// ensure v3 api endpoints are available, https://github.com/coreos/etcd/pull/7075
- apiVersionEndpoint := fmt.Sprintf("%s/version", embed.DefaultListenClientURLs)
+ apiVersionEndpoint := fmt.Sprintf("http://%s/version", e.etcd.Clients[0].Addr().String())
fn := func() bool { return version3Available(apiVersionEndpoint) }
ok := xclock.WaitUntil(fn, timeout)
if !ok {
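The fix has two halves: bind every etcd listener to a kernel-assigned port, then probe the address etcd actually bound instead of the compile-time default. It relies on standard port-0 semantics, shown standalone here with net.Listen:

    // Port 0 asks the kernel for any free port; the chosen address is
    // recoverable from the listener afterwards, which is how Start()
    // above builds its version probe from e.etcd.Clients[0].Addr().
    l, err := net.Listen("tcp", "localhost:0")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(l.Addr().String()) // e.g. 127.0.0.1:54321
    l.Close()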
diff --git a/src/cluster/mem/mem.go b/src/cluster/mem/mem.go
new file mode 100644
index 0000000000..d1f4faf70a
--- /dev/null
+++ b/src/cluster/mem/mem.go
@@ -0,0 +1,142 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package memcluster
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/m3db/m3/src/cluster/client"
+ "github.com/m3db/m3/src/cluster/kv"
+ "github.com/m3db/m3/src/cluster/kv/mem"
+ "github.com/m3db/m3/src/cluster/services"
+)
+
+const (
+ _kvPrefix = "_kv"
+)
+
+var (
+ // assert the interface matches.
+ _ client.Client = (*Client)(nil)
+)
+
+// Client provides a cluster/client.Client backed by the kv/mem transactional
+// store, which keeps data in memory instead of etcd.
+type Client struct {
+ mu sync.Mutex
+ serviceOpts kv.OverrideOptions
+ cache map[cacheKey]kv.TxnStore
+}
+
+// New instantiates a client which defaults its stores to the given zone/env/namespace.
+func New(serviceOpts kv.OverrideOptions) *Client {
+ return &Client{
+ serviceOpts: serviceOpts,
+ cache: make(map[cacheKey]kv.TxnStore),
+ }
+}
+
+// Services constructs a gateway to all cluster services, backed by a mem store.
+func (c *Client) Services(opts services.OverrideOptions) (services.Services, error) {
+ if opts == nil {
+ opts = services.NewOverrideOptions()
+ }
+
+ errUnsupported := errors.New("currently unsupported for the memcluster client")
+
+ kvGen := func(zone string) (kv.Store, error) {
+ return c.Store(kv.NewOverrideOptions().SetZone(zone))
+ }
+
+ heartbeatGen := func(sid services.ServiceID) (services.HeartbeatService, error) {
+ return nil, errUnsupported
+ }
+
+ leaderGen := func(sid services.ServiceID, opts services.ElectionOptions) (services.LeaderService, error) {
+ return nil, errUnsupported
+ }
+
+ return services.NewServices(
+ services.NewOptions().
+ SetKVGen(kvGen).
+ SetHeartbeatGen(heartbeatGen).
+ SetLeaderGen(leaderGen).
+ SetNamespaceOptions(opts.NamespaceOptions()),
+ )
+}
+
+// KV returns/constructs a mem backed kv.Store for the default zone/env/namespace.
+func (c *Client) KV() (kv.Store, error) {
+ return c.TxnStore(kv.NewOverrideOptions())
+}
+
+// Txn returns/constructs a mem backed kv.TxnStore for the default zone/env/namespace.
+func (c *Client) Txn() (kv.TxnStore, error) {
+ return c.TxnStore(kv.NewOverrideOptions())
+}
+
+// Store returns/constructs a mem backed kv.Store for the given env/zone/namespace.
+func (c *Client) Store(opts kv.OverrideOptions) (kv.Store, error) {
+ return c.TxnStore(opts)
+}
+
+// TxnStore returns/constructs a mem backed kv.TxnStore for the given env/zone/namespace.
+func (c *Client) TxnStore(opts kv.OverrideOptions) (kv.TxnStore, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ opts = mergeOpts(c.serviceOpts, opts)
+ key := cacheKey{
+ Env: opts.Environment(),
+ Zone: opts.Zone(),
+ Namespace: opts.Namespace(),
+ }
+ if s, ok := c.cache[key]; ok {
+ return s, nil
+ }
+
+ store := mem.NewStore()
+ c.cache[key] = store
+ return store, nil
+}
+
+type cacheKey struct {
+ Env string
+ Zone string
+ Namespace string
+}
+
+func mergeOpts(defaults kv.OverrideOptions, opts kv.OverrideOptions) kv.OverrideOptions {
+ if opts.Zone() == "" {
+ opts = opts.SetZone(defaults.Zone())
+ }
+
+ if opts.Environment() == "" {
+ opts = opts.SetEnvironment(defaults.Environment())
+ }
+
+ if opts.Namespace() == "" {
+ opts = opts.SetNamespace(_kvPrefix)
+ }
+
+ return opts
+}
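The zone/env/namespace triple forms the cache key, so callers asking for the same scope share one store while distinct scopes stay isolated; the test file below exercises exactly this. A compact sketch of the same contract:

    c := memcluster.New(kv.NewOverrideOptions())
    a, _ := c.TxnStore(kv.NewOverrideOptions().SetZone("z1"))
    b, _ := c.TxnStore(kv.NewOverrideOptions().SetZone("z1"))
    other, _ := c.TxnStore(kv.NewOverrideOptions().SetZone("z2"))
    // a and b are the same in-memory store; other is independent.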
diff --git a/src/cluster/mem/mem_test.go b/src/cluster/mem/mem_test.go
new file mode 100644
index 0000000000..ca864ab40a
--- /dev/null
+++ b/src/cluster/mem/mem_test.go
@@ -0,0 +1,94 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package memcluster
+
+import (
+ "testing"
+
+ "github.com/m3db/m3/src/cluster/kv"
+ "github.com/m3db/m3/src/cluster/placement"
+ "github.com/m3db/m3/src/cluster/services"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestReusesStores(t *testing.T) {
+ key := "my_key"
+
+ c := New(kv.NewOverrideOptions())
+ store, err := c.TxnStore(kv.NewOverrideOptions())
+ require.NoError(t, err)
+ version, err := store.Set(key, &dummyProtoMessage{"my_value"})
+ require.NoError(t, err)
+
+ // retrieve the same store
+ sameStore, err := c.TxnStore(kv.NewOverrideOptions())
+ require.NoError(t, err)
+
+ v, err := sameStore.Get(key)
+ require.NoError(t, err)
+ assert.Equal(t, version, v.Version())
+
+ // other store doesn't have the value.
+ otherZone, err := c.TxnStore(kv.NewOverrideOptions().SetZone("other"))
+ require.NoError(t, err)
+ _, err = otherZone.Get(key)
+ assert.EqualError(t, err, "key not found")
+}
+
+func TestServices_Placement(t *testing.T) {
+ c := New(kv.NewOverrideOptions())
+ svcs, err := c.Services(services.NewOverrideOptions())
+ require.NoError(t, err)
+
+ placementSvc, err := svcs.PlacementService(services.NewServiceID().SetName("test_svc"), placement.NewOptions())
+ require.NoError(t, err)
+
+ p := placement.NewPlacement().SetInstances([]placement.Instance{
+ placement.NewInstance().SetHostname("host").SetEndpoint("127.0.0.1"),
+ })
+
+ p, err = placementSvc.Set(p)
+ require.NoError(t, err)
+
+ retrieved, err := placementSvc.Placement()
+ require.NoError(t, err)
+
+ // n.b.: placements are hard to compare directly since they're interfaces and
+ // contain more pointers than they ought to; comparing versions suffices here.
+ assert.Equal(t, p.Version(), retrieved.Version())
+}
+
+// dummyProtoMessage implements proto.Message and exists solely to give the
+// tests a value to pass to a kv.Store.
+type dummyProtoMessage struct {
+ Val string
+}
+
+func (d *dummyProtoMessage) Reset() {
+}
+
+func (d *dummyProtoMessage) String() string {
+ return d.Val
+}
+
+func (d *dummyProtoMessage) ProtoMessage() {
+}
diff --git a/src/cluster/placement/algo/mirrored_test.go b/src/cluster/placement/algo/mirrored_test.go
index 76e7cedc3d..d2588a0a54 100644
--- a/src/cluster/placement/algo/mirrored_test.go
+++ b/src/cluster/placement/algo/mirrored_test.go
@@ -36,37 +36,43 @@ func TestMirrorWorkflow(t *testing.T) {
SetIsolationGroup("r1").
SetEndpoint("endpoint1").
SetShardSetID(1).
- SetWeight(1)
+ SetWeight(1).
+ SetMetadata(placement.InstanceMetadata{DebugPort: 1})
i2 := placement.NewInstance().
SetID("i2").
SetIsolationGroup("r2").
SetEndpoint("endpoint2").
SetShardSetID(1).
- SetWeight(1)
+ SetWeight(1).
+ SetMetadata(placement.InstanceMetadata{DebugPort: 2})
i3 := placement.NewInstance().
SetID("i3").
SetIsolationGroup("r3").
SetEndpoint("endpoint3").
SetShardSetID(2).
- SetWeight(2)
+ SetWeight(2).
+ SetMetadata(placement.InstanceMetadata{DebugPort: 3})
i4 := placement.NewInstance().
SetID("i4").
SetIsolationGroup("r4").
SetEndpoint("endpoint4").
SetShardSetID(2).
- SetWeight(2)
+ SetWeight(2).
+ SetMetadata(placement.InstanceMetadata{DebugPort: 4})
i5 := placement.NewInstance().
SetID("i5").
SetIsolationGroup("r5").
SetEndpoint("endpoint5").
SetShardSetID(3).
- SetWeight(3)
+ SetWeight(3).
+ SetMetadata(placement.InstanceMetadata{DebugPort: 5})
i6 := placement.NewInstance().
SetID("i6").
SetIsolationGroup("r6").
SetEndpoint("endpoint6").
SetShardSetID(3).
- SetWeight(3)
+ SetWeight(3).
+ SetMetadata(placement.InstanceMetadata{DebugPort: 6})
instances := []placement.Instance{i1, i2, i3, i4, i5, i6}
@@ -100,18 +106,26 @@ func TestMirrorWorkflow(t *testing.T) {
SetIsolationGroup("r7").
SetEndpoint("endpoint7").
SetShardSetID(4).
- SetWeight(4)
+ SetWeight(4).
+ SetMetadata(placement.InstanceMetadata{DebugPort: 7})
i8 := placement.NewInstance().
SetID("i8").
SetIsolationGroup("r8").
SetEndpoint("endpoint8").
SetShardSetID(4).
- SetWeight(4)
+ SetWeight(4).
+ SetMetadata(placement.InstanceMetadata{DebugPort: 8})
p, err = a.AddInstances(p, []placement.Instance{i7, i8})
assert.NoError(t, err)
assert.Equal(t, uint32(4), p.MaxShardSetID())
validateDistribution(t, p, 1.01)
+ // validate InstanceMetadata is still set on all instances
+ var zero placement.InstanceMetadata
+ for _, inst := range p.Instances() {
+ assert.NotEqual(t, zero, inst.Metadata())
+ }
+
newI1, ok := p.Instance("i1")
assert.True(t, ok)
assert.Equal(t, i1.SetShards(newI1.Shards()), newI1)
diff --git a/src/cluster/placement/config.go b/src/cluster/placement/config.go
index 02cd42ca56..801bce8bf6 100644
--- a/src/cluster/placement/config.go
+++ b/src/cluster/placement/config.go
@@ -27,6 +27,48 @@ import (
"github.com/m3db/m3/src/x/instrument"
)
+// Configuration is configuration for placement options.
+type Configuration struct {
+ AllowPartialReplace *bool `yaml:"allowPartialReplace"`
+ AllowAllZones *bool `yaml:"allowAllZones"`
+ AddAllCandidates *bool `yaml:"addAllCandidates"`
+ IsSharded *bool `yaml:"isSharded"`
+ ShardStateMode *ShardStateMode `yaml:"shardStateMode"`
+ IsMirrored *bool `yaml:"isMirrored"`
+ IsStaged *bool `yaml:"isStaged"`
+ ValidZone *string `yaml:"validZone"`
+}
+
+// NewOptions creates placement options from the configuration.
+func (c *Configuration) NewOptions() Options {
+ opts := NewOptions()
+ if value := c.AllowPartialReplace; value != nil {
+ opts = opts.SetAllowPartialReplace(*value)
+ }
+ if value := c.AllowAllZones; value != nil {
+ opts = opts.SetAllowAllZones(*value)
+ }
+ if value := c.AddAllCandidates; value != nil {
+ opts = opts.SetAddAllCandidates(*value)
+ }
+ if value := c.IsSharded; value != nil {
+ opts = opts.SetIsSharded(*value)
+ }
+ if value := c.ShardStateMode; value != nil {
+ opts = opts.SetShardStateMode(*value)
+ }
+ if value := c.IsMirrored; value != nil {
+ opts = opts.SetIsMirrored(*value)
+ }
+ if value := c.IsStaged; value != nil {
+ opts = opts.SetIsStaged(*value)
+ }
+ if value := c.ValidZone; value != nil {
+ opts = opts.SetValidZone(*value)
+ }
+ return opts
+}
+
// WatcherConfiguration contains placement watcher configuration.
type WatcherConfiguration struct {
// Placement key.
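Every field in the new Configuration is a pointer, so absent YAML keys fall through to the package defaults. A sketch of the intended use, assuming the repo's usual gopkg.in/yaml.v2 for unmarshaling and key names per the struct tags above:

    const raw = `
    isSharded: true
    isMirrored: true
    validZone: embedded
    `

    var cfg placement.Configuration
    if err := yaml.Unmarshal([]byte(raw), &cfg); err != nil {
        log.Fatal(err)
    }
    // Only the keys the YAML mentions override the defaults.
    opts := cfg.NewOptions()
    fmt.Println(opts.IsSharded(), opts.IsMirrored()) // true true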
diff --git a/src/cluster/placement/options_test.go b/src/cluster/placement/options_test.go
index a7d23153f1..7d12007032 100644
--- a/src/cluster/placement/options_test.go
+++ b/src/cluster/placement/options_test.go
@@ -78,7 +78,8 @@ func TestPlacementOptions(t *testing.T) {
o = o.SetIsStaged(true)
assert.True(t, o.IsStaged())
- iopts := instrument.NewOptions().SetMetricsSamplingRate(0.5)
+ iopts := instrument.NewOptions().
+ SetTimerOptions(instrument.TimerOptions{StandardSampleRate: 0.5})
o = o.SetInstrumentOptions(iopts)
assert.Equal(t, iopts, o.InstrumentOptions())
diff --git a/src/cluster/placement/placement.go b/src/cluster/placement/placement.go
index 2368daf86c..c75e587bd0 100644
--- a/src/cluster/placement/placement.go
+++ b/src/cluster/placement/placement.go
@@ -429,6 +429,10 @@ func NewInstanceFromProto(instance *placementpb.Instance) (Instance, error) {
if err != nil {
return nil, err
}
+ debugPort := uint32(0)
+ if instance.Metadata != nil {
+ debugPort = instance.Metadata.DebugPort
+ }
return NewInstance().
SetID(instance.Id).
@@ -439,7 +443,10 @@ func NewInstanceFromProto(instance *placementpb.Instance) (Instance, error) {
SetShards(shards).
SetShardSetID(instance.ShardSetId).
SetHostname(instance.Hostname).
- SetPort(instance.Port), nil
+ SetPort(instance.Port).
+ SetMetadata(InstanceMetadata{
+ DebugPort: debugPort,
+ }), nil
}
type instance struct {
@@ -452,12 +459,13 @@ type instance struct {
port uint32
weight uint32
shardSetID uint32
+ metadata InstanceMetadata
}
func (i *instance) String() string {
return fmt.Sprintf(
- "Instance[ID=%s, IsolationGroup=%s, Zone=%s, Weight=%d, Endpoint=%s, Hostname=%s, Port=%d, ShardSetID=%d, Shards=%s]",
- i.id, i.isolationGroup, i.zone, i.weight, i.endpoint, i.hostname, i.port, i.shardSetID, i.shards.String(),
+ "Instance[ID=%s, IsolationGroup=%s, Zone=%s, Weight=%d, Endpoint=%s, Hostname=%s, Port=%d, ShardSetID=%d, Shards=%s, Metadata=%+v]",
+ i.id, i.isolationGroup, i.zone, i.weight, i.endpoint, i.hostname, i.port, i.shardSetID, i.shards.String(), i.metadata,
)
}
@@ -542,6 +550,15 @@ func (i *instance) SetShards(s shard.Shards) Instance {
return i
}
+func (i *instance) Metadata() InstanceMetadata {
+ return i.metadata
+}
+
+func (i *instance) SetMetadata(value InstanceMetadata) Instance {
+ i.metadata = value
+ return i
+}
+
func (i *instance) Proto() (*placementpb.Instance, error) {
ss, err := i.Shards().Proto()
if err != nil {
@@ -558,6 +575,9 @@ func (i *instance) Proto() (*placementpb.Instance, error) {
ShardSetId: i.ShardSetID(),
Hostname: i.Hostname(),
Port: i.Port(),
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: i.Metadata().DebugPort,
+ },
}, nil
}
@@ -592,7 +612,8 @@ func (i *instance) Clone() Instance {
SetHostname(i.Hostname()).
SetPort(i.Port()).
SetShardSetID(i.ShardSetID()).
- SetShards(i.Shards().Clone())
+ SetShards(i.Shards().Clone()).
+ SetMetadata(i.Metadata())
}
// Instances is a slice of instances that can produce a debug string.
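A round-trip sketch of the new metadata plumbing: SetMetadata feeds Proto(), and NewInstanceFromProto restores it, defaulting DebugPort to zero for payloads written before this change whose metadata message is nil (ID, endpoint and port values illustrative):

    inst := placement.NewInstance().
        SetID("i1").
        SetEndpoint("host1:9000").
        SetMetadata(placement.InstanceMetadata{DebugPort: 9004})

    pb, err := inst.Proto()
    if err != nil {
        log.Fatal(err)
    }
    rt, err := placement.NewInstanceFromProto(pb)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(rt.Metadata().DebugPort) // 9004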
diff --git a/src/cluster/placement/placement_mock.go b/src/cluster/placement/placement_mock.go
index 5bc001c688..f297c8e606 100644
--- a/src/cluster/placement/placement_mock.go
+++ b/src/cluster/placement/placement_mock.go
@@ -327,6 +327,34 @@ func (mr *MockInstanceMockRecorder) SetPort(value interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPort", reflect.TypeOf((*MockInstance)(nil).SetPort), value)
}
+// Metadata mocks base method
+func (m *MockInstance) Metadata() InstanceMetadata {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Metadata")
+ ret0, _ := ret[0].(InstanceMetadata)
+ return ret0
+}
+
+// Metadata indicates an expected call of Metadata
+func (mr *MockInstanceMockRecorder) Metadata() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Metadata", reflect.TypeOf((*MockInstance)(nil).Metadata))
+}
+
+// SetMetadata mocks base method
+func (m *MockInstance) SetMetadata(value InstanceMetadata) Instance {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetMetadata", value)
+ ret0, _ := ret[0].(Instance)
+ return ret0
+}
+
+// SetMetadata indicates an expected call of SetMetadata
+func (mr *MockInstanceMockRecorder) SetMetadata(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMetadata", reflect.TypeOf((*MockInstance)(nil).SetMetadata), value)
+}
+
// Proto mocks base method
func (m *MockInstance) Proto() (*placementpb.Instance, error) {
m.ctrl.T.Helper()
@@ -2363,6 +2391,320 @@ func (mr *MockServiceMockRecorder) MarkAllShardsAvailable() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkAllShardsAvailable", reflect.TypeOf((*MockService)(nil).MarkAllShardsAvailable))
}
+// MockOperator is a mock of Operator interface
+type MockOperator struct {
+ ctrl *gomock.Controller
+ recorder *MockOperatorMockRecorder
+}
+
+// MockOperatorMockRecorder is the mock recorder for MockOperator
+type MockOperatorMockRecorder struct {
+ mock *MockOperator
+}
+
+// NewMockOperator creates a new mock instance
+func NewMockOperator(ctrl *gomock.Controller) *MockOperator {
+ mock := &MockOperator{ctrl: ctrl}
+ mock.recorder = &MockOperatorMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockOperator) EXPECT() *MockOperatorMockRecorder {
+ return m.recorder
+}
+
+// BuildInitialPlacement mocks base method
+func (m *MockOperator) BuildInitialPlacement(instances []Instance, numShards, rf int) (Placement, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BuildInitialPlacement", instances, numShards, rf)
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BuildInitialPlacement indicates an expected call of BuildInitialPlacement
+func (mr *MockOperatorMockRecorder) BuildInitialPlacement(instances, numShards, rf interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildInitialPlacement", reflect.TypeOf((*MockOperator)(nil).BuildInitialPlacement), instances, numShards, rf)
+}
+
+// AddReplica mocks base method
+func (m *MockOperator) AddReplica() (Placement, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddReplica")
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AddReplica indicates an expected call of AddReplica
+func (mr *MockOperatorMockRecorder) AddReplica() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddReplica", reflect.TypeOf((*MockOperator)(nil).AddReplica))
+}
+
+// AddInstances mocks base method
+func (m *MockOperator) AddInstances(candidates []Instance) (Placement, []Instance, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddInstances", candidates)
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].([]Instance)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// AddInstances indicates an expected call of AddInstances
+func (mr *MockOperatorMockRecorder) AddInstances(candidates interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddInstances", reflect.TypeOf((*MockOperator)(nil).AddInstances), candidates)
+}
+
+// RemoveInstances mocks base method
+func (m *MockOperator) RemoveInstances(leavingInstanceIDs []string) (Placement, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RemoveInstances", leavingInstanceIDs)
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RemoveInstances indicates an expected call of RemoveInstances
+func (mr *MockOperatorMockRecorder) RemoveInstances(leavingInstanceIDs interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveInstances", reflect.TypeOf((*MockOperator)(nil).RemoveInstances), leavingInstanceIDs)
+}
+
+// ReplaceInstances mocks base method
+func (m *MockOperator) ReplaceInstances(leavingInstanceIDs []string, candidates []Instance) (Placement, []Instance, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ReplaceInstances", leavingInstanceIDs, candidates)
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].([]Instance)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// ReplaceInstances indicates an expected call of ReplaceInstances
+func (mr *MockOperatorMockRecorder) ReplaceInstances(leavingInstanceIDs, candidates interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplaceInstances", reflect.TypeOf((*MockOperator)(nil).ReplaceInstances), leavingInstanceIDs, candidates)
+}
+
+// MarkShardsAvailable mocks base method
+func (m *MockOperator) MarkShardsAvailable(instanceID string, shardIDs ...uint32) (Placement, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{instanceID}
+ for _, a := range shardIDs {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "MarkShardsAvailable", varargs...)
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarkShardsAvailable indicates an expected call of MarkShardsAvailable
+func (mr *MockOperatorMockRecorder) MarkShardsAvailable(instanceID interface{}, shardIDs ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{instanceID}, shardIDs...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkShardsAvailable", reflect.TypeOf((*MockOperator)(nil).MarkShardsAvailable), varargs...)
+}
+
+// MarkInstanceAvailable mocks base method
+func (m *MockOperator) MarkInstanceAvailable(instanceID string) (Placement, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarkInstanceAvailable", instanceID)
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarkInstanceAvailable indicates an expected call of MarkInstanceAvailable
+func (mr *MockOperatorMockRecorder) MarkInstanceAvailable(instanceID interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkInstanceAvailable", reflect.TypeOf((*MockOperator)(nil).MarkInstanceAvailable), instanceID)
+}
+
+// MarkAllShardsAvailable mocks base method
+func (m *MockOperator) MarkAllShardsAvailable() (Placement, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarkAllShardsAvailable")
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarkAllShardsAvailable indicates an expected call of MarkAllShardsAvailable
+func (mr *MockOperatorMockRecorder) MarkAllShardsAvailable() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkAllShardsAvailable", reflect.TypeOf((*MockOperator)(nil).MarkAllShardsAvailable))
+}
+
+// Placement mocks base method
+func (m *MockOperator) Placement() Placement {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Placement")
+ ret0, _ := ret[0].(Placement)
+ return ret0
+}
+
+// Placement indicates an expected call of Placement
+func (mr *MockOperatorMockRecorder) Placement() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Placement", reflect.TypeOf((*MockOperator)(nil).Placement))
+}
+
+// Mockoperations is a mock of operations interface
+type Mockoperations struct {
+ ctrl *gomock.Controller
+ recorder *MockoperationsMockRecorder
+}
+
+// MockoperationsMockRecorder is the mock recorder for Mockoperations
+type MockoperationsMockRecorder struct {
+ mock *Mockoperations
+}
+
+// NewMockoperations creates a new mock instance
+func NewMockoperations(ctrl *gomock.Controller) *Mockoperations {
+ mock := &Mockoperations{ctrl: ctrl}
+ mock.recorder = &MockoperationsMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *Mockoperations) EXPECT() *MockoperationsMockRecorder {
+ return m.recorder
+}
+
+// BuildInitialPlacement mocks base method
+func (m *Mockoperations) BuildInitialPlacement(instances []Instance, numShards, rf int) (Placement, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BuildInitialPlacement", instances, numShards, rf)
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BuildInitialPlacement indicates an expected call of BuildInitialPlacement
+func (mr *MockoperationsMockRecorder) BuildInitialPlacement(instances, numShards, rf interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildInitialPlacement", reflect.TypeOf((*Mockoperations)(nil).BuildInitialPlacement), instances, numShards, rf)
+}
+
+// AddReplica mocks base method
+func (m *Mockoperations) AddReplica() (Placement, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddReplica")
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AddReplica indicates an expected call of AddReplica
+func (mr *MockoperationsMockRecorder) AddReplica() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddReplica", reflect.TypeOf((*Mockoperations)(nil).AddReplica))
+}
+
+// AddInstances mocks base method
+func (m *Mockoperations) AddInstances(candidates []Instance) (Placement, []Instance, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddInstances", candidates)
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].([]Instance)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// AddInstances indicates an expected call of AddInstances
+func (mr *MockoperationsMockRecorder) AddInstances(candidates interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddInstances", reflect.TypeOf((*Mockoperations)(nil).AddInstances), candidates)
+}
+
+// RemoveInstances mocks base method
+func (m *Mockoperations) RemoveInstances(leavingInstanceIDs []string) (Placement, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RemoveInstances", leavingInstanceIDs)
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// RemoveInstances indicates an expected call of RemoveInstances
+func (mr *MockoperationsMockRecorder) RemoveInstances(leavingInstanceIDs interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveInstances", reflect.TypeOf((*Mockoperations)(nil).RemoveInstances), leavingInstanceIDs)
+}
+
+// ReplaceInstances mocks base method
+func (m *Mockoperations) ReplaceInstances(leavingInstanceIDs []string, candidates []Instance) (Placement, []Instance, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ReplaceInstances", leavingInstanceIDs, candidates)
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].([]Instance)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// ReplaceInstances indicates an expected call of ReplaceInstances
+func (mr *MockoperationsMockRecorder) ReplaceInstances(leavingInstanceIDs, candidates interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplaceInstances", reflect.TypeOf((*Mockoperations)(nil).ReplaceInstances), leavingInstanceIDs, candidates)
+}
+
+// MarkShardsAvailable mocks base method
+func (m *Mockoperations) MarkShardsAvailable(instanceID string, shardIDs ...uint32) (Placement, error) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{instanceID}
+ for _, a := range shardIDs {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "MarkShardsAvailable", varargs...)
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarkShardsAvailable indicates an expected call of MarkShardsAvailable
+func (mr *MockoperationsMockRecorder) MarkShardsAvailable(instanceID interface{}, shardIDs ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{instanceID}, shardIDs...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkShardsAvailable", reflect.TypeOf((*Mockoperations)(nil).MarkShardsAvailable), varargs...)
+}
+
+// MarkInstanceAvailable mocks base method
+func (m *Mockoperations) MarkInstanceAvailable(instanceID string) (Placement, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarkInstanceAvailable", instanceID)
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarkInstanceAvailable indicates an expected call of MarkInstanceAvailable
+func (mr *MockoperationsMockRecorder) MarkInstanceAvailable(instanceID interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkInstanceAvailable", reflect.TypeOf((*Mockoperations)(nil).MarkInstanceAvailable), instanceID)
+}
+
+// MarkAllShardsAvailable mocks base method
+func (m *Mockoperations) MarkAllShardsAvailable() (Placement, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MarkAllShardsAvailable")
+ ret0, _ := ret[0].(Placement)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MarkAllShardsAvailable indicates an expected call of MarkAllShardsAvailable
+func (mr *MockoperationsMockRecorder) MarkAllShardsAvailable() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkAllShardsAvailable", reflect.TypeOf((*Mockoperations)(nil).MarkAllShardsAvailable))
+}
+
// MockAlgorithm is a mock of Algorithm interface
type MockAlgorithm struct {
ctrl *gomock.Controller
diff --git a/src/cluster/placement/placement_test.go b/src/cluster/placement/placement_test.go
index 3e1fb1efe1..3ae141643f 100644
--- a/src/cluster/placement/placement_test.go
+++ b/src/cluster/placement/placement_test.go
@@ -386,7 +386,8 @@ func TestInstance(t *testing.T) {
SetShardSetID(0).
SetZone("zone").
SetHostname("host1").
- SetPort(123)
+ SetPort(123).
+ SetMetadata(InstanceMetadata{DebugPort: 456})
assert.NotNil(t, i1.Shards())
s := shard.NewShards([]shard.Shard{
shard.NewShard(1).SetState(shard.Available),
@@ -395,7 +396,7 @@ func TestInstance(t *testing.T) {
})
i1.SetShards(s)
description := fmt.Sprintf(
- "Instance[ID=id, IsolationGroup=isolationGroup, Zone=zone, Weight=1, Endpoint=endpoint, Hostname=host1, Port=123, ShardSetID=0, Shards=%s]",
+ "Instance[ID=id, IsolationGroup=isolationGroup, Zone=zone, Weight=1, Endpoint=endpoint, Hostname=host1, Port=123, ShardSetID=0, Shards=%s, Metadata={DebugPort:456}]",
s.String())
assert.Equal(t, description, i1.String())
@@ -536,6 +537,7 @@ func TestConvertBetweenProtoAndPlacement(t *testing.T) {
Weight: 1,
Shards: protoShards,
ShardSetId: 0,
+ Metadata: &placementpb.InstanceMetadata{DebugPort: 123},
},
"i2": &placementpb.Instance{
Id: "i2",
@@ -545,6 +547,7 @@ func TestConvertBetweenProtoAndPlacement(t *testing.T) {
Weight: 1,
Shards: protoShards,
ShardSetId: 1,
+ Metadata: &placementpb.InstanceMetadata{DebugPort: 456},
},
},
ReplicaFactor: 2,
@@ -564,7 +567,9 @@ func TestConvertBetweenProtoAndPlacement(t *testing.T) {
assert.Equal(t, uint32(1), p.MaxShardSetID())
instances := p.Instances()
assert.Equal(t, uint32(0), instances[0].ShardSetID())
+ assert.Equal(t, uint32(123), instances[0].Metadata().DebugPort)
assert.Equal(t, uint32(1), instances[1].ShardSetID())
+ assert.Equal(t, uint32(456), instances[1].Metadata().DebugPort)
placementProtoNew, err := p.Proto()
assert.NoError(t, err)
@@ -639,7 +644,10 @@ func TestPlacementInstanceToProto(t *testing.T) {
SetZone("z1").
SetEndpoint("e1").
SetWeight(1).
- SetShards(shards)
+ SetShards(shards).
+ SetMetadata(InstanceMetadata{
+ DebugPort: 123,
+ })
instanceProto, err := instance.Proto()
assert.NoError(t, err)
@@ -656,6 +664,9 @@ func TestPlacementInstanceToProto(t *testing.T) {
Endpoint: "e1",
Weight: 1,
Shards: protoShards,
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 123,
+ },
}
assert.Equal(t, expInstance, instanceProto)
diff --git a/src/cluster/placement/service/mirrored_custom_groups_test.go b/src/cluster/placement/service/mirrored_custom_groups_test.go
index 641e576a13..a27c9df514 100644
--- a/src/cluster/placement/service/mirrored_custom_groups_test.go
+++ b/src/cluster/placement/service/mirrored_custom_groups_test.go
@@ -23,6 +23,7 @@ package service
import (
"fmt"
"math/rand"
+ "sync/atomic"
"testing"
"github.com/m3db/m3/src/cluster/kv"
@@ -269,26 +270,37 @@ func shardIDs(ss shard.Shards) []uint32 {
return ids
}
+func newTestInstance() placement.Instance {
+ return newInstanceWithID(nextTestInstanceID())
+}
+
func newInstanceWithID(id string) placement.Instance {
return placement.NewEmptyInstance(
id,
- nextIsolationGroup(),
+ nextTestIsolationGroup(),
zone,
fmt.Sprintf("localhost:%d", randPort()),
defaultWeight,
)
}
+var curInstanceNo int64
+
+// Uses global state; factor into a factory object if you need guarantees about the specific results.
+func nextTestInstanceID() string {
+ myNumber := atomic.AddInt64(&curInstanceNo, 1)
+ return fmt.Sprintf("instance-%d", myNumber)
+}
+
// completely random valid port, not necessarily open.
func randPort() int {
return rand.Intn(1 << 16)
}
-var curISOGroup = 0
+var curISOGroup int64
-// Not thread safe; factor into a factory object if you need that.
-func nextIsolationGroup() string {
- myGroup := curISOGroup
- curISOGroup++
+// Uses global state; factor into a factory object if you need guarantees about the specific results.
+func nextTestIsolationGroup() string {
+ myGroup := atomic.AddInt64(&curISOGroup, 1)
return fmt.Sprintf("iso-group-%d", myGroup)
}
diff --git a/src/cluster/placement/service/operator.go b/src/cluster/placement/service/operator.go
new file mode 100644
index 0000000000..75da980fc4
--- /dev/null
+++ b/src/cluster/placement/service/operator.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
+package service
+
+import (
+ "errors"
+
+ "github.com/m3db/m3/src/cluster/placement"
+)
+
+// NewPlacementOperator constructs a placement.Operator which performs transformations on the
+// given placement.
+// If initialPlacement is nil, BuildInitialPlacement must be called before any operations on the
+// placement.
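+//
+// An illustrative sketch (error handling elided; pl, opts and candidate are assumed to be
+// an existing placement, options and candidate instance respectively):
+//
+//   op := NewPlacementOperator(pl, opts)
+//   op.AddInstances([]placement.Instance{candidate})
+//   op.MarkAllShardsAvailable()
+//   updated := op.Placement() // all changes so far, applied in memory only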
+func NewPlacementOperator(initialPlacement placement.Placement, opts placement.Options) placement.Operator {
+ store := newDummyStore(initialPlacement)
+ return &placementOperator{
+ placementServiceImpl: newPlacementServiceImpl(opts, store),
+ store: store,
+ }
+}
+
+// placementOperator is a placementServiceImpl backed by a dummyStore; it just sets
+// in-memory state and doesn't touch versions.
+type placementOperator struct {
+ *placementServiceImpl
+ store *dummyStore
+}
+
+func (p *placementOperator) Placement() placement.Placement {
+ return p.store.curPlacement
+}
+
+// dummyStore is a helper type for placementOperator. It stores a single placement in memory,
+// allowing us to use the same code to implement the actual placement.Service (which typically talks
+// to a fully fledged backing store) and placement.Operator, which only operates on memory.
+// Unlike proper placement.Storage implementations, all operations are unversioned;
+// version arguments are ignored, and the store never calls Placement.SetVersion. This makes it
+// distinct from e.g. the implementation in mem.NewStore.
+type dummyStore struct {
+ curPlacement placement.Placement
+}
+
+func newDummyStore(initialPlacement placement.Placement) *dummyStore {
+ return &dummyStore{curPlacement: initialPlacement}
+}
+
+func (d *dummyStore) Set(p placement.Placement) (placement.Placement, error) {
+ d.set(p)
+ return d.curPlacement, nil
+}
+
+func (d *dummyStore) set(p placement.Placement) {
+ d.curPlacement = p
+}
+
+// CheckAndSet on the dummy store is unconditional (no check).
+func (d *dummyStore) CheckAndSet(p placement.Placement, _ int) (placement.Placement, error) {
+ d.curPlacement = p
+ return d.curPlacement, nil
+}
+
+func (d *dummyStore) SetIfNotExist(p placement.Placement) (placement.Placement, error) {
+ if d.curPlacement != nil {
+ return nil, errors.New(
+ "placement already exists and can't be rebuilt. Construct a new placement.Operator",
+ )
+ }
+ d.curPlacement = p
+ return d.curPlacement, nil
+}
+
+func (d *dummyStore) Placement() (placement.Placement, error) {
+ if d.curPlacement == nil {
+ return nil, errors.New(
+ "no initial placement specified at operator construction; call BuildInitialPlacement or pass one in",
+ )
+ }
+ return d.curPlacement, nil
+}
+
diff --git a/src/cluster/placement/service/operator_test.go b/src/cluster/placement/service/operator_test.go
new file mode 100644
index 0000000000..af3b1b8688
--- /dev/null
+++ b/src/cluster/placement/service/operator_test.go
@@ -0,0 +1,193 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
+package service
+
+import (
+ "testing"
+
+ "github.com/m3db/m3/src/cluster/placement"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOperator(t *testing.T) {
+ type testDeps struct {
+ options placement.Options
+ op placement.Operator
+ }
+ setup := func(t *testing.T) testDeps {
+ options := placement.NewOptions().SetAllowAllZones(true)
+ return testDeps{
+ options: options,
+ op: NewPlacementOperator(nil, options),
+ }
+ }
+
+ t.Run("errors when operations called on unset placement", func(t *testing.T) {
+ tdeps := setup(t)
+ _, _, err := tdeps.op.AddInstances([]placement.Instance{newTestInstance()})
+ require.Error(t, err)
+ })
+
+ t.Run("BuildInitialPlacement twice errors", func(t *testing.T) {
+ tdeps := setup(t)
+ _, err := tdeps.op.BuildInitialPlacement([]placement.Instance{newTestInstance()}, 10, 1)
+ require.NoError(t, err)
+
+ _, err = tdeps.op.BuildInitialPlacement([]placement.Instance{newTestInstance()}, 10, 1)
+ assertErrContains(t, err, "placement already exists and can't be rebuilt")
+ })
+
+ t.Run("end-to-end flow", func(t *testing.T) {
+ tdeps := setup(t)
+ op := NewPlacementOperator(nil, tdeps.options)
+ store := newMockStorage()
+
+ pl, err := op.BuildInitialPlacement([]placement.Instance{newTestInstance()}, 10, 1)
+ require.NoError(t, err)
+
+ initialVersion := pl.Version()
+
+ _, _, err = op.AddInstances([]placement.Instance{newTestInstance()})
+ require.NoError(t, err)
+
+ _, err = op.MarkAllShardsAvailable()
+ require.NoError(t, err)
+
+ _, err = store.SetIfNotExist(op.Placement())
+ require.NoError(t, err)
+
+ pl, err = store.Placement()
+ require.NoError(t, err)
+
+ // expect exactly one version increment, from store.SetIfNotExist
+		assert.Equal(t, initialVersion+1, pl.Version())
+
+ // spot check the results
+ allAvailable := true
+ instances := pl.Instances()
+ assert.Len(t, instances, 2)
+ for _, inst := range instances {
+ allAvailable = allAvailable && inst.IsAvailable()
+ }
+ assert.True(t, allAvailable)
+ })
+}
+
+type dummyStoreTestDeps struct {
+ store *dummyStore
+ pl placement.Placement
+}
+
+func dummyStoreSetup(t *testing.T) dummyStoreTestDeps {
+ return dummyStoreTestDeps{
+ store: newDummyStore(nil),
+ pl: placement.NewPlacement(),
+ }
+}
+
+func TestDummyStore_Set(t *testing.T) {
+ t.Run("sets without touching version", func(t *testing.T) {
+ tdeps := dummyStoreSetup(t)
+ testSetsCorrectly(t, tdeps, tdeps.store.Set)
+ })
+}
+
+func TestDummyStore_Placement(t *testing.T) {
+ t.Run("returns placement", func(t *testing.T) {
+ tdeps := dummyStoreSetup(t)
+
+ store := newDummyStore(tdeps.pl)
+ actual, err := store.Placement()
+ require.NoError(t, err)
+ assert.Equal(t, actual, tdeps.pl)
+ })
+
+ t.Run("errors when nil", func(t *testing.T) {
+ tdeps := dummyStoreSetup(t)
+ _, err := tdeps.store.Placement()
+ assertErrContains(t, err, "no initial placement specified at operator construction")
+ })
+}
+
+func TestDummyStore_CheckAndSet(t *testing.T) {
+ t.Run("sets without touching version", func(t *testing.T) {
+ tdeps := dummyStoreSetup(t)
+ testSetsCorrectly(t, tdeps, func(pl placement.Placement) (placement.Placement, error) {
+ return tdeps.store.CheckAndSet(pl, 5)
+ })
+ })
+
+ t.Run("ignores version mismatches", func(t *testing.T) {
+ tdeps := dummyStoreSetup(t)
+ _, err := tdeps.store.CheckAndSet(tdeps.pl, 2)
+ require.NoError(t, err)
+
+ _, err = tdeps.store.CheckAndSet(tdeps.pl.SetVersion(5), 3)
+ require.NoError(t, err)
+
+ pl, err := tdeps.store.Placement()
+ require.NoError(t, err)
+ assert.Equal(t, tdeps.pl, pl)
+ })
+}
+
+func TestDummyStore_SetIfNotExist(t *testing.T) {
+ t.Run("sets without touching version", func(t *testing.T) {
+ tdeps := dummyStoreSetup(t)
+ testSetsCorrectly(t, tdeps, func(pl placement.Placement) (placement.Placement, error) {
+ return tdeps.store.SetIfNotExist(pl)
+ })
+ })
+
+ t.Run("errors if placement exists", func(t *testing.T) {
+ tdeps := dummyStoreSetup(t)
+ _, err := tdeps.store.SetIfNotExist(tdeps.pl)
+ require.NoError(t, err)
+ _, err = tdeps.store.SetIfNotExist(tdeps.pl)
+ assertErrContains(t, err, "placement already exists and can't be rebuilt")
+ })
+}
+
+// Run a *Set* function and check that it returned the right thing, didn't touch the version,
+// and actually set the value.
+func testSetsCorrectly(t *testing.T, tdeps dummyStoreTestDeps, set func(pl placement.Placement) (placement.Placement, error)) {
+ _, err := tdeps.store.Placement()
+ // should not be set yet
+ require.Error(t, err)
+
+ curVersion := tdeps.pl.Version()
+ rtn, err := set(tdeps.pl)
+ require.NoError(t, err)
+
+ assert.Equal(t, curVersion, rtn.Version())
+ assert.Equal(t, tdeps.pl, rtn)
+ curState, err := tdeps.store.Placement()
+ require.NoError(t, err)
+ assert.Equal(t, tdeps.pl, curState)
+}
+
+func assertErrContains(t *testing.T, err error, contained string) {
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), contained)
+}
diff --git a/src/cluster/placement/service/service.go b/src/cluster/placement/service/service.go
index bd5633b37c..67873db30f 100644
--- a/src/cluster/placement/service/service.go
+++ b/src/cluster/placement/service/service.go
@@ -33,15 +33,25 @@ import (
type placementService struct {
placement.Storage
-
- opts placement.Options
- algo placement.Algorithm
- selector placement.InstanceSelector
- logger *zap.Logger
+ *placementServiceImpl
}
// NewPlacementService returns an instance of placement service.
func NewPlacementService(s placement.Storage, opts placement.Options) placement.Service {
+ return &placementService{
+ Storage: s,
+		placementServiceImpl: newPlacementServiceImpl(opts, s),
+ }
+}
+
+func newPlacementServiceImpl(
+ opts placement.Options,
+ storage minimalPlacementStorage,
+) *placementServiceImpl {
if opts == nil {
opts = placement.NewOptions()
}
@@ -51,8 +61,8 @@ func NewPlacementService(s placement.Storage, opts placement.Options) placement.
instanceSelector = selector.NewInstanceSelector(opts)
}
- return &placementService{
- Storage: s,
+ return &placementServiceImpl{
+ store: storage,
opts: opts,
algo: algo.NewAlgorithm(opts),
selector: instanceSelector,
@@ -60,7 +70,37 @@ func NewPlacementService(s placement.Storage, opts placement.Options) placement.
}
}
-func (ps *placementService) BuildInitialPlacement(
+// minimalPlacementStorage is the subset of the placement.Storage interface used by placement.Service
+// directly.
+type minimalPlacementStorage interface {
+
+ // Set writes a placement.
+ Set(p placement.Placement) (placement.Placement, error)
+
+ // CheckAndSet writes a placement.Placement if the current version
+ // matches the expected version.
+ CheckAndSet(p placement.Placement, version int) (placement.Placement, error)
+
+ // SetIfNotExist writes a placement.Placement.
+ SetIfNotExist(p placement.Placement) (placement.Placement, error)
+
+ // Placement reads placement.Placement.
+ Placement() (placement.Placement, error)
+}
+
+// Compile-time assertion that placement.Storage satisfies minimalPlacementStorage.
+var _ minimalPlacementStorage = placement.Storage(nil)
+
+type placementServiceImpl struct {
+ store minimalPlacementStorage
+
+ opts placement.Options
+ algo placement.Algorithm
+ selector placement.InstanceSelector
+ logger *zap.Logger
+}
+
+func (ps *placementServiceImpl) BuildInitialPlacement(
candidates []placement.Instance,
numShards int,
rf int,
@@ -92,11 +132,11 @@ func (ps *placementService) BuildInitialPlacement(
return nil, err
}
- return ps.SetIfNotExist(tempPlacement)
+ return ps.store.SetIfNotExist(tempPlacement)
}
-func (ps *placementService) AddReplica() (placement.Placement, error) {
- curPlacement, err := ps.Placement()
+func (ps *placementServiceImpl) AddReplica() (placement.Placement, error) {
+ curPlacement, err := ps.store.Placement()
if err != nil {
return nil, err
}
@@ -114,13 +154,13 @@ func (ps *placementService) AddReplica() (placement.Placement, error) {
return nil, err
}
- return ps.CheckAndSet(tempPlacement, curPlacement.Version())
+ return ps.store.CheckAndSet(tempPlacement, curPlacement.Version())
}
-func (ps *placementService) AddInstances(
+func (ps *placementServiceImpl) AddInstances(
candidates []placement.Instance,
) (placement.Placement, []placement.Instance, error) {
- curPlacement, err := ps.Placement()
+ curPlacement, err := ps.store.Placement()
if err != nil {
return nil, nil, err
}
@@ -151,15 +191,15 @@ func (ps *placementService) AddInstances(
addingInstances[i] = addingInstance
}
- newPlacement, err := ps.CheckAndSet(tempPlacement, curPlacement.Version())
+ newPlacement, err := ps.store.CheckAndSet(tempPlacement, curPlacement.Version())
if err != nil {
return nil, nil, err
}
return newPlacement, addingInstances, nil
}
-func (ps *placementService) RemoveInstances(instanceIDs []string) (placement.Placement, error) {
- curPlacement, err := ps.Placement()
+func (ps *placementServiceImpl) RemoveInstances(instanceIDs []string) (placement.Placement, error) {
+ curPlacement, err := ps.store.Placement()
if err != nil {
return nil, err
}
@@ -177,14 +217,14 @@ func (ps *placementService) RemoveInstances(instanceIDs []string) (placement.Pla
return nil, err
}
- return ps.CheckAndSet(tempPlacement, curPlacement.Version())
+ return ps.store.CheckAndSet(tempPlacement, curPlacement.Version())
}
-func (ps *placementService) ReplaceInstances(
+func (ps *placementServiceImpl) ReplaceInstances(
leavingInstanceIDs []string,
candidates []placement.Instance,
) (placement.Placement, []placement.Instance, error) {
- curPlacement, err := ps.Placement()
+ curPlacement, err := ps.store.Placement()
if err != nil {
return nil, nil, err
}
@@ -216,15 +256,15 @@ func (ps *placementService) ReplaceInstances(
addedInstances = append(addedInstances, addedInstance)
}
- newPlacement, err := ps.CheckAndSet(tempPlacement, curPlacement.Version())
+ newPlacement, err := ps.store.CheckAndSet(tempPlacement, curPlacement.Version())
if err != nil {
return nil, nil, err
}
return newPlacement, addedInstances, nil
}
-func (ps *placementService) MarkShardsAvailable(instanceID string, shardIDs ...uint32) (placement.Placement, error) {
- curPlacement, err := ps.Placement()
+func (ps *placementServiceImpl) MarkShardsAvailable(instanceID string, shardIDs ...uint32) (placement.Placement, error) {
+ curPlacement, err := ps.store.Placement()
if err != nil {
return nil, err
}
@@ -242,11 +282,11 @@ func (ps *placementService) MarkShardsAvailable(instanceID string, shardIDs ...u
return nil, err
}
- return ps.CheckAndSet(tempPlacement, curPlacement.Version())
+ return ps.store.CheckAndSet(tempPlacement, curPlacement.Version())
}
-func (ps *placementService) MarkInstanceAvailable(instanceID string) (placement.Placement, error) {
- curPlacement, err := ps.Placement()
+func (ps *placementServiceImpl) MarkInstanceAvailable(instanceID string) (placement.Placement, error) {
+ curPlacement, err := ps.store.Placement()
if err != nil {
return nil, err
}
@@ -275,11 +315,11 @@ func (ps *placementService) MarkInstanceAvailable(instanceID string) (placement.
return nil, err
}
- return ps.CheckAndSet(tempPlacement, curPlacement.Version())
+ return ps.store.CheckAndSet(tempPlacement, curPlacement.Version())
}
-func (ps *placementService) MarkAllShardsAvailable() (placement.Placement, error) {
- curPlacement, err := ps.Placement()
+func (ps *placementServiceImpl) MarkAllShardsAvailable() (placement.Placement, error) {
+ curPlacement, err := ps.store.Placement()
if err != nil {
return nil, err
}
@@ -300,5 +340,5 @@ func (ps *placementService) MarkAllShardsAvailable() (placement.Placement, error
return nil, err
}
- return ps.CheckAndSet(tempPlacement, curPlacement.Version())
+ return ps.store.CheckAndSet(tempPlacement, curPlacement.Version())
}
diff --git a/src/cluster/placement/staged_placement_test.go b/src/cluster/placement/staged_placement_test.go
index d565eb4471..c2438e8679 100644
--- a/src/cluster/placement/staged_placement_test.go
+++ b/src/cluster/placement/staged_placement_test.go
@@ -43,6 +43,9 @@ var (
&placementpb.Shard{Id: 0},
&placementpb.Shard{Id: 1},
},
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 1,
+ },
},
"instance2": &placementpb.Instance{
Id: "instance2",
@@ -51,6 +54,9 @@ var (
&placementpb.Shard{Id: 2},
&placementpb.Shard{Id: 3},
},
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 2,
+ },
},
"instance3": &placementpb.Instance{
Id: "instance3",
@@ -59,6 +65,9 @@ var (
&placementpb.Shard{Id: 0},
&placementpb.Shard{Id: 1},
},
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 3,
+ },
},
"instance4": &placementpb.Instance{
Id: "instance4",
@@ -67,6 +76,9 @@ var (
&placementpb.Shard{Id: 2},
&placementpb.Shard{Id: 3},
},
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 4,
+ },
},
},
},
@@ -83,6 +95,9 @@ var (
&placementpb.Shard{Id: 2},
&placementpb.Shard{Id: 3},
},
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 1,
+ },
},
},
},
@@ -102,14 +117,16 @@ var (
SetShards(shard.NewShards([]shard.Shard{
shard.NewShard(0).SetState(shard.Initializing),
shard.NewShard(1).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 1}),
NewInstance().
SetID("instance3").
SetEndpoint("instance3_endpoint").
SetShards(shard.NewShards([]shard.Shard{
shard.NewShard(0).SetState(shard.Initializing),
shard.NewShard(1).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 3}),
},
1: []Instance{
NewInstance().
@@ -118,14 +135,16 @@ var (
SetShards(shard.NewShards([]shard.Shard{
shard.NewShard(0).SetState(shard.Initializing),
shard.NewShard(1).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 1}),
NewInstance().
SetID("instance3").
SetEndpoint("instance3_endpoint").
SetShards(shard.NewShards([]shard.Shard{
shard.NewShard(0).SetState(shard.Initializing),
shard.NewShard(1).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 3}),
},
2: []Instance{
NewInstance().
@@ -134,14 +153,16 @@ var (
SetShards(shard.NewShards([]shard.Shard{
shard.NewShard(2).SetState(shard.Initializing),
shard.NewShard(3).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 2}),
NewInstance().
SetID("instance4").
SetEndpoint("instance4_endpoint").
SetShards(shard.NewShards([]shard.Shard{
shard.NewShard(2).SetState(shard.Initializing),
shard.NewShard(3).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 4}),
},
3: []Instance{
NewInstance().
@@ -150,14 +171,16 @@ var (
SetShards(shard.NewShards([]shard.Shard{
shard.NewShard(2).SetState(shard.Initializing),
shard.NewShard(3).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 2}),
NewInstance().
SetID("instance4").
SetEndpoint("instance4_endpoint").
SetShards(shard.NewShards([]shard.Shard{
shard.NewShard(2).SetState(shard.Initializing),
shard.NewShard(3).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 4}),
},
},
instances: map[string]Instance{
@@ -167,28 +190,32 @@ var (
SetShards(shard.NewShards([]shard.Shard{
shard.NewShard(0).SetState(shard.Initializing),
shard.NewShard(1).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 1}),
"instance2": NewInstance().
SetID("instance2").
SetEndpoint("instance2_endpoint").
SetShards(shard.NewShards([]shard.Shard{
shard.NewShard(2).SetState(shard.Initializing),
shard.NewShard(3).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 2}),
"instance3": NewInstance().
SetID("instance3").
SetEndpoint("instance3_endpoint").
SetShards(shard.NewShards([]shard.Shard{
shard.NewShard(0).SetState(shard.Initializing),
shard.NewShard(1).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 3}),
"instance4": NewInstance().
SetID("instance4").
SetEndpoint("instance4_endpoint").
SetShards(shard.NewShards([]shard.Shard{
shard.NewShard(2).SetState(shard.Initializing),
shard.NewShard(3).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 4}),
},
},
&placement{
@@ -204,7 +231,8 @@ var (
shard.NewShard(1).SetState(shard.Initializing),
shard.NewShard(2).SetState(shard.Initializing),
shard.NewShard(3).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 1}),
},
1: []Instance{
NewInstance().
@@ -215,7 +243,8 @@ var (
shard.NewShard(1).SetState(shard.Initializing),
shard.NewShard(2).SetState(shard.Initializing),
shard.NewShard(3).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 1}),
},
2: []Instance{
NewInstance().
@@ -226,7 +255,8 @@ var (
shard.NewShard(1).SetState(shard.Initializing),
shard.NewShard(2).SetState(shard.Initializing),
shard.NewShard(3).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 1}),
},
3: []Instance{
NewInstance().
@@ -237,7 +267,8 @@ var (
shard.NewShard(1).SetState(shard.Initializing),
shard.NewShard(2).SetState(shard.Initializing),
shard.NewShard(3).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 1}),
},
},
instances: map[string]Instance{
@@ -249,7 +280,8 @@ var (
shard.NewShard(1).SetState(shard.Initializing),
shard.NewShard(2).SetState(shard.Initializing),
shard.NewShard(3).SetState(shard.Initializing),
- })),
+ })).
+ SetMetadata(InstanceMetadata{DebugPort: 1}),
},
},
}
diff --git a/src/cluster/placement/storage/helper_test.go b/src/cluster/placement/storage/helper_test.go
index 6e0bed8d6b..b8b2ca2935 100644
--- a/src/cluster/placement/storage/helper_test.go
+++ b/src/cluster/placement/storage/helper_test.go
@@ -41,6 +41,9 @@ func TestPlacementHelper(t *testing.T) {
Weight: 1,
Shards: protoShards,
ShardSetId: 0,
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 1,
+ },
},
"i2": &placementpb.Instance{
Id: "i2",
@@ -50,6 +53,9 @@ func TestPlacementHelper(t *testing.T) {
Weight: 1,
Shards: protoShards,
ShardSetId: 1,
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 2,
+ },
},
},
ReplicaFactor: 2,
@@ -105,6 +111,9 @@ func TestPlacementSnapshotsHelper(t *testing.T) {
Weight: 1,
Shards: protoShards,
ShardSetId: 0,
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 1,
+ },
},
"i2": &placementpb.Instance{
Id: "i2",
@@ -114,6 +123,9 @@ func TestPlacementSnapshotsHelper(t *testing.T) {
Weight: 1,
Shards: protoShards,
ShardSetId: 1,
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 2,
+ },
},
},
ReplicaFactor: 2,
@@ -131,6 +143,9 @@ func TestPlacementSnapshotsHelper(t *testing.T) {
Weight: 1,
Shards: protoShards,
ShardSetId: 0,
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 1,
+ },
},
},
ReplicaFactor: 1,
@@ -183,6 +198,9 @@ func TestPlacementSnapshotsHelper(t *testing.T) {
Weight: 1,
Shards: protoShards,
ShardSetId: 0,
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 1,
+ },
},
},
ReplicaFactor: 1,
diff --git a/src/cluster/placement/storage/storage.go b/src/cluster/placement/storage/storage.go
index 6c30960704..027dd198a3 100644
--- a/src/cluster/placement/storage/storage.go
+++ b/src/cluster/placement/storage/storage.go
@@ -179,7 +179,7 @@ func (s *storage) Watch() (placement.Watch, error) {
if err != nil {
return nil, err
}
- return newPlacementWatch(w), nil
+ return newPlacementWatch(w, s.opts), nil
}
func (s *storage) PlacementForVersion(version int) (placement.Placement, error) {
diff --git a/src/cluster/placement/storage/watch.go b/src/cluster/placement/storage/watch.go
index e3033b38a8..6598ea9c8b 100644
--- a/src/cluster/placement/storage/watch.go
+++ b/src/cluster/placement/storage/watch.go
@@ -28,15 +28,17 @@ import (
)
var (
- errPlacementNotAvailable = errors.New("placement is not available")
+ errPlacementNotAvailable = errors.New("placement is not available")
+ errStagedPlacementNoActivePlacement = errors.New("staged placement with no active placement")
)
type w struct {
kv.ValueWatch
+ opts placement.Options
}
-func newPlacementWatch(vw kv.ValueWatch) placement.Watch {
- return &w{vw}
+func newPlacementWatch(vw kv.ValueWatch, opts placement.Options) placement.Watch {
+ return &w{ValueWatch: vw, opts: opts}
}
func (w *w) Get() (placement.Placement, error) {
@@ -44,9 +46,18 @@ func (w *w) Get() (placement.Placement, error) {
if v == nil {
return nil, errPlacementNotAvailable
}
- p, err := placementFromValue(v)
- if err != nil {
- return nil, err
+
+ if w.opts.IsStaged() {
+ p, err := placementsFromValue(v)
+ if err != nil {
+ return nil, err
+ }
+ if len(p) == 0 {
+ return nil, errStagedPlacementNoActivePlacement
+ }
+
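+		// placementsFromValue decodes the staged value into a list of placements;
+		// the last entry is treated as the active placement.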
+ return p[len(p)-1], nil
}
- return p, nil
+
+ return placementFromValue(v)
}
diff --git a/src/cluster/placement/types.go b/src/cluster/placement/types.go
index 91fc218e21..73b6edc62b 100644
--- a/src/cluster/placement/types.go
+++ b/src/cluster/placement/types.go
@@ -92,6 +92,12 @@ type Instance interface {
// SetPort sets the port of the instance.
SetPort(value uint32) Instance
+ // Metadata returns the metadata of the instance.
+ Metadata() InstanceMetadata
+
+ // SetMetadata sets the metadata of the instance.
+ SetMetadata(value InstanceMetadata) Instance
+
// Proto returns the proto representation for the Instance.
Proto() (*placementpb.Instance, error)
@@ -108,6 +114,11 @@ type Instance interface {
Clone() Instance
}
+// InstanceMetadata represents the metadata for a single Instance in the placement.
+type InstanceMetadata struct {
+ DebugPort uint32
+}
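+
+// For example, to attach a debug port to an instance (the port value is hypothetical):
+//
+//   inst := NewInstance().SetMetadata(InstanceMetadata{DebugPort: 9090})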
+
// Placement describes how instances are placed.
type Placement interface {
// InstancesForShard returns the instances for a given shard id.
@@ -503,7 +514,30 @@ type Storage interface {
// all write or update operations will persist the generated placement before returning success.
type Service interface {
Storage
+ operations
+}
+
+// Operator is a purely in-memory version of Service; it applies placement-related operations to
+// a local copy of a placement without persisting anything to backing storage. This can be useful
+// to apply multiple placement operations in a row before persisting them, e.g.:
+//
+//   func DoMultipleOps(opts placement.Options, store placement.Storage) {
+//       curPlacement, _ := store.Placement() // error handling elided
+//       op := service.NewPlacementOperator(curPlacement, opts) // constructor in placement/service
+//       op.ReplaceInstances(...)
+//       op.MarkAllShardsAvailable()
+//       store.CheckAndSet(op.Placement(), curPlacement.Version())
+// }
+type Operator interface {
+ operations
+
+ Placement() Placement
+}
+
+// operations are the methods shared by Service and Operator. This type is private because it's
+// not intended to be implemented directly; Operator and Service are the correct ways to access
+// these methods.
+type operations interface {
	// BuildInitialPlacement initializes a placement.
BuildInitialPlacement(instances []Instance, numShards int, rf int) (Placement, error)
diff --git a/src/cmd/services/m3aggregator/config/aggregator.go b/src/cmd/services/m3aggregator/config/aggregator.go
index 7773b1b9c4..307fa69212 100644
--- a/src/cmd/services/m3aggregator/config/aggregator.go
+++ b/src/cmd/services/m3aggregator/config/aggregator.go
@@ -34,6 +34,7 @@ import (
"github.com/m3db/m3/src/aggregator/aggregation/quantile/cm"
"github.com/m3db/m3/src/aggregator/aggregator"
"github.com/m3db/m3/src/aggregator/aggregator/handler"
+ "github.com/m3db/m3/src/aggregator/aggregator/handler/writer"
aggclient "github.com/m3db/m3/src/aggregator/client"
aggruntime "github.com/m3db/m3/src/aggregator/runtime"
"github.com/m3db/m3/src/aggregator/sharding"
@@ -41,12 +42,14 @@ import (
"github.com/m3db/m3/src/cluster/kv"
"github.com/m3db/m3/src/cluster/placement"
"github.com/m3db/m3/src/cluster/services"
+ "github.com/m3db/m3/src/cmd/services/m3aggregator/serve"
"github.com/m3db/m3/src/metrics/aggregation"
"github.com/m3db/m3/src/metrics/pipeline/applied"
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/config/hostid"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/retry"
"github.com/m3db/m3/src/x/sync"
@@ -57,6 +60,10 @@ var (
errEmptyJitterBucketList = errors.New("empty jitter bucket list")
)
+var (
+ defaultNumPassthroughWriters = 8
+)
+
// AggregatorConfiguration contains aggregator configuration.
type AggregatorConfiguration struct {
// HostID is the local host ID configuration.
@@ -65,6 +72,10 @@ type AggregatorConfiguration struct {
// InstanceID is the instance ID configuration.
InstanceID InstanceIDConfiguration `yaml:"instanceID"`
+	// VerboseErrors sets whether to use verbose errors when a value arrives too
+	// early or late, or for other bad-request-like operations.
+ VerboseErrors bool `yaml:"verboseErrors"`
+
// AggregationTypes configs the aggregation types.
AggregationTypes aggregation.TypesConfiguration `yaml:"aggregationTypes"`
@@ -107,6 +118,11 @@ type AggregatorConfiguration struct {
// Resign timeout.
ResignTimeout time.Duration `yaml:"resignTimeout"`
+	// ShutdownWaitTimeout, if non-zero, sets how long the aggregator waits to exit
+	// after receiving a shutdown signal. This can make coordinating graceful
+	// shutdowns between two replicas safer.
+ ShutdownWaitTimeout time.Duration `yaml:"shutdownWaitTimeout"`
+
// Flush times manager.
FlushTimesManager flushTimesManagerConfiguration `yaml:"flushTimesManager"`
@@ -119,6 +135,9 @@ type AggregatorConfiguration struct {
// Flushing handler configuration.
Flush handler.FlushHandlerConfiguration `yaml:"flush"`
+ // Passthrough controls the passthrough knobs.
+ Passthrough *passthroughConfiguration `yaml:"passthrough"`
+
// Forwarding configuration.
Forwarding forwardingConfiguration `yaml:"forwarding"`
@@ -135,7 +154,7 @@ type AggregatorConfiguration struct {
MaxTimerBatchSizePerWrite int `yaml:"maxTimerBatchSizePerWrite" validate:"min=0"`
// Default storage policies.
- DefaultStoragePolicies []policy.StoragePolicy `yaml:"defaultStoragePolicies" validate:"nonzero"`
+ DefaultStoragePolicies []policy.StoragePolicy `yaml:"defaultStoragePolicies"`
// Maximum number of cached source sets.
MaxNumCachedSourceSets *int `yaml:"maxNumCachedSourceSets"`
@@ -232,13 +251,19 @@ type InstanceIDConfiguration struct {
func (c *AggregatorConfiguration) NewAggregatorOptions(
address string,
client client.Client,
+ serveOpts serve.Options,
runtimeOptsManager aggruntime.OptionsManager,
instrumentOpts instrument.Options,
) (aggregator.Options, error) {
opts := aggregator.NewOptions().
SetInstrumentOptions(instrumentOpts).
- SetRuntimeOptionsManager(runtimeOptsManager)
+ SetRuntimeOptionsManager(runtimeOptsManager).
+ SetVerboseErrors(c.VerboseErrors)
+ rwOpts := serveOpts.RWOptions()
+ if rwOpts == nil {
+ rwOpts = xio.NewOptions()
+ }
// Set the aggregation types options.
aggTypesOpts, err := c.AggregationTypes.NewOptions(instrumentOpts)
if err != nil {
@@ -264,7 +289,8 @@ func (c *AggregatorConfiguration) NewAggregatorOptions(
// Set administrative client.
// TODO(xichen): client retry threshold likely needs to be low for faster retries.
iOpts = instrumentOpts.SetMetricsScope(scope.SubScope("client"))
- adminClient, err := c.Client.NewAdminClient(client, clock.NewOptions(), iOpts)
+ adminClient, err := c.Client.NewAdminClient(
+ client, clock.NewOptions(), iOpts, rwOpts)
if err != nil {
return nil, err
}
@@ -357,12 +383,24 @@ func (c *AggregatorConfiguration) NewAggregatorOptions(
// Set flushing handler.
iOpts = instrumentOpts.SetMetricsScope(scope.SubScope("flush-handler"))
- flushHandler, err := c.Flush.NewHandler(client, iOpts)
+ flushHandler, err := c.Flush.NewHandler(client, iOpts, rwOpts)
if err != nil {
return nil, err
}
opts = opts.SetFlushHandler(flushHandler)
+ // Set passthrough writer.
+ aggShardFn, err := hashType.AggregatedShardFn()
+ if err != nil {
+ return nil, err
+ }
+ iOpts = instrumentOpts.SetMetricsScope(scope.SubScope("passthrough-writer"))
+ passthroughWriter, err := c.newPassthroughWriter(flushHandler, iOpts, aggShardFn)
+ if err != nil {
+ return nil, err
+ }
+ opts = opts.SetPassthroughWriter(passthroughWriter)
+
// Set max allowed forwarding delay function.
jitterEnabled := flushManagerOpts.JitterEnabled()
maxJitterFn := flushManagerOpts.MaxJitterFn()
@@ -565,12 +603,20 @@ func (c placementManagerConfiguration) NewPlacementManager(
type forwardingConfiguration struct {
// MaxSingleDelay is the maximum delay for a single forward step.
MaxSingleDelay time.Duration `yaml:"maxSingleDelay"`
+	// MaxConstDelay is the maximum delay for a forward step, computed as a constant plus resolution*numForwardedTimes.
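+	// For example (hypothetical values): with maxConstDelay=1m, a metric at 10s
+	// resolution forwarded twice may be delayed by up to 1m + 2*10s = 1m20s.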
+ MaxConstDelay time.Duration `yaml:"maxConstDelay"`
}
func (c forwardingConfiguration) MaxAllowedForwardingDelayFn(
jitterEnabled bool,
maxJitterFn aggregator.FlushJitterFn,
) aggregator.MaxAllowedForwardingDelayFn {
+ if v := c.MaxConstDelay; v > 0 {
+ return func(resolution time.Duration, numForwardedTimes int) time.Duration {
+ return v + (resolution * time.Duration(numForwardedTimes))
+ }
+ }
+
return func(resolution time.Duration, numForwardedTimes int) time.Duration {
// If jittering is enabled, we use max jitter fn to determine the initial jitter.
// Otherwise, flushing may start at any point within a resolution interval so we
@@ -607,7 +653,7 @@ func (c flushTimesManagerConfiguration) NewFlushTimesManager(
return nil, err
}
scope := instrumentOpts.MetricsScope()
- retrier := c.FlushTimesPersistRetrier.NewRetrier(scope.SubScope("flush-times-persist"))
+ retrier := c.FlushTimesPersistRetrier.NewRetrier(scope.SubScope("flush-times-persist-retrier"))
flushTimesManagerOpts := aggregator.NewFlushTimesManagerOptions().
SetInstrumentOptions(instrumentOpts).
SetFlushTimesKeyFmt(c.FlushTimesKeyFmt).
@@ -661,9 +707,9 @@ func (c electionManagerConfiguration) NewElectionManager(
}
campaignOpts = campaignOpts.SetLeaderValue(leaderValue)
scope := instrumentOpts.MetricsScope()
- campaignRetryOpts := c.CampaignRetrier.NewOptions(scope.SubScope("campaign"))
- changeRetryOpts := c.ChangeRetrier.NewOptions(scope.SubScope("change"))
- resignRetryOpts := c.ResignRetrier.NewOptions(scope.SubScope("resign"))
+ campaignRetryOpts := c.CampaignRetrier.NewOptions(scope.SubScope("campaign-retrier"))
+ changeRetryOpts := c.ChangeRetrier.NewOptions(scope.SubScope("change-retrier"))
+ resignRetryOpts := c.ResignRetrier.NewOptions(scope.SubScope("resign-retrier"))
opts := aggregator.NewElectionManagerOptions().
SetInstrumentOptions(instrumentOpts).
SetElectionOptions(electionOpts).
@@ -846,3 +892,40 @@ func setMetricPrefix(
}
return fn([]byte(*str))
}
+
+// passthroughConfiguration contains the knobs for the passthrough server/writer.
+type passthroughConfiguration struct {
+ // Enabled controls whether the passthrough server/writer is enabled.
+ Enabled bool `yaml:"enabled"`
+
+ // NumWriters controls the number of passthrough writers used.
+ NumWriters int `yaml:"numWriters"`
+}
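+
+// An illustrative YAML sketch for this block (keys come from the struct tags above;
+// the values are hypothetical):
+//
+//   passthrough:
+//     enabled: true
+//     numWriters: 8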
+
+func (c *AggregatorConfiguration) newPassthroughWriter(
+ flushHandler handler.Handler,
+ iOpts instrument.Options,
+ shardFn sharding.AggregatedShardFn,
+) (writer.Writer, error) {
+	// Fall back gracefully when passthrough is not configured or disabled.
+ if c.Passthrough == nil || !c.Passthrough.Enabled {
+ iOpts.Logger().Info("passthrough writer disabled, blackholing all passthrough writes")
+ return writer.NewBlackholeWriter(), nil
+ }
+
+ count := defaultNumPassthroughWriters
+ if c.Passthrough.NumWriters != 0 {
+ count = c.Passthrough.NumWriters
+ }
+
+ writers := make([]writer.Writer, 0, count)
+ for i := 0; i < count; i++ {
+		// Use a name that does not shadow the writer package, which is needed
+		// again after the loop.
+		w, err := flushHandler.NewWriter(iOpts.MetricsScope())
+		if err != nil {
+			return nil, err
+		}
+		writers = append(writers, w)
+ }
+
+ return writer.NewShardedWriter(writers, shardFn, iOpts)
+}
diff --git a/src/cmd/services/m3aggregator/config/config.go b/src/cmd/services/m3aggregator/config/config.go
index a9ffa863cd..0154baef97 100644
--- a/src/cmd/services/m3aggregator/config/config.go
+++ b/src/cmd/services/m3aggregator/config/config.go
@@ -33,11 +33,17 @@ type Configuration struct {
// Metrics configuration.
Metrics instrument.MetricsConfiguration `yaml:"metrics"`
+ // M3Msg server configuration.
+ // Optional.
+ M3Msg *M3MsgServerConfiguration `yaml:"m3msg"`
+
// Raw TCP server configuration.
- RawTCP RawTCPServerConfiguration `yaml:"rawtcp"`
+ // Optional.
+ RawTCP *RawTCPServerConfiguration `yaml:"rawtcp"`
// HTTP server configuration.
- HTTP HTTPServerConfiguration `yaml:"http"`
+ // Optional.
+ HTTP *HTTPServerConfiguration `yaml:"http"`
// Client configuration for key value store.
KVClient KVClientConfiguration `yaml:"kvClient" validate:"nonzero"`
diff --git a/src/cmd/services/m3aggregator/config/server.go b/src/cmd/services/m3aggregator/config/server.go
index 7eda325233..d979c749f5 100644
--- a/src/cmd/services/m3aggregator/config/server.go
+++ b/src/cmd/services/m3aggregator/config/server.go
@@ -24,15 +24,40 @@ import (
"time"
"github.com/m3db/m3/src/aggregator/server/http"
+ "github.com/m3db/m3/src/aggregator/server/m3msg"
"github.com/m3db/m3/src/aggregator/server/rawtcp"
"github.com/m3db/m3/src/metrics/encoding/msgpack"
"github.com/m3db/m3/src/metrics/encoding/protobuf"
+ "github.com/m3db/m3/src/msg/consumer"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/retry"
xserver "github.com/m3db/m3/src/x/server"
)
+// M3MsgServerConfiguration contains M3Msg server configuration.
+type M3MsgServerConfiguration struct {
+ // Server is the server configuration.
+ Server xserver.Configuration `yaml:"server"`
+
+ // Consumer is the M3Msg consumer configuration.
+ Consumer consumer.Configuration `yaml:"consumer"`
+}
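+
+// An illustrative YAML sketch (the top-level m3msg key comes from the Configuration
+// struct in config.go; listenAddress is an assumed field of xserver.Configuration):
+//
+//   m3msg:
+//     server:
+//       listenAddress: 0.0.0.0:6000
+//     consumer: {}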
+
+// NewServerOptions creates a new set of M3Msg server options.
+func (c *M3MsgServerConfiguration) NewServerOptions(
+ instrumentOpts instrument.Options,
+) (m3msg.Options, error) {
+ opts := m3msg.NewOptions().
+ SetInstrumentOptions(instrumentOpts).
+ SetServerOptions(c.Server.NewOptions(instrumentOpts)).
+ SetConsumerOptions(c.Consumer.NewOptions(instrumentOpts))
+ if err := opts.Validate(); err != nil {
+ return nil, err
+ }
+ return opts, nil
+}
+
// RawTCPServerConfiguration contains raw TCP server configuration.
type RawTCPServerConfiguration struct {
// Raw TCP server listening address.
diff --git a/src/cmd/services/m3aggregator/main/main.go b/src/cmd/services/m3aggregator/main/main.go
index 4948126d61..fd19406a58 100644
--- a/src/cmd/services/m3aggregator/main/main.go
+++ b/src/cmd/services/m3aggregator/main/main.go
@@ -24,23 +24,12 @@ import (
"flag"
"fmt"
"os"
- "os/signal"
- "syscall"
- "time"
- m3aggregator "github.com/m3db/m3/src/aggregator/aggregator"
+ "github.com/m3db/m3/src/aggregator/server"
"github.com/m3db/m3/src/cmd/services/m3aggregator/config"
- "github.com/m3db/m3/src/cmd/services/m3aggregator/serve"
xconfig "github.com/m3db/m3/src/x/config"
"github.com/m3db/m3/src/x/config/configflag"
"github.com/m3db/m3/src/x/etcd"
- "github.com/m3db/m3/src/x/instrument"
-
- "go.uber.org/zap"
-)
-
-const (
- gracefulShutdownTimeout = 15 * time.Second
)
func main() {
@@ -60,97 +49,7 @@ func main() {
os.Exit(1)
}
- // Create logger and metrics scope.
- logger, err := cfg.Logging.BuildLogger()
- if err != nil {
- // NB(r): Use fmt.Fprintf(os.Stderr, ...) to avoid etcd.SetGlobals()
- // sending stdlib "log" to black hole. Don't remove unless with good reason.
- fmt.Fprintf(os.Stderr, "error creating logger: %v\n", err)
- os.Exit(1)
- }
- defer logger.Sync()
-
- xconfig.WarnOnDeprecation(cfg, logger)
-
- scope, closer, err := cfg.Metrics.NewRootScope()
- if err != nil {
- logger.Fatal("error creating metrics root scope", zap.Error(err))
- }
- defer closer.Close()
- instrumentOpts := instrument.NewOptions().
- SetLogger(logger).
- SetMetricsScope(scope).
- SetMetricsSamplingRate(cfg.Metrics.SampleRate()).
- SetReportInterval(cfg.Metrics.ReportInterval())
-
- // Create the raw TCP server options.
- rawTCPAddr := cfg.RawTCP.ListenAddress
- rawTCPServerScope := scope.SubScope("rawtcp-server").Tagged(map[string]string{"server": "rawtcp"})
- iOpts := instrumentOpts.SetMetricsScope(rawTCPServerScope)
- rawTCPServerOpts := cfg.RawTCP.NewServerOptions(iOpts)
-
- // Create the http server options.
- httpAddr := cfg.HTTP.ListenAddress
- httpServerOpts := cfg.HTTP.NewServerOptions()
-
- // Create the kv client.
- iOpts = instrumentOpts.SetMetricsScope(scope.SubScope("kv-client"))
- client, err := cfg.KVClient.NewKVClient(iOpts)
- if err != nil {
- logger.Fatal("error creating the kv client", zap.Error(err))
- }
-
- // Create the runtime options manager.
- runtimeOptsManager := cfg.RuntimeOptions.NewRuntimeOptionsManager()
-
- // Create the aggregator.
- iOpts = instrumentOpts.SetMetricsScope(scope.SubScope("aggregator"))
- aggregatorOpts, err := cfg.Aggregator.NewAggregatorOptions(rawTCPAddr, client, runtimeOptsManager, iOpts)
- if err != nil {
- logger.Fatal("error creating aggregator options", zap.Error(err))
- }
- aggregator := m3aggregator.NewAggregator(aggregatorOpts)
- if err := aggregator.Open(); err != nil {
- logger.Fatal("error opening the aggregator", zap.Error(err))
- }
-
- // Watch runtime option changes after aggregator is open.
- placementManager := aggregatorOpts.PlacementManager()
- cfg.RuntimeOptions.WatchRuntimeOptionChanges(client, runtimeOptsManager, placementManager, logger)
-
- doneCh := make(chan struct{})
- closedCh := make(chan struct{})
- go func() {
- if err := serve.Serve(
- rawTCPAddr,
- rawTCPServerOpts,
- httpAddr,
- httpServerOpts,
- aggregator,
- doneCh,
- instrumentOpts,
- ); err != nil {
- logger.Fatal("could not start serving traffic", zap.Error(err))
- }
- logger.Debug("server closed")
- close(closedCh)
- }()
-
- // Handle interrupts.
- logger.Warn("interrupt", zap.Any("signal", interrupt()))
-
- close(doneCh)
-
- select {
- case <-closedCh:
- logger.Info("server closed clean")
- case <-time.After(gracefulShutdownTimeout):
- logger.Info("server closed due to timeout", zap.Duration("timeout", gracefulShutdownTimeout))
- }
-}
-
-func interrupt() error {
- c := make(chan os.Signal, 1)
- signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
- return fmt.Errorf("%s", <-c)
+ server.Run(server.RunOptions{
+ Config: cfg,
+ })
}
diff --git a/src/cmd/services/m3aggregator/serve/options.go b/src/cmd/services/m3aggregator/serve/options.go
new file mode 100644
index 0000000000..8fa16f82f5
--- /dev/null
+++ b/src/cmd/services/m3aggregator/serve/options.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package serve
+
+import (
+ httpserver "github.com/m3db/m3/src/aggregator/server/http"
+ m3msgserver "github.com/m3db/m3/src/aggregator/server/m3msg"
+ rawtcpserver "github.com/m3db/m3/src/aggregator/server/rawtcp"
+ "github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
+)
+
+// Options are aggregator options.
+type Options interface {
+ // SetM3MsgAddr sets the M3 message address.
+ SetM3MsgAddr(value string) Options
+
+ // M3MsgAddr returns the M3 message address.
+ M3MsgAddr() string
+
+ // SetM3MsgServerOpts sets the M3MsgServerOpts.
+ SetM3MsgServerOpts(value m3msgserver.Options) Options
+
+ // M3MsgServerOpts returns the M3MsgServerOpts.
+ M3MsgServerOpts() m3msgserver.Options
+
+ // SetRawTCPAddr sets the RawTCP address.
+ SetRawTCPAddr(value string) Options
+
+ // RawTCPAddr returns the RawTCP address.
+ RawTCPAddr() string
+
+ // SetRawTCPServerOpts sets the RawTCPServerOpts.
+ SetRawTCPServerOpts(value rawtcpserver.Options) Options
+
+ // RawTCPServerOpts returns the RawTCPServerOpts.
+ RawTCPServerOpts() rawtcpserver.Options
+
+ // SetHTTPAddr sets the HTTP address.
+ SetHTTPAddr(value string) Options
+
+ // HTTPAddr returns the HTTP address.
+ HTTPAddr() string
+
+ // SetHTTPServerOpts sets the HTTPServerOpts.
+ SetHTTPServerOpts(value httpserver.Options) Options
+
+ // HTTPServerOpts returns the HTTPServerOpts.
+ HTTPServerOpts() httpserver.Options
+
+ // SetInstrumentOpts sets the InstrumentOpts.
+ SetInstrumentOpts(value instrument.Options) Options
+
+ // InstrumentOpts returns the InstrumentOpts.
+ InstrumentOpts() instrument.Options
+
+ // SetRWOptions sets RW options.
+ SetRWOptions(value xio.Options) Options
+
+ // RWOptions returns the RW options.
+ RWOptions() xio.Options
+}
+
+type options struct {
+ m3msgAddr string
+ m3msgServerOpts m3msgserver.Options
+ rawTCPAddr string
+ rawTCPServerOpts rawtcpserver.Options
+ httpAddr string
+ httpServerOpts httpserver.Options
+ iOpts instrument.Options
+ rwOpts xio.Options
+}
+
+// NewOptions creates a new aggregator server options.
+func NewOptions(iOpts instrument.Options) Options {
+ return &options{
+ iOpts: iOpts,
+ rwOpts: xio.NewOptions(),
+ }
+}
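+
+// A usage sketch (hypothetical addresses); note that each setter returns a
+// copy, so the result of the final call must be kept:
+//
+//	opts := NewOptions(iOpts).
+//		SetRawTCPAddr("0.0.0.0:6000").
+//		SetHTTPAddr("0.0.0.0:6001")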
+
+func (o *options) SetM3MsgAddr(value string) Options {
+ opts := *o
+ opts.m3msgAddr = value
+ return &opts
+}
+
+func (o *options) M3MsgAddr() string {
+ return o.m3msgAddr
+}
+
+func (o *options) SetM3MsgServerOpts(value m3msgserver.Options) Options {
+ opts := *o
+ opts.m3msgServerOpts = value
+ return &opts
+}
+
+func (o *options) M3MsgServerOpts() m3msgserver.Options {
+ return o.m3msgServerOpts
+}
+
+func (o *options) SetRawTCPAddr(value string) Options {
+ opts := *o
+ opts.rawTCPAddr = value
+ return &opts
+}
+
+func (o *options) RawTCPAddr() string {
+ return o.rawTCPAddr
+}
+
+func (o *options) SetRawTCPServerOpts(value rawtcpserver.Options) Options {
+ opts := *o
+ opts.rawTCPServerOpts = value
+ return &opts
+}
+
+func (o *options) RawTCPServerOpts() rawtcpserver.Options {
+ return o.rawTCPServerOpts
+}
+
+func (o *options) SetHTTPAddr(value string) Options {
+ opts := *o
+ opts.httpAddr = value
+ return &opts
+}
+
+func (o *options) HTTPAddr() string {
+ return o.httpAddr
+}
+
+func (o *options) SetHTTPServerOpts(value httpserver.Options) Options {
+ opts := *o
+ opts.httpServerOpts = value
+ return &opts
+}
+
+func (o *options) HTTPServerOpts() httpserver.Options {
+ return o.httpServerOpts
+}
+
+func (o *options) SetInstrumentOpts(value instrument.Options) Options {
+ opts := *o
+ opts.iOpts = value
+ return &opts
+}
+
+func (o *options) InstrumentOpts() instrument.Options {
+ return o.iOpts
+}
+
+func (o *options) SetRWOptions(value xio.Options) Options {
+ opts := *o
+ opts.rwOpts = value
+ return &opts
+}
+
+func (o *options) RWOptions() xio.Options {
+ return o.rwOpts
+}
diff --git a/src/cmd/services/m3aggregator/serve/serve.go b/src/cmd/services/m3aggregator/serve/serve.go
index f4c47ad4d5..62c29ca747 100644
--- a/src/cmd/services/m3aggregator/serve/serve.go
+++ b/src/cmd/services/m3aggregator/serve/serve.go
@@ -25,36 +25,54 @@ import (
"github.com/m3db/m3/src/aggregator/aggregator"
httpserver "github.com/m3db/m3/src/aggregator/server/http"
+ m3msgserver "github.com/m3db/m3/src/aggregator/server/m3msg"
rawtcpserver "github.com/m3db/m3/src/aggregator/server/rawtcp"
- "github.com/m3db/m3/src/x/instrument"
+
+ "go.uber.org/zap"
)
// Serve starts serving RPC traffic.
func Serve(
- rawTCPAddr string,
- rawTCPServerOpts rawtcpserver.Options,
- httpAddr string,
- httpServerOpts httpserver.Options,
aggregator aggregator.Aggregator,
doneCh chan struct{},
- iOpts instrument.Options,
+ opts Options,
) error {
- log := rawTCPServerOpts.InstrumentOptions().Logger().Sugar()
+ iOpts := opts.InstrumentOpts()
+ log := iOpts.Logger()
defer aggregator.Close()
- rawTCPServer := rawtcpserver.NewServer(rawTCPAddr, aggregator, rawTCPServerOpts)
- if err := rawTCPServer.ListenAndServe(); err != nil {
- return fmt.Errorf("could not start raw TCP server at %s: %v", rawTCPAddr, err)
+ if m3msgAddr := opts.M3MsgAddr(); m3msgAddr != "" {
+ serverOpts := opts.M3MsgServerOpts()
+ m3msgServer, err := m3msgserver.NewServer(m3msgAddr, aggregator, serverOpts)
+ if err != nil {
+ return fmt.Errorf("could not create m3msg server: addr=%s, err=%v", m3msgAddr, err)
+ }
+ if err := m3msgServer.ListenAndServe(); err != nil {
+ return fmt.Errorf("could not start m3msg server at: addr=%s, err=%v", m3msgAddr, err)
+ }
+ defer m3msgServer.Close()
+ log.Info("m3msg server listening", zap.String("addr", m3msgAddr))
+ }
+
+ if rawTCPAddr := opts.RawTCPAddr(); rawTCPAddr != "" {
+ serverOpts := opts.RawTCPServerOpts()
+ rawTCPServer := rawtcpserver.NewServer(rawTCPAddr, aggregator, serverOpts)
+ if err := rawTCPServer.ListenAndServe(); err != nil {
+ return fmt.Errorf("could not start raw TCP server at: addr=%s, err=%v", rawTCPAddr, err)
+ }
+ defer rawTCPServer.Close()
+ log.Info("raw TCP server listening", zap.String("addr", rawTCPAddr))
}
- defer rawTCPServer.Close()
- log.Infof("raw TCP server: listening on %s", rawTCPAddr)
- httpServer := httpserver.NewServer(httpAddr, aggregator, httpServerOpts, iOpts)
- if err := httpServer.ListenAndServe(); err != nil {
- return fmt.Errorf("could not start http server at %s: %v", httpAddr, err)
+ if httpAddr := opts.HTTPAddr(); httpAddr != "" {
+ serverOpts := opts.HTTPServerOpts()
+ httpServer := httpserver.NewServer(httpAddr, aggregator, serverOpts, iOpts)
+ if err := httpServer.ListenAndServe(); err != nil {
+ return fmt.Errorf("could not start http server at: addr=%s, err=%v", httpAddr, err)
+ }
+ defer httpServer.Close()
+ log.Info("http server listening", zap.String("addr", httpAddr))
}
- defer httpServer.Close()
- log.Infof("http server: listening on %s", httpAddr)
// Wait for exit signal.
<-doneCh
diff --git a/src/cmd/services/m3comparator/main/filterer.go b/src/cmd/services/m3comparator/main/filterer.go
new file mode 100644
index 0000000000..e0470589c8
--- /dev/null
+++ b/src/cmd/services/m3comparator/main/filterer.go
@@ -0,0 +1,115 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+
+ "github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/x/ident"
+)
+
+func filter(series encoding.SeriesIterators, tagMatchers models.Matchers) encoding.SeriesIterators {
+ var filtered []encoding.SeriesIterator
+
+ for _, iter := range series.Iters() {
+ if matchesAll(tagMatchers, iter.Tags()) {
+ filtered = append(filtered, iter)
+ }
+ }
+
+ return encoding.NewSeriesIterators(filtered, nil)
+}
+
+func matchesAll(tagMatchers models.Matchers, tagsIter ident.TagIterator) bool {
+ for _, tagMatcher := range tagMatchers {
+ if !isIgnored(tagMatcher) && !matchesOne(tagMatcher, tagsIter) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func matchesOne(tagMatcher models.Matcher, tagsIter ident.TagIterator) bool {
+ tag := findTag(tagMatcher.Name, tagsIter)
+
+ return matches(tagMatcher, tag)
+}
+
+func isIgnored(tagMatcher models.Matcher) bool {
+ return tagMatcher.String() == `role="remote"` // this matcher gets injected by Prometheus
+}
+
+func findTag(name []byte, tagsIter ident.TagIterator) ident.Tag {
+ tagsIter = tagsIter.Duplicate()
+ defer tagsIter.Close()
+
+ for tagsIter.Next() {
+ tag := tagsIter.Current()
+ if bytes.Equal(tag.Name.Bytes(), name) {
+ return tag
+ }
+ }
+
+ return ident.StringTag("", "")
+}
+
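+// matches reports whether the given tag satisfies the matcher. Regexp
+// matchers are anchored as ^pattern$, so (for example) the value "bar"
+// satisfies the pattern "b.+" in full while "abar" does not, mirroring
+// Prometheus's full-string regex semantics.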
+func matches(tagMatcher models.Matcher, tag ident.Tag) bool {
+ var (
+ name = tag.Name.Bytes()
+ value = tag.Value.Bytes()
+ invert = false
+ )
+
+ switch tagMatcher.Type {
+
+ case models.MatchNotEqual:
+ invert = true
+ fallthrough
+
+ case models.MatchEqual:
+ return bytes.Equal(tagMatcher.Value, value) != invert
+
+ case models.MatchNotRegexp:
+ invert = true
+ fallthrough
+
+ case models.MatchRegexp:
+ m, _ := regexp.Match(fmt.Sprintf("^%s$", tagMatcher.Value), value)
+ return m != invert
+
+ case models.MatchNotField:
+ invert = true
+ fallthrough
+
+ case models.MatchField:
+ return bytes.Equal(tagMatcher.Name, name) != invert
+
+ case models.MatchAll:
+ return true
+ }
+
+ return false
+}
diff --git a/src/cmd/services/m3comparator/main/filterer_test.go b/src/cmd/services/m3comparator/main/filterer_test.go
new file mode 100644
index 0000000000..365b1a7862
--- /dev/null
+++ b/src/cmd/services/m3comparator/main/filterer_test.go
@@ -0,0 +1,267 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/cmd/services/m3comparator/main/parser"
+
+ "github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/query/models"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ series1 = parser.Tags{
+ parser.NewTag("foo", "bar"),
+ parser.NewTag("baz", "quix"),
+ }
+
+ series2 = parser.Tags{
+ parser.NewTag("alpha", "a"),
+ parser.NewTag("beta", "b"),
+ }
+
+ allSeries = list(series1, series2)
+)
+
+func TestFilter(t *testing.T) {
+ testCases := []struct {
+ name string
+ givenMatchers models.Matchers
+ wantedSeries []parser.IngestSeries
+ }{
+ {
+ name: "No matchers",
+ givenMatchers: models.Matchers{},
+ wantedSeries: list(series1, series2),
+ },
+
+ {
+ name: "MatchEqual on one tag",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchEqual, "bar"),
+ },
+ wantedSeries: list(series1),
+ },
+ {
+ name: "MatchEqual on two tags",
+ givenMatchers: models.Matchers{
+ tagMatcher("alpha", models.MatchEqual, "a"),
+ tagMatcher("beta", models.MatchEqual, "b"),
+ },
+ wantedSeries: list(series2),
+ },
+ {
+ name: "Two MatchEqual excluding every series each",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchEqual, "bar"),
+ tagMatcher("beta", models.MatchEqual, "b"),
+ },
+ wantedSeries: list(),
+ },
+ {
+ name: "MatchEqual excluding all series",
+ givenMatchers: models.Matchers{
+ tagMatcher("unknown", models.MatchEqual, "whatever"),
+ },
+ wantedSeries: list(),
+ },
+ {
+ name: "MatchEqual on empty value",
+ givenMatchers: models.Matchers{
+ tagMatcher("unknown", models.MatchEqual, ""),
+ },
+ wantedSeries: list(series1, series2),
+ },
+
+ {
+ name: "MatchNotEqual on one tag",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchNotEqual, "bar"),
+ },
+ wantedSeries: list(series2),
+ },
+ {
+ name: "MatchNotEqual on two tags",
+ givenMatchers: models.Matchers{
+ tagMatcher("alpha", models.MatchNotEqual, "a"),
+ tagMatcher("beta", models.MatchNotEqual, "b"),
+ },
+ wantedSeries: list(series1),
+ },
+ {
+ name: "Two MatchNotEqual excluding every series each",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchNotEqual, "bar"),
+ tagMatcher("beta", models.MatchNotEqual, "b"),
+ },
+ wantedSeries: list(),
+ },
+ {
+ name: "MatchNotEqual accepting all series",
+ givenMatchers: models.Matchers{
+ tagMatcher("unknown", models.MatchNotEqual, "whatever"),
+ },
+ wantedSeries: list(series1, series2),
+ },
+ {
+ name: "MatchNotEqual on empty value",
+ givenMatchers: models.Matchers{
+ tagMatcher("unknown", models.MatchNotEqual, ""),
+ },
+ wantedSeries: list(),
+ },
+
+ {
+ name: "MatchRegexp on full value",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchRegexp, "bar"),
+ },
+ wantedSeries: list(series1),
+ },
+ {
+ name: "MatchRegexp with wildcard",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchRegexp, "b.+"),
+ },
+ wantedSeries: list(series1),
+ },
+ {
+ name: "MatchRegexp with alternatives",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchRegexp, "bax|bar|baz"),
+ },
+ wantedSeries: list(series1),
+ },
+ {
+ name: "MatchRegexp unmatched",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchRegexp, "bax|baz"),
+ },
+ wantedSeries: list(),
+ },
+
+ {
+ name: "MatchNotRegexp on full value",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchNotRegexp, "bar"),
+ },
+ wantedSeries: list(series2),
+ },
+ {
+ name: "MatchNotRegexp with wildcard",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchNotRegexp, "b.+"),
+ },
+ wantedSeries: list(series2),
+ },
+ {
+ name: "MatchNotRegexp with alternatives",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchNotRegexp, "bax|bar|baz"),
+ },
+ wantedSeries: list(series2),
+ },
+ {
+ name: "MatchNotRegexp matching all",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchNotRegexp, "bax|baz"),
+ },
+ wantedSeries: list(series1, series2),
+ },
+
+ {
+ name: "MatchField",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchField, ""),
+ },
+ wantedSeries: list(series1),
+ },
+
+ {
+ name: "MatchNotField",
+ givenMatchers: models.Matchers{
+ tagMatcher("foo", models.MatchNotField, ""),
+ },
+ wantedSeries: list(series2),
+ },
+
+ {
+ name: `Ignore 'role="remote"' matcher added by Prometheus`,
+ givenMatchers: models.Matchers{
+ tagMatcher("role", models.MatchEqual, "remote"),
+ },
+ wantedSeries: list(series1, series2),
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ unfilteredIters, err := toSeriesIterators(allSeries)
+ require.NoError(t, err)
+ filteredIters := filter(unfilteredIters, tc.givenMatchers)
+ filteredSeries := fromSeriesIterators(filteredIters)
+ assert.Equal(t, tc.wantedSeries, filteredSeries)
+ })
+ }
+}
+
+func tagMatcher(tag string, matchType models.MatchType, value string) models.Matcher {
+ return models.Matcher{
+ Type: matchType,
+ Name: []byte(tag),
+ Value: []byte(value),
+ }
+}
+
+func list(tagsList ...parser.Tags) []parser.IngestSeries {
+ list := make([]parser.IngestSeries, 0, len(tagsList))
+
+ for _, tags := range tagsList {
+ list = append(list, parser.IngestSeries{Tags: tags})
+ }
+
+ return list
+}
+
+func toSeriesIterators(series []parser.IngestSeries) (encoding.SeriesIterators, error) {
+ return parser.BuildSeriesIterators(series, time.Now(), time.Hour, iteratorOpts)
+}
+
+func fromSeriesIterators(seriesIters encoding.SeriesIterators) []parser.IngestSeries {
+ result := make([]parser.IngestSeries, 0, seriesIters.Len())
+ for _, iter := range seriesIters.Iters() {
+ tagsIter := iter.Tags()
+ tags := make(parser.Tags, 0, tagsIter.Len())
+ for tagsIter.Next() {
+ tag := tagsIter.Current()
+ tags = append(tags, parser.NewTag(tag.Name.String(), tag.Value.String()))
+ }
+ result = append(result, parser.IngestSeries{Tags: tags})
+ }
+
+ return result
+}
diff --git a/src/cmd/services/m3comparator/main/main.go b/src/cmd/services/m3comparator/main/main.go
index f78a026c04..5f95f212c4 100644
--- a/src/cmd/services/m3comparator/main/main.go
+++ b/src/cmd/services/m3comparator/main/main.go
@@ -22,8 +22,10 @@ package main
import (
"net"
+ "net/http"
"time"
+ "github.com/m3db/m3/src/cmd/services/m3comparator/main/parser"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/query/models"
@@ -35,25 +37,58 @@ import (
"go.uber.org/zap"
)
-func main() {
- var (
- iterPools = pools.BuildIteratorPools()
- poolWrapper = pools.NewPoolsWrapper(iterPools)
+var (
+ iterPools = pools.BuildIteratorPools(
+ pools.BuildIteratorPoolsOptions{})
+ poolWrapper = pools.NewPoolsWrapper(iterPools)
- iOpts = instrument.NewOptions()
- logger = iOpts.Logger()
+ iOpts = instrument.NewOptions()
+ logger = iOpts.Logger()
+ tagOptions = models.NewTagOptions()
- encoderPoolOpts = pool.NewObjectPoolOptions()
- encoderPool = encoding.NewEncoderPool(encoderPoolOpts)
- )
+ encoderPoolOpts = pool.NewObjectPoolOptions()
+ encoderPool = encoding.NewEncoderPool(encoderPoolOpts)
+
+ checkedBytesPool pool.CheckedBytesPool
+ encodingOpts encoding.Options
+)
+
+func init() {
+ buckets := []pool.Bucket{{Capacity: 10, Count: 10}}
+ newBackingBytesPool := func(s []pool.Bucket) pool.BytesPool {
+ return pool.NewBytesPool(s, nil)
+ }
+
+ checkedBytesPool = pool.NewCheckedBytesPool(buckets, nil, newBackingBytesPool)
+ checkedBytesPool.Init()
+
+ encodingOpts = encoding.NewOptions().SetEncoderPool(encoderPool).SetBytesPool(checkedBytesPool)
encoderPool.Init(func() encoding.Encoder {
- return m3tsz.NewEncoder(time.Now(), nil, true, encoding.NewOptions())
+ return m3tsz.NewEncoder(time.Time{}, nil, true, encodingOpts)
})
+}
- querier := &querier{
- encoderPool: encoderPool,
- iteratorPools: iterPools,
+func main() {
+ opts := parser.Options{
+ EncoderPool: encoderPool,
+ IteratorPools: iterPools,
+ TagOptions: tagOptions,
+ InstrumentOptions: iOpts,
+ }
+
+ seriesLoader := newHTTPSeriesLoadHandler(opts)
+
+ querier, err := newQuerier(
+ opts,
+ seriesLoader,
+ time.Hour*12,
+ time.Second*15,
+ 5,
+ )
+ if err != nil {
+ logger.Error("could not create querier", zap.Error(err))
+ return
}
server := remote.NewGRPCServer(
@@ -71,6 +106,13 @@ func main() {
return
}
+ loaderAddr := "0.0.0.0:9001"
+ go func() {
+ if err := http.ListenAndServe(loaderAddr, seriesLoader); err != nil {
+ logger.Error("series load handler failed", zap.Error(err))
+ }
+ }()
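+ // Series data can then be posted to the loader over HTTP, for example
+ // (hypothetical invocation): curl -X POST http://localhost:9001/ -d @series.json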
+
if err := server.Serve(listener); err != nil {
logger.Error("serve error", zap.Error(err))
}
diff --git a/src/cmd/services/m3comparator/main/parser/parser.go b/src/cmd/services/m3comparator/main/parser/parser.go
new file mode 100644
index 0000000000..1c5902b30d
--- /dev/null
+++ b/src/cmd/services/m3comparator/main/parser/parser.go
@@ -0,0 +1,130 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package parser
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Series is a flat JSON serializable representation of the series.
+type Series struct {
+ id string
+
+ Start time.Time `json:"start"`
+ End time.Time `json:"end"`
+ Tags Tags `json:"tags"`
+ Datapoints Datapoints `json:"datapoints"`
+}
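+
+// An example JSON payload (hypothetical values) for a single series:
+//
+//	[{
+//	  "start": "2020-01-01T00:00:00Z",
+//	  "end": "2020-01-01T01:00:00Z",
+//	  "tags": [["__name__", "foo"], ["bar", "baz"]],
+//	  "datapoints": [{"val": "42", "ts": "2020-01-01T00:00:30Z"}]
+//	}]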
+
+// Tag is a simple JSON serializable representation of a tag.
+type Tag [2]string
+
+// NewTag creates a new tag with a given name and value.
+func NewTag(name, value string) Tag {
+ return Tag{name, value}
+}
+
+// Name returns the tag name.
+func (t Tag) Name() string {
+ return t[0]
+}
+
+// Value returns the tag value.
+func (t Tag) Value() string {
+ return t[1]
+}
+
+// Tags is a simple JSON serializable representation of tags.
+type Tags []Tag
+
+// Get returns a list of tag values with the given name.
+func (t Tags) Get(name string) []string {
+ // NB: the number of matching values is almost always 0 or 1.
+ values := make([]string, 0, 2)
+ // NB: this list isn't expected to get very long, so it uses a linear scan.
+ // If this becomes a problem in the future, `Tags` can be converted to a map.
+ for _, t := range t {
+ if t.Name() == name {
+ values = append(values, t.Value())
+ }
+ }
+
+ return values
+}
+
+// Datapoints is a JSON serializable list of values for the series.
+type Datapoints []Datapoint
+
+// Datapoint is a JSON serializable datapoint for the series.
+type Datapoint struct {
+ Value Value `json:"val"`
+ Timestamp time.Time `json:"ts"`
+}
+
+// Value is a JSON serializable float64 that allows NaNs.
+type Value float64
+
+// MarshalJSON returns state as the JSON encoding of a Value.
+func (v Value) MarshalJSON() ([]byte, error) {
+ return json.Marshal(fmt.Sprintf("%g", float64(v)))
+}
+
+// UnmarshalJSON unmarshals JSON-encoded data into a Value.
+func (v *Value) UnmarshalJSON(data []byte) error {
+ var str string
+ err := json.Unmarshal(data, &str)
+ if err != nil {
+ return err
+ }
+
+ f, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+
+ *v = Value(f)
+ return nil
+}
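+
+// NB: encoding Value as a quoted string is what lets NaN and ±Inf survive a
+// JSON round trip; encoding/json rejects a bare NaN for a plain float64.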
+
+// IDOrGenID gets the ID for this result.
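+// The generated ID is the sorted, comma-delimited tag list; for example,
+// tags {foo:bar, baz:qux} (hypothetical) produce "baz:qux,foo:bar,".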
+func (r *Series) IDOrGenID() string {
+ if len(r.id) == 0 {
+ tags := make(sort.StringSlice, 0, len(r.Tags))
+ for _, v := range r.Tags {
+ tags = append(tags, fmt.Sprintf("%s:%s,", v[0], v[1]))
+ }
+
+ sort.Sort(tags)
+ var sb strings.Builder
+ for _, t := range tags {
+ sb.WriteString(t)
+ }
+
+ r.id = sb.String()
+ }
+
+ return r.id
+}
diff --git a/src/cmd/services/m3comparator/main/parser/parser_test.go b/src/cmd/services/m3comparator/main/parser/parser_test.go
new file mode 100644
index 0000000000..5459fddc24
--- /dev/null
+++ b/src/cmd/services/m3comparator/main/parser/parser_test.go
@@ -0,0 +1,59 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package parser
+
+import (
+ "encoding/json"
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestValueJSONRoundTrip(t *testing.T) {
+ for _, input := range []struct {
+ val Value
+ str string
+ }{
+ {val: 1231243.123123, str: `"1.231243123123e+06"`},
+ {val: 0.000000001, str: `"1e-09"`},
+ {val: Value(math.NaN()), str: `"NaN"`},
+ {val: Value(math.Inf(1)), str: `"+Inf"`},
+ {val: Value(math.Inf(-1)), str: `"-Inf"`},
+ } {
+ b, err := json.Marshal(input.val)
+ require.NoError(t, err)
+ assert.Equal(t, input.str, string(b))
+
+ var val Value
+ require.NoError(t, json.Unmarshal([]byte(input.str), &val))
+ if math.IsNaN(float64(input.val)) {
+ assert.True(t, math.IsNaN(float64(val)))
+ } else if math.IsInf(float64(input.val), 1) {
+ assert.True(t, math.IsInf(float64(val), 1))
+ } else if math.IsInf(float64(input.val), -1) {
+ assert.True(t, math.IsInf(float64(val), -1))
+ } else {
+ assert.Equal(t, input.val, val)
+ }
+ }
+}
diff --git a/src/cmd/services/m3comparator/main/series_iterator_builder.go b/src/cmd/services/m3comparator/main/parser/series_iterator_builder.go
similarity index 64%
rename from src/cmd/services/m3comparator/main/series_iterator_builder.go
rename to src/cmd/services/m3comparator/main/parser/series_iterator_builder.go
index 9c47f7835b..c38252f465 100644
--- a/src/cmd/services/m3comparator/main/series_iterator_builder.go
+++ b/src/cmd/services/m3comparator/main/parser/series_iterator_builder.go
@@ -18,7 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package main
+package parser
import (
"io"
@@ -27,6 +27,7 @@ import (
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/x/ident"
@@ -36,12 +37,13 @@ import (
const sep rune = '!'
const tagSep rune = '.'
-type iteratorOptions struct {
- blockSize time.Duration
- start time.Time
- encoderPool encoding.EncoderPool
- iteratorPools encoding.IteratorPools
- tagOptions models.TagOptions
+// Data is a set of datapoints.
+type Data []ts.Datapoint
+
+// IngestSeries is a series that can be ingested by the parser.
+type IngestSeries struct {
+ Datapoints []Data
+ Tags Tags
}
var iterAlloc = func(r io.Reader, _ namespace.SchemaDescr) encoding.ReaderIterator {
@@ -49,12 +51,13 @@ var iterAlloc = func(r io.Reader, _ namespace.SchemaDescr) encoding.ReaderIterat
}
func buildBlockReader(
- block seriesBlock,
+ block Data,
start time.Time,
- opts iteratorOptions,
+ blockSize time.Duration,
+ opts Options,
) ([]xio.BlockReader, error) {
- encoder := opts.encoderPool.Get()
- encoder.Reset(start, len(block), nil)
+ encoder := opts.EncoderPool.Get()
+ encoder.Reset(time.Now(), len(block), nil)
for _, dp := range block {
err := encoder.Encode(dp, xtime.Second, nil)
if err != nil {
@@ -65,24 +68,26 @@ func buildBlockReader(
segment := encoder.Discard()
return []xio.BlockReader{
- xio.BlockReader{
+ {
SegmentReader: xio.NewSegmentReader(segment),
Start: start,
- BlockSize: opts.blockSize,
+ BlockSize: blockSize,
},
}, nil
}
func buildTagIteratorAndID(
- tagMap tagMap,
+ parsedTags Tags,
opts models.TagOptions,
) (ident.TagIterator, ident.ID) {
var (
tags = ident.Tags{}
- modelTags = models.NewTags(len(tagMap), opts)
+ modelTags = models.NewTags(len(parsedTags), opts)
)
- for name, value := range tagMap {
+ for _, tag := range parsedTags {
+ name := tag.Name()
+ value := tag.Value()
modelTags = modelTags.AddOrUpdateTag(models.Tag{
Name: []byte(name),
Value: []byte(value),
@@ -96,62 +101,66 @@ func buildTagIteratorAndID(
}
func buildSeriesIterator(
- series series,
- opts iteratorOptions,
+ series IngestSeries,
+ start time.Time,
+ blockSize time.Duration,
+ opts Options,
) (encoding.SeriesIterator, error) {
var (
- blocks = series.blocks
- tags = series.tags
- readers = make([][]xio.BlockReader, 0, len(blocks))
- start = opts.start
+ points = series.Datapoints
+ tags = series.Tags
+ readers = make([][]xio.BlockReader, 0, len(points))
)
- for _, block := range blocks {
- seriesBlock, err := buildBlockReader(block, start, opts)
+ blockStart := start
+ for _, block := range points {
+ seriesBlock, err := buildBlockReader(block, blockStart, blockSize, opts)
if err != nil {
return nil, err
}
readers = append(readers, seriesBlock)
- start = start.Add(opts.blockSize)
+ blockStart = blockStart.Add(blockSize)
}
multiReader := encoding.NewMultiReaderIterator(
iterAlloc,
- opts.iteratorPools.MultiReaderIterator(),
+ opts.IteratorPools.MultiReaderIterator(),
)
sliceOfSlicesIter := xio.NewReaderSliceOfSlicesFromBlockReadersIterator(readers)
multiReader.ResetSliceOfSlices(sliceOfSlicesIter, nil)
- end := opts.start.Add(opts.blockSize)
- if len(blocks) > 0 {
- lastBlock := blocks[len(blocks)-1]
+ end := start.Add(blockSize)
+ if len(points) > 0 {
+ lastBlock := points[len(points)-1]
end = lastBlock[len(lastBlock)-1].Timestamp
}
- tagIter, id := buildTagIteratorAndID(tags, opts.tagOptions)
+ tagIter, id := buildTagIteratorAndID(tags, opts.TagOptions)
return encoding.NewSeriesIterator(
- encoding.SeriesIteratorOptions{
- ID: id,
- Namespace: ident.StringID("ns"),
- Tags: tagIter,
- StartInclusive: opts.start,
- EndExclusive: end,
- Replicas: []encoding.MultiReaderIterator{
- multiReader,
- },
- }, nil),
- nil
+ encoding.SeriesIteratorOptions{
+ ID: id,
+ Namespace: ident.StringID("ns"),
+ Tags: tagIter,
+ StartInclusive: xtime.ToUnixNano(start),
+ EndExclusive: xtime.ToUnixNano(end),
+ Replicas: []encoding.MultiReaderIterator{
+ multiReader,
+ },
+ }, nil), nil
}
-func buildSeriesIterators(
- series []series,
- opts iteratorOptions,
+// BuildSeriesIterators builds series iterators from parser data.
+func BuildSeriesIterators(
+ series []IngestSeries,
+ start time.Time,
+ blockSize time.Duration,
+ opts Options,
) (encoding.SeriesIterators, error) {
iters := make([]encoding.SeriesIterator, 0, len(series))
for _, s := range series {
- iter, err := buildSeriesIterator(s, opts)
+ iter, err := buildSeriesIterator(s, start, blockSize, opts)
if err != nil {
return nil, err
}
@@ -161,6 +170,6 @@ func buildSeriesIterators(
return encoding.NewSeriesIterators(
iters,
- opts.iteratorPools.MutableSeriesIterators(),
+ opts.IteratorPools.MutableSeriesIterators(),
), nil
}
diff --git a/src/cmd/services/m3comparator/main/parser/series_load.go b/src/cmd/services/m3comparator/main/parser/series_load.go
new file mode 100644
index 0000000000..078a22ad43
--- /dev/null
+++ b/src/cmd/services/m3comparator/main/parser/series_load.go
@@ -0,0 +1,285 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package parser
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "sync"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/x/ident"
+ "github.com/m3db/m3/src/x/instrument"
+ xtime "github.com/m3db/m3/src/x/time"
+
+ "go.uber.org/zap"
+)
+
+// Options are options for series parsing.
+type Options struct {
+ EncoderPool encoding.EncoderPool
+ IteratorPools encoding.IteratorPools
+ TagOptions models.TagOptions
+ InstrumentOptions instrument.Options
+ Size int
+}
+
+type nameIDSeriesMap map[string]idSeriesMap
+
+type idSeriesMap struct {
+ start time.Time
+ end time.Time
+ series map[string][]Series
+}
+
+type seriesReader struct {
+ iterOpts Options
+ nameIDSeriesMap nameIDSeriesMap
+ sync.RWMutex
+}
+
+// SeriesReader reads SeriesIterators from a generic io.Reader.
+type SeriesReader interface {
+ SeriesIterators(name string) (encoding.SeriesIterators, error)
+ Load(reader io.Reader) error
+ Clear()
+}
+
+// NewSeriesReader creates a new SeriesReader that reads entries as
+// a slice of Series.
+func NewSeriesReader(opts Options) SeriesReader {
+ size := 10
+ if opts.Size != 0 {
+ size = opts.Size
+ }
+
+ return &seriesReader{
+ iterOpts: opts,
+ nameIDSeriesMap: make(nameIDSeriesMap, size),
+ }
+}
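+
+// A usage sketch (hypothetical payload): load JSON-encoded series, then read
+// the matching iterators back out.
+//
+//	r := NewSeriesReader(opts)
+//	if err := r.Load(strings.NewReader(payload)); err != nil {
+//		// handle load error
+//	}
+//	iters, err := r.SeriesIterators("foo")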
+
+func (l *seriesReader) SeriesIterators(name string) (encoding.SeriesIterators, error) {
+ l.RLock()
+ defer l.RUnlock()
+
+ var seriesMaps []idSeriesMap
+ logger := l.iterOpts.InstrumentOptions.Logger()
+ if name == "" {
+ // return all preloaded data
+ seriesMaps = make([]idSeriesMap, 0, len(l.nameIDSeriesMap))
+ for _, series := range l.nameIDSeriesMap {
+ seriesMaps = append(seriesMaps, series)
+ }
+ } else {
+ seriesMap, found := l.nameIDSeriesMap[name]
+ if !found || len(seriesMap.series) == 0 {
+ return nil, nil
+ }
+
+ seriesMaps = append(seriesMaps, seriesMap)
+ }
+
+ iters := make([]encoding.SeriesIterator, 0, len(seriesMaps))
+ for _, seriesMap := range seriesMaps {
+ for _, seriesPerID := range seriesMap.series {
+ for _, series := range seriesPerID {
+ encoder := l.iterOpts.EncoderPool.Get()
+ dps := series.Datapoints
+ startTime := time.Time{}
+ if len(dps) > 0 {
+ startTime = dps[0].Timestamp.Truncate(time.Hour)
+ }
+
+ encoder.Reset(startTime, len(dps), nil)
+ for _, dp := range dps {
+ err := encoder.Encode(ts.Datapoint{
+ Timestamp: dp.Timestamp,
+ Value: float64(dp.Value),
+ TimestampNanos: xtime.ToUnixNano(dp.Timestamp),
+ }, xtime.Nanosecond, nil)
+
+ if err != nil {
+ encoder.Close()
+ logger.Error("error encoding datapoints", zap.Error(err))
+ return nil, err
+ }
+ }
+
+ readers := [][]xio.BlockReader{{{
+ SegmentReader: xio.NewSegmentReader(encoder.Discard()),
+ Start: series.Start,
+ BlockSize: series.End.Sub(series.Start),
+ }}}
+
+ multiReader := encoding.NewMultiReaderIterator(
+ iterAlloc,
+ l.iterOpts.IteratorPools.MultiReaderIterator(),
+ )
+
+ sliceOfSlicesIter := xio.NewReaderSliceOfSlicesFromBlockReadersIterator(readers)
+ multiReader.ResetSliceOfSlices(sliceOfSlicesIter, nil)
+
+ tagIter, id := buildTagIteratorAndID(series.Tags, l.iterOpts.TagOptions)
+ iter := encoding.NewSeriesIterator(
+ encoding.SeriesIteratorOptions{
+ ID: id,
+ Namespace: ident.StringID("ns"),
+ Tags: tagIter,
+ StartInclusive: xtime.ToUnixNano(series.Start),
+ EndExclusive: xtime.ToUnixNano(series.End),
+ Replicas: []encoding.MultiReaderIterator{
+ multiReader,
+ },
+ }, nil)
+
+ iters = append(iters, iter)
+ }
+ }
+ }
+
+ return encoding.NewSeriesIterators(
+ iters,
+ l.iterOpts.IteratorPools.MutableSeriesIterators(),
+ ), nil
+}
+
+func calculateSeriesRange(seriesList []Series) (time.Time, time.Time) {
+ // NB: keep consistent start/end for the entire ingested set.
+ //
+ // Try taking from set start/end; infer from first/last endpoint otherwise.
+ start, end := time.Time{}, time.Time{}
+ for _, series := range seriesList {
+ if start.IsZero() || series.Start.Before(start) {
+ start = series.Start
+ }
+
+ if end.IsZero() || series.End.After(end) {
+ end = series.End
+ }
+ }
+
+ if !start.IsZero() && !end.IsZero() {
+ return start, end
+ }
+
+ for _, series := range seriesList {
+ dps := series.Datapoints
+ if len(dps) == 0 {
+ continue
+ }
+
+ first, last := dps[0].Timestamp, dps[len(dps)-1].Timestamp
+ if start.IsZero() || first.Before(start) {
+ start = first
+ }
+
+ if end.IsZero() || last.After(end) {
+ end = last
+ }
+ }
+
+ return start, end
+}
+
+func (l *seriesReader) Load(reader io.Reader) error {
+ l.Lock()
+ defer l.Unlock()
+
+ buf, err := ioutil.ReadAll(reader)
+ logger := l.iterOpts.InstrumentOptions.Logger()
+ if err != nil {
+ logger.Error("could not read body", zap.Error(err))
+ return err
+ }
+
+ seriesList := make([]Series, 0, 10)
+ if err := json.Unmarshal(buf, &seriesList); err != nil {
+ logger.Error("could not unmarshal queries", zap.Error(err))
+ return err
+ }
+
+ // NB: keep consistent start/end for the entire ingested set.
+ start, end := calculateSeriesRange(seriesList)
+ nameKey := string(l.iterOpts.TagOptions.MetricName())
+ nameMap := make(nameIDSeriesMap, len(seriesList))
+ for _, series := range seriesList {
+ names := series.Tags.Get(nameKey)
+ if len(names) != 1 || len(series.Datapoints) == 0 {
+ if len(names) > 1 {
+ err := fmt.Errorf("series has duplicate __name__ tags: %v", names)
+ logger.Error("bad __name__ variable", zap.Error(err))
+ return err
+ }
+
+ continue
+ }
+
+ name := names[0]
+ seriesMap, found := nameMap[name]
+ if !found {
+ seriesMap = idSeriesMap{
+ series: make(map[string][]Series, len(seriesList)),
+ }
+ }
+
+ id := series.IDOrGenID()
+ idSeries, found := seriesMap.series[id]
+ if !found {
+ idSeries = make([]Series, 0, 1)
+ } else {
+ logger.Info("duplicate tag set in loaded series",
+ zap.Int("count", len(idSeries)),
+ zap.String("id", id))
+ }
+
+ // NB: assign the consistent start/end before appending, since append
+ // stores a copy of the series value and later mutation would be lost.
+ series.Start = start
+ series.End = end
+ idSeries = append(idSeries, series)
+ seriesMap.series[id] = idSeries
+ logger.Info("setting series",
+ zap.String("name", name), zap.String("id", id))
+
+ nameMap[name] = seriesMap
+ }
+
+ for k, v := range nameMap {
+ // NB: overwrite existing series.
+ l.nameIDSeriesMap[k] = v
+ }
+
+ return nil
+}
+
+func (l *seriesReader) Clear() {
+ l.Lock()
+ for k := range l.nameIDSeriesMap {
+ delete(l.nameIDSeriesMap, k)
+ }
+
+ l.Unlock()
+}
diff --git a/src/cmd/services/m3comparator/main/querier.go b/src/cmd/services/m3comparator/main/querier.go
index f0159db3db..efae6f7fcf 100644
--- a/src/cmd/services/m3comparator/main/querier.go
+++ b/src/cmd/services/m3comparator/main/querier.go
@@ -26,46 +26,79 @@ import (
"fmt"
"math"
"math/rand"
+ "regexp"
"strconv"
+ "strings"
"sync"
"time"
+ "github.com/m3db/m3/src/cmd/services/m3comparator/main/parser"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/m3"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ xtime "github.com/m3db/m3/src/x/time"
+
+ "github.com/prometheus/common/model"
)
var _ m3.Querier = (*querier)(nil)
type querier struct {
- encoderPool encoding.EncoderPool
- iteratorPools encoding.IteratorPools
+ iteratorOpts parser.Options
+ handler seriesLoadHandler
+ blockSize time.Duration
+ defaultResolution time.Duration
+ histogramBucketCount uint
sync.Mutex
}
-func noop() error { return nil }
-
-type seriesBlock []ts.Datapoint
-type tagMap map[string]string
-type series struct {
- blocks []seriesBlock
- tags tagMap
+func newQuerier(
+ iteratorOpts parser.Options,
+ handler seriesLoadHandler,
+ blockSize time.Duration,
+ defaultResolution time.Duration,
+ histogramBucketCount uint,
+) (*querier, error) {
+ if blockSize <= 0 {
+ return nil, fmt.Errorf("blockSize must be positive, got %d", blockSize)
+ }
+ if defaultResolution <= 0 {
+ return nil, fmt.Errorf("defaultResolution must be positive, got %d", defaultResolution)
+ }
+ return &querier{
+ iteratorOpts: iteratorOpts,
+ handler: handler,
+ blockSize: blockSize,
+ defaultResolution: defaultResolution,
+ histogramBucketCount: histogramBucketCount,
+ }, nil
}
-func generateSeriesBlock(
+func noop() error { return nil }
+
+func (q *querier) generateSeriesBlock(
start time.Time,
- blockSize time.Duration,
resolution time.Duration,
-) seriesBlock {
- numPoints := int(blockSize / resolution)
- dps := make(seriesBlock, 0, numPoints)
+ integerValues bool,
+) parser.Data {
+ numPoints := int(q.blockSize / resolution)
+ dps := make(parser.Data, 0, numPoints)
for i := 0; i < numPoints; i++ {
+ stamp := start.Add(resolution * time.Duration(i))
+ var value float64
+ if integerValues {
+ value = float64(rand.Intn(1000))
+ } else {
+ value = rand.Float64()
+ }
dp := ts.Datapoint{
- Timestamp: start.Add(resolution * time.Duration(i)),
- Value: rand.Float64(),
+ Timestamp: stamp,
+ TimestampNanos: xtime.ToUnixNano(stamp),
+ Value: value,
}
dps = append(dps, dp)
@@ -74,44 +107,30 @@ func generateSeriesBlock(
return dps
}
-func generateSeries(
+func (q *querier) generateSeries(
start time.Time,
end time.Time,
- blockSize time.Duration,
resolution time.Duration,
- tags tagMap,
-) (series, error) {
- numBlocks := int(math.Ceil(float64(end.Sub(start)) / float64(blockSize)))
+ tags parser.Tags,
+ integerValues bool,
+) (parser.IngestSeries, error) {
+ numBlocks := int(math.Ceil(float64(end.Sub(start)) / float64(q.blockSize)))
if numBlocks == 0 {
- return series{}, fmt.Errorf("comparator querier: no blocks generated")
+ return parser.IngestSeries{}, fmt.Errorf("comparator querier: no blocks generated")
}
- blocks := make([]seriesBlock, 0, numBlocks)
+ blocks := make([]parser.Data, 0, numBlocks)
for i := 0; i < numBlocks; i++ {
- blocks = append(blocks, generateSeriesBlock(start, blockSize, resolution))
- start = start.Add(blockSize)
+ blocks = append(blocks, q.generateSeriesBlock(start, resolution, integerValues))
+ start = start.Add(q.blockSize)
}
- return series{
- blocks: blocks,
- tags: tags,
+ return parser.IngestSeries{
+ Datapoints: blocks,
+ Tags: tags,
}, nil
}
-func (q *querier) generateOptions(
- start time.Time,
- blockSize time.Duration,
- tagOptions models.TagOptions,
-) iteratorOptions {
- return iteratorOptions{
- start: start,
- blockSize: blockSize,
- encoderPool: q.encoderPool,
- iteratorPools: q.iteratorPools,
- tagOptions: tagOptions,
- }
-}
-
type seriesGen struct {
name string
res time.Duration
@@ -122,31 +141,131 @@ func (q *querier) FetchCompressed(
ctx context.Context,
query *storage.FetchQuery,
options *storage.FetchOptions,
-) (m3.SeriesFetchResult, m3.Cleanup, error) {
+) (consolidators.SeriesFetchResult, m3.Cleanup, error) {
+ var (
+ iters encoding.SeriesIterators
+ randomSeries []parser.IngestSeries
+ ignoreFilter bool
+ err error
+ strictMetricsFilter bool
+ )
+
+ name := q.iteratorOpts.TagOptions.MetricName()
+ for _, matcher := range query.TagMatchers {
+ if bytes.Equal(name, matcher.Name) {
+ metricsName := string(matcher.Value)
+
+ // NB: by default this querier returns predefined metrics with random
+ // data when no match by metric name is found. To force an empty
+ // result, query a metric name matching "nonexistent*".
+ if match, _ := regexp.MatchString("^nonexist[ae]nt", metricsName); match {
+ return consolidators.SeriesFetchResult{}, noop, nil
+ }
+
+ if matcher.Type == models.MatchEqual {
+ strictMetricsFilter = true
+ iters, err = q.handler.getSeriesIterators(metricsName)
+ if err != nil {
+ return consolidators.SeriesFetchResult{}, noop, err
+ }
+
+ break
+ }
+ }
+ }
+
+ if iters == nil && !strictMetricsFilter && len(query.TagMatchers) > 0 {
+ iters, err = q.handler.getSeriesIterators("")
+ if err != nil {
+ return consolidators.SeriesFetchResult{}, noop, err
+ }
+ }
+
+ if iters == nil || iters.Len() == 0 {
+ randomSeries, ignoreFilter, err = q.generateRandomSeries(query)
+ if err != nil {
+ return consolidators.SeriesFetchResult{}, noop, err
+ }
+ iters, err = parser.BuildSeriesIterators(
+ randomSeries, query.Start, q.blockSize, q.iteratorOpts)
+ if err != nil {
+ return consolidators.SeriesFetchResult{}, noop, err
+ }
+ }
+
+ if !ignoreFilter {
+ filteredIters := filter(iters, query.TagMatchers)
+
+ cleanup := func() error {
+ iters.Close()
+ filteredIters.Close()
+ return nil
+ }
+
+ result, err := consolidators.NewSeriesFetchResult(
+ filteredIters, nil, block.NewResultMetadata())
+ return result, cleanup, err
+ }
+
+ cleanup := func() error {
+ iters.Close()
+ return nil
+ }
+
+ result, err := consolidators.NewSeriesFetchResult(
+ iters, nil, block.NewResultMetadata())
+ return result, cleanup, err
+}
+
+func (q *querier) generateRandomSeries(
+ query *storage.FetchQuery,
+) (series []parser.IngestSeries, ignoreFilter bool, err error) {
+ var (
+ start = query.Start.Truncate(q.blockSize)
+ end = query.End.Truncate(q.blockSize).Add(q.blockSize)
+ )
+
+ metricNameTag := q.iteratorOpts.TagOptions.MetricName()
+ for _, matcher := range query.TagMatchers {
+ if bytes.Equal(metricNameTag, matcher.Name) {
+ if matched, _ := regexp.Match(`^multi_\d+$`, matcher.Value); matched {
+ series, err = q.generateMultiSeriesMetrics(string(matcher.Value), start, end)
+ return
+ }
+ if matched, _ := regexp.Match(`^histogram_\d+_bucket$`, matcher.Value); matched {
+ series, err = q.generateHistogramMetrics(string(matcher.Value), start, end)
+ return
+ }
+ }
+ }
+
+ ignoreFilter = true
+ series, err = q.generateSingleSeriesMetrics(query, start, end)
+ return
+}
+
+func (q *querier) generateSingleSeriesMetrics(
+ query *storage.FetchQuery,
+ start time.Time,
+ end time.Time,
+) ([]parser.IngestSeries, error) {
var (
- // TODO: take from config.
- blockSize = time.Hour * 12
- start = query.Start.Truncate(blockSize)
- end = query.End.Truncate(blockSize).Add(blockSize)
- tagOpts = models.NewTagOptions()
- opts = q.generateOptions(start, blockSize, tagOpts)
-
- // TODO: take from config.
gens = []seriesGen{
- seriesGen{"foo", time.Second},
- seriesGen{"bar", time.Second * 15},
- seriesGen{"quail", time.Minute},
+ {"foo", time.Second},
+ {"bar", time.Second * 15},
+ {"quail", time.Minute},
}
actualGens []seriesGen
)
- q.Lock()
- defer q.Unlock()
- rand.Seed(start.Unix())
+ unlock := q.lockAndSeed(start)
+ defer unlock()
+
+ metricNameTag := q.iteratorOpts.TagOptions.MetricName()
for _, matcher := range query.TagMatchers {
// filter if name, otherwise return all.
- if bytes.Equal(opts.tagOptions.MetricName(), matcher.Name) {
+ if bytes.Equal(metricNameTag, matcher.Name) {
value := string(matcher.Value)
for _, gen := range gens {
if value == gen.name {
@@ -160,15 +279,15 @@ func (q *querier) FetchCompressed(
cStr := string(matcher.Value)
count, err := strconv.Atoi(cStr)
if err != nil {
- return m3.SeriesFetchResult{}, noop, err
+ return nil, err
}
- actualGens = make([]seriesGen, count)
+ actualGens = make([]seriesGen, 0, count)
for i := 0; i < count; i++ {
- actualGens[i] = seriesGen{
- res: time.Second * 15,
+ actualGens = append(actualGens, seriesGen{
+ res: q.defaultResolution,
name: fmt.Sprintf("foo_%d", i),
- }
+ })
}
break
@@ -179,36 +298,117 @@ func (q *querier) FetchCompressed(
actualGens = gens
}
- seriesList := make([]series, 0, len(actualGens))
+ seriesList := make([]parser.IngestSeries, 0, len(actualGens))
for _, gen := range actualGens {
- tagMap := map[string]string{
- "__name__": gen.name,
- "foobar": "qux",
- "name": gen.name,
+ tags := parser.Tags{
+ parser.NewTag(model.MetricNameLabel, gen.name),
+ parser.NewTag("foobar", "qux"),
+ parser.NewTag("name", gen.name),
}
- series, err := generateSeries(start, end, blockSize, gen.res, tagMap)
+ series, err := q.generateSeries(start, end, gen.res, tags, false)
if err != nil {
- return m3.SeriesFetchResult{}, noop, err
+ return nil, err
}
seriesList = append(seriesList, series)
}
- iters, err := buildSeriesIterators(seriesList, opts)
+ return seriesList, nil
+}
+
+func (q *querier) generateMultiSeriesMetrics(
+ metricsName string,
+ start time.Time,
+ end time.Time,
+) ([]parser.IngestSeries, error) {
+ suffix := strings.TrimPrefix(metricsName, "multi_")
+ seriesCount, err := strconv.Atoi(suffix)
if err != nil {
- return m3.SeriesFetchResult{}, noop, err
+ return nil, err
}
- cleanup := func() error {
- iters.Close()
- return nil
+ unlock := q.lockAndSeed(start)
+ defer unlock()
+
+ seriesList := make([]parser.IngestSeries, 0, seriesCount)
+ for id := 0; id < seriesCount; id++ {
+ tags := multiSeriesTags(metricsName, id)
+
+ series, err := q.generateSeries(start, end, q.defaultResolution, tags, false)
+ if err != nil {
+ return nil, err
+ }
+
+ seriesList = append(seriesList, series)
}
- return m3.SeriesFetchResult{
- SeriesIterators: iters,
- Metadata: block.NewResultMetadata(),
- }, cleanup, nil
+ return seriesList, nil
+}
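+
+// For example (hypothetical query): fetching the metric "multi_3" generates
+// three series with tags id=0..2, parity=id%2, and const="x".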
+
+func (q *querier) generateHistogramMetrics(
+ metricsName string,
+ start time.Time,
+ end time.Time,
+) ([]parser.IngestSeries, error) {
+ suffix := strings.TrimPrefix(metricsName, "histogram_")
+ countStr := strings.TrimSuffix(suffix, "_bucket")
+ seriesCount, err := strconv.Atoi(countStr)
+ if err != nil {
+ return nil, err
+ }
+
+ unlock := q.lockAndSeed(start)
+ defer unlock()
+
+ seriesList := make([]parser.IngestSeries, 0, seriesCount)
+ for id := 0; id < seriesCount; id++ {
+ le := 1.0
+ var previousSeriesBlocks []parser.Data
+ for bucket := uint(0); bucket < q.histogramBucketCount; bucket++ {
+ tags := multiSeriesTags(metricsName, id)
+ leStr := "+Inf"
+ if bucket < q.histogramBucketCount-1 {
+ leStr = strconv.FormatFloat(le, 'f', -1, 64)
+ }
+ leTag := parser.NewTag("le", leStr)
+ tags = append(tags, leTag)
+ le *= 10
+
+ series, err := q.generateSeries(start, end, q.defaultResolution, tags, true)
+ if err != nil {
+ return nil, err
+ }
+
+ for i, prevBlock := range previousSeriesBlocks {
+ for j, prevValue := range prevBlock {
+ series.Datapoints[i][j].Value += prevValue.Value
+ }
+ }
+
+ seriesList = append(seriesList, series)
+
+ previousSeriesBlocks = series.Datapoints
+ }
+ }
+
+ return seriesList, nil
+}
+
+func multiSeriesTags(metricsName string, id int) parser.Tags {
+ return parser.Tags{
+ parser.NewTag(model.MetricNameLabel, metricsName),
+ parser.NewTag("id", strconv.Itoa(id)),
+ parser.NewTag("parity", strconv.Itoa(id%2)),
+ parser.NewTag("const", "x"),
+ }
+}
+
+func (q *querier) lockAndSeed(start time.Time) func() {
+ q.Lock()
+ rand.Seed(start.Unix())
+
+ return q.Unlock
}
// SearchCompressed fetches matching tags based on a query.
@@ -216,8 +416,8 @@ func (q *querier) SearchCompressed(
ctx context.Context,
query *storage.FetchQuery,
options *storage.FetchOptions,
-) (m3.TagResult, m3.Cleanup, error) {
- return m3.TagResult{}, noop, fmt.Errorf("not impl")
+) (consolidators.TagResult, m3.Cleanup, error) {
+	return consolidators.TagResult{}, noop, fmt.Errorf("not implemented")
}
// CompleteTagsCompressed returns autocompleted tag results.
@@ -225,13 +425,13 @@ func (q *querier) CompleteTagsCompressed(
ctx context.Context,
query *storage.CompleteTagsQuery,
options *storage.FetchOptions,
-) (*storage.CompleteTagsResult, error) {
+) (*consolidators.CompleteTagsResult, error) {
nameOnly := query.CompleteNameOnly
// TODO: take from config.
- return &storage.CompleteTagsResult{
+ return &consolidators.CompleteTagsResult{
CompleteNameOnly: nameOnly,
- CompletedTags: []storage.CompletedTag{
- storage.CompletedTag{
+ CompletedTags: []consolidators.CompletedTag{
+ {
Name: []byte("__name__"),
Values: [][]byte{[]byte("foo"), []byte("foo"), []byte("quail")},
},
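
A note on the seeded randomness used above: lockAndSeed holds the querier lock while seeding the global math/rand source with the query start, so repeated fetches over the same time window generate identical pseudo-random series, which is what makes comparator diffs meaningful across runs. A minimal standalone sketch of the idea, using a local rand.Rand instead of the global source (names here are illustrative only, not part of this change):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// valuesFor derives pseudo-random values solely from the query start,
// so the same window always yields the same series.
func valuesFor(start time.Time, n int) []float64 {
	rng := rand.New(rand.NewSource(start.Unix()))
	out := make([]float64, n)
	for i := range out {
		out[i] = rng.Float64()
	}
	return out
}

func main() {
	start := time.Date(2020, 3, 30, 11, 0, 0, 0, time.UTC)
	fmt.Println(valuesFor(start, 3))
	fmt.Println(valuesFor(start, 3)) // identical output: same seed, same series
}
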
diff --git a/src/cmd/services/m3comparator/main/querier_test.go b/src/cmd/services/m3comparator/main/querier_test.go
new file mode 100644
index 0000000000..b4142bccd1
--- /dev/null
+++ b/src/cmd/services/m3comparator/main/querier_test.go
@@ -0,0 +1,431 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/cmd/services/m3comparator/main/parser"
+ "github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/x/ident"
+ xtest "github.com/m3db/m3/src/x/test"
+ xtime "github.com/m3db/m3/src/x/time"
+
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type testSeriesLoadHandler struct {
+ iters encoding.SeriesIterators
+}
+
+func (h *testSeriesLoadHandler) getSeriesIterators(
+ name string) (encoding.SeriesIterators, error) {
+ return h.iters, nil
+}
+
+var _ seriesLoadHandler = (*testSeriesLoadHandler)(nil)
+
+type tagMap map[string]string
+
+var (
+ iteratorOpts = parser.Options{
+ EncoderPool: encoderPool,
+ IteratorPools: iterPools,
+ TagOptions: tagOptions,
+ InstrumentOptions: iOpts,
+ }
+ metricNameTag = string(iteratorOpts.TagOptions.MetricName())
+)
+
+const (
+ blockSize = time.Hour * 12
+ defaultResolution = time.Second * 30
+ metricsName = "preloaded"
+ predefinedSeriesCount = 10
+ histogramBucketCount = 4
+)
+
+func TestFetchCompressed(t *testing.T) {
+ tests := []struct {
+ name string
+ queryTagName string
+ queryTagValue string
+ expectedCount int
+ }{
+ {
+ name: "querying by metric name returns preloaded data",
+ queryTagName: metricNameTag,
+ queryTagValue: metricsName,
+ expectedCount: predefinedSeriesCount,
+ },
+ {
+ name: "querying without metric name just by other tag returns preloaded data",
+ queryTagName: "tag1",
+ queryTagValue: "test2",
+ expectedCount: 4,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ query := matcherQuery(t, tt.queryTagName, tt.queryTagValue)
+ querier := setupQuerier(ctrl, query)
+
+ result, cleanup, err := querier.FetchCompressed(nil, query, nil)
+ assert.NoError(t, err)
+ defer cleanup()
+
+ assert.Equal(t, tt.expectedCount, len(result.SeriesIterators()))
+ })
+ }
+}
+
+func TestGenerateRandomSeries(t *testing.T) {
+ tests := []struct {
+ name string
+ givenQuery *storage.FetchQuery
+ wantSeries []tagMap
+ }{
+ {
+ name: "querying nonexistent_metric returns empty",
+ givenQuery: matcherQuery(t, metricNameTag, "nonexistent_metric"),
+ wantSeries: []tagMap{},
+ },
+ {
+ name: "querying nonexistant returns empty",
+ givenQuery: matcherQuery(t, metricNameTag, "nonexistant"),
+ wantSeries: []tagMap{},
+ },
+ {
+ name: "random data for known metrics",
+ givenQuery: matcherQuery(t, metricNameTag, "quail"),
+ wantSeries: []tagMap{
+ {
+ metricNameTag: "quail",
+ "foobar": "qux",
+ "name": "quail",
+ },
+ },
+ },
+ {
+ name: "a hardcoded list of metrics",
+ givenQuery: matcherQuery(t, metricNameTag, "unknown"),
+ wantSeries: []tagMap{
+ {
+ metricNameTag: "foo",
+ "foobar": "qux",
+ "name": "foo",
+ },
+ {
+ metricNameTag: "bar",
+ "foobar": "qux",
+ "name": "bar",
+ },
+ {
+ metricNameTag: "quail",
+ "foobar": "qux",
+ "name": "quail",
+ },
+ },
+ },
+ {
+ name: "a given number of single series metrics",
+ givenQuery: matcherQuery(t, "gen", "2"),
+ wantSeries: []tagMap{
+ {
+ metricNameTag: "foo_0",
+ "foobar": "qux",
+ "name": "foo_0",
+ },
+ {
+ metricNameTag: "foo_1",
+ "foobar": "qux",
+ "name": "foo_1",
+ },
+ },
+ },
+ {
+ name: "single metrics with a given number of series",
+ givenQuery: matcherQuery(t, metricNameTag, "multi_4"),
+ wantSeries: []tagMap{
+ {
+ metricNameTag: "multi_4",
+ "const": "x",
+ "id": "0",
+ "parity": "0",
+ },
+ {
+ metricNameTag: "multi_4",
+ "const": "x",
+ "id": "1",
+ "parity": "1",
+ },
+ {
+ metricNameTag: "multi_4",
+ "const": "x",
+ "id": "2",
+ "parity": "0",
+ },
+ {
+ metricNameTag: "multi_4",
+ "const": "x",
+ "id": "3",
+ "parity": "1",
+ },
+ },
+ },
+ {
+ name: "histogram metrics",
+ givenQuery: matcherQuery(t, metricNameTag, "histogram_2_bucket"),
+ wantSeries: []tagMap{
+ {
+ metricNameTag: "histogram_2_bucket",
+ "const": "x",
+ "id": "0",
+ "parity": "0",
+ "le": "1",
+ },
+ {
+ metricNameTag: "histogram_2_bucket",
+ "const": "x",
+ "id": "0",
+ "parity": "0",
+ "le": "10",
+ },
+ {
+ metricNameTag: "histogram_2_bucket",
+ "const": "x",
+ "id": "0",
+ "parity": "0",
+ "le": "100",
+ },
+ {
+ metricNameTag: "histogram_2_bucket",
+ "const": "x",
+ "id": "0",
+ "parity": "0",
+ "le": "+Inf",
+ },
+
+ {
+ metricNameTag: "histogram_2_bucket",
+ "const": "x",
+ "id": "1",
+ "parity": "1",
+ "le": "1",
+ },
+ {
+ metricNameTag: "histogram_2_bucket",
+ "const": "x",
+ "id": "1",
+ "parity": "1",
+ "le": "10",
+ },
+ {
+ metricNameTag: "histogram_2_bucket",
+ "const": "x",
+ "id": "1",
+ "parity": "1",
+ "le": "100",
+ },
+ {
+ metricNameTag: "histogram_2_bucket",
+ "const": "x",
+ "id": "1",
+ "parity": "1",
+ "le": "+Inf",
+ },
+ },
+ },
+ {
+ name: "apply tag filter",
+ givenQuery: and(
+ matcherQuery(t, metricNameTag, "multi_5"),
+ matcherQuery(t, "parity", "1")),
+ wantSeries: []tagMap{
+ {
+ metricNameTag: "multi_5",
+ "const": "x",
+ "id": "1",
+ "parity": "1",
+ },
+ {
+ metricNameTag: "multi_5",
+ "const": "x",
+ "id": "3",
+ "parity": "1",
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ querier, err := setupRandomGenQuerier(ctrl)
+ assert.NoError(t, err)
+
+ result, cleanup, err := querier.FetchCompressed(nil, tt.givenQuery, nil)
+ assert.NoError(t, err)
+ defer cleanup()
+
+ iters := result.SeriesIterators()
+ require.Equal(t, len(tt.wantSeries), len(iters))
+ for i, expectedTags := range tt.wantSeries {
+ iter := iters[i]
+ assert.Equal(t, expectedTags, extractTags(iter))
+ assert.True(t, iter.Next(), "Must have some datapoints generated.")
+ }
+ })
+ }
+}
+
+func TestHistogramBucketsAddUp(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ querier, err := setupRandomGenQuerier(ctrl)
+ assert.NoError(t, err)
+
+ histogramQuery := matcherQuery(t, metricNameTag, "histogram_1_bucket")
+ result, cleanup, err := querier.FetchCompressed(nil, histogramQuery, nil)
+ assert.NoError(t, err)
+ defer cleanup()
+
+ iters := result.SeriesIterators()
+ require.Equal(t, histogramBucketCount,
+ len(iters), "number of histogram buckets")
+
+ iter0 := iters[0]
+ for iter0.Next() {
+		v0, u0, _ := iter0.Current() // datapoint, unit, annotation
+		for i := 1; i < histogramBucketCount; i++ {
+			iter := iters[i]
+			require.True(t, iter.Next(), "all buckets must have the same length")
+			vi, ui, _ := iter.Current()
+			assert.True(t, vi.Value >= v0.Value, "bucket values must be non-decreasing")
+			assert.Equal(t, v0.Timestamp, vi.Timestamp, "bucket datapoint timestamps must match")
+			assert.Equal(t, u0, ui, "bucket units must match")
+ }
+ }
+
+ for _, iter := range iters {
+ require.False(t, iter.Next(), "all buckets must have the same length")
+ }
+}
+
+func matcherQuery(t *testing.T, matcherName, matcherValue string) *storage.FetchQuery {
+ matcher, err := models.NewMatcher(models.MatchEqual, []byte(matcherName), []byte(matcherValue))
+ assert.NoError(t, err)
+
+ now := time.Now()
+
+ return &storage.FetchQuery{
+ TagMatchers: []models.Matcher{matcher},
+ Start: now.Add(-time.Hour),
+ End: now,
+ }
+}
+
+func and(query1, query2 *storage.FetchQuery) *storage.FetchQuery {
+ return &storage.FetchQuery{
+ TagMatchers: append(query1.TagMatchers, query2.TagMatchers...),
+ Start: query1.Start,
+ End: query1.End,
+ }
+}
+
+func extractTags(seriesIter encoding.SeriesIterator) tagMap {
+ tagsIter := seriesIter.Tags().Duplicate()
+ defer tagsIter.Close()
+
+ tags := make(tagMap)
+ for tagsIter.Next() {
+ tag := tagsIter.Current()
+ tags[tag.Name.String()] = tag.Value.String()
+ }
+
+ return tags
+}
+
+func setupQuerier(ctrl *gomock.Controller, query *storage.FetchQuery) *querier {
+ metricsTag := ident.NewTags(ident.Tag{
+ Name: ident.BytesID(tagOptions.MetricName()),
+ Value: ident.BytesID(metricsName),
+ },
+ ident.Tag{
+ Name: ident.BytesID("tag1"),
+ Value: ident.BytesID("test"),
+ },
+ )
+ metricsTag2 := ident.NewTags(ident.Tag{
+ Name: ident.BytesID(tagOptions.MetricName()),
+ Value: ident.BytesID(metricsName),
+ },
+ ident.Tag{
+ Name: ident.BytesID("tag1"),
+ Value: ident.BytesID("test2"),
+ },
+ )
+
+ iters := make([]encoding.SeriesIterator, 0, predefinedSeriesCount)
+ for i := 0; i < predefinedSeriesCount; i++ {
+ m := metricsTag
+ if i > 5 {
+ m = metricsTag2
+ }
+ iters = append(iters, encoding.NewSeriesIterator(
+ encoding.SeriesIteratorOptions{
+ Namespace: ident.StringID("ns"),
+ Tags: ident.NewTagsIterator(m),
+ StartInclusive: xtime.ToUnixNano(query.Start),
+ EndExclusive: xtime.ToUnixNano(query.End),
+ }, nil))
+ }
+
+ seriesIterators := encoding.NewMockSeriesIterators(ctrl)
+ seriesIterators.EXPECT().Len().Return(predefinedSeriesCount).MinTimes(1)
+ seriesIterators.EXPECT().Iters().Return(iters).Times(1)
+ seriesIterators.EXPECT().Close()
+
+ seriesLoader := &testSeriesLoadHandler{seriesIterators}
+
+ return &querier{iteratorOpts: iteratorOpts, handler: seriesLoader}
+}
+
+func setupRandomGenQuerier(ctrl *gomock.Controller) (*querier, error) {
+ iters := encoding.NewMockSeriesIterators(ctrl)
+ iters.EXPECT().Len().Return(0).AnyTimes()
+
+ emptySeriesLoader := &testSeriesLoadHandler{iters}
+
+ return newQuerier(iteratorOpts, emptySeriesLoader, blockSize, defaultResolution, histogramBucketCount)
+}
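
The cumulative-bucket shape that TestHistogramBucketsAddUp asserts comes from generateHistogramMetrics adding each previous bucket's datapoints into the next bucket. A standalone sketch of that construction, with illustrative raw increments:

package main

import "fmt"

func main() {
	raw := [][]float64{ // per-bucket raw increments (illustrative)
		{1, 2, 1},
		{0, 1, 3},
		{2, 0, 1},
	}
	cum := make([][]float64, len(raw))
	for b := range raw {
		cum[b] = append([]float64(nil), raw[b]...)
		if b > 0 {
			for i := range cum[b] {
				cum[b][i] += cum[b-1][i] // add previous bucket, like the generator
			}
		}
	}
	fmt.Println(cum) // [[1 2 1] [1 3 4] [3 3 5]]: non-decreasing down each column
}
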
diff --git a/src/cmd/services/m3comparator/main/series_load_handler.go b/src/cmd/services/m3comparator/main/series_load_handler.go
new file mode 100644
index 0000000000..35092e7b0e
--- /dev/null
+++ b/src/cmd/services/m3comparator/main/series_load_handler.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "net/http"
+ "sync"
+
+ "github.com/m3db/m3/src/cmd/services/m3comparator/main/parser"
+ "github.com/m3db/m3/src/dbnode/encoding"
+ xhttp "github.com/m3db/m3/src/x/net/http"
+
+ "go.uber.org/zap"
+)
+
+type seriesLoadHandler interface {
+ getSeriesIterators(string) (encoding.SeriesIterators, error)
+}
+
+type httpSeriesLoadHandler struct {
+ sync.RWMutex
+ reader parser.SeriesReader
+ iterOpts parser.Options
+}
+
+var (
+ _ http.Handler = (*httpSeriesLoadHandler)(nil)
+ _ seriesLoadHandler = (*httpSeriesLoadHandler)(nil)
+)
+
+// newHTTPSeriesLoadHandler builds a handler that can load series
+// to the comparator via an HTTP endpoint.
+func newHTTPSeriesLoadHandler(iterOpts parser.Options) *httpSeriesLoadHandler {
+ return &httpSeriesLoadHandler{
+ iterOpts: iterOpts,
+ reader: parser.NewSeriesReader(iterOpts),
+ }
+}
+
+func (l *httpSeriesLoadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ logger := l.iterOpts.InstrumentOptions.Logger()
+ err := l.serveHTTP(r)
+ if err != nil {
+		logger.Error("unable to load series data", zap.Error(err))
+ xhttp.Error(w, err, http.StatusInternalServerError)
+ return
+ }
+
+ w.WriteHeader(http.StatusOK)
+}
+
+func (l *httpSeriesLoadHandler) getSeriesIterators(
+ name string) (encoding.SeriesIterators, error) {
+ return l.reader.SeriesIterators(name)
+}
+
+func (l *httpSeriesLoadHandler) serveHTTP(r *http.Request) error {
+ if r.Method == http.MethodDelete {
+ l.reader.Clear()
+ return nil
+ }
+
+ body := r.Body
+ defer body.Close()
+ return l.reader.Load(body)
+}
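
For context, a hypothetical client of this handler. The address below is an assumption (routing is configured wherever the handler is mounted); only the POST-to-load and DELETE-to-clear behavior comes from serveHTTP above:

package main

import (
	"log"
	"net/http"
	"strings"
)

func main() {
	const endpoint = "http://localhost:9000/series" // assumed mount address
	payload := `[{"start":"2020-03-30T11:39:45Z","end":"2020-03-30T11:58:00Z",
		"tags":[["__name__","series_name"]],
		"datapoints":[{"val":"1","ts":"2020-03-30T11:39:51.288Z"}]}]`

	// Load series data.
	resp, err := http.Post(endpoint, "application/json", strings.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// Clear all loaded series.
	req, _ := http.NewRequest(http.MethodDelete, endpoint, nil)
	if resp, err = http.DefaultClient.Do(req); err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
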
diff --git a/src/cmd/services/m3comparator/main/series_load_handler_test.go b/src/cmd/services/m3comparator/main/series_load_handler_test.go
new file mode 100644
index 0000000000..19c4d1ff12
--- /dev/null
+++ b/src/cmd/services/m3comparator/main/series_load_handler_test.go
@@ -0,0 +1,238 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+
+ "github.com/m3db/m3/src/cmd/services/m3comparator/main/parser"
+ "github.com/m3db/m3/src/dbnode/encoding"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// NB: regression test data that previously caused issues when loaded.
+const seriesStr = `
+[
+ {
+ "start": "2020-03-30T11:39:45Z",
+ "end": "2020-03-30T11:58:00Z",
+ "tags": [
+ ["__name__", "series_name"],
+ ["abc", "def"],
+ ["tag_a", "foo"]
+ ],
+ "datapoints": [
+ { "val": "7076", "ts": "2020-03-30T11:39:51.288Z" },
+ { "val": "7076", "ts": "2020-03-30T11:39:57.478Z" },
+ { "val": "7076", "ts": "2020-03-30T11:40:07.478Z" },
+ { "val": "7076", "ts": "2020-03-30T11:40:18.886Z" },
+ { "val": "7076", "ts": "2020-03-30T11:40:31.135Z" },
+ { "val": "7077", "ts": "2020-03-30T11:40:40.047Z" },
+ { "val": "7077", "ts": "2020-03-30T11:40:54.893Z" },
+ { "val": "7077", "ts": "2020-03-30T11:40:57.478Z" },
+ { "val": "7077", "ts": "2020-03-30T11:41:07.478Z" },
+ { "val": "7077", "ts": "2020-03-30T11:41:17.478Z" },
+ { "val": "7077", "ts": "2020-03-30T11:41:29.323Z" },
+ { "val": "7078", "ts": "2020-03-30T11:41:43.873Z" },
+ { "val": "7078", "ts": "2020-03-30T11:41:54.375Z" },
+ { "val": "7078", "ts": "2020-03-30T11:41:58.053Z" },
+ { "val": "7078", "ts": "2020-03-30T11:42:09.250Z" },
+ { "val": "7078", "ts": "2020-03-30T11:42:20.793Z" },
+ { "val": "7078", "ts": "2020-03-30T11:42:34.915Z" },
+ { "val": "7079", "ts": "2020-03-30T11:42:43.467Z" },
+ { "val": "7079", "ts": "2020-03-30T11:42:50.364Z" },
+ { "val": "7079", "ts": "2020-03-30T11:43:02.376Z" },
+ { "val": "7079", "ts": "2020-03-30T11:43:07.478Z" },
+ { "val": "7079", "ts": "2020-03-30T11:43:20.807Z" },
+ { "val": "7079", "ts": "2020-03-30T11:43:29.432Z" },
+ { "val": "7079", "ts": "2020-03-30T11:43:37.478Z" },
+ { "val": "7080", "ts": "2020-03-30T11:43:47.478Z" },
+ { "val": "7080", "ts": "2020-03-30T11:44:01.078Z" },
+ { "val": "7080", "ts": "2020-03-30T11:44:07.478Z" },
+ { "val": "7080", "ts": "2020-03-30T11:44:17.478Z" },
+ { "val": "7080", "ts": "2020-03-30T11:44:28.444Z" },
+ { "val": "7080", "ts": "2020-03-30T11:44:37.478Z" },
+ { "val": "7081", "ts": "2020-03-30T11:44:49.607Z" },
+ { "val": "7081", "ts": "2020-03-30T11:45:02.758Z" },
+ { "val": "7081", "ts": "2020-03-30T11:45:16.740Z" },
+ { "val": "7081", "ts": "2020-03-30T11:45:27.813Z" },
+ { "val": "7081", "ts": "2020-03-30T11:45:38.141Z" },
+ { "val": "7082", "ts": "2020-03-30T11:45:53.850Z" },
+ { "val": "7082", "ts": "2020-03-30T11:46:00.954Z" },
+ { "val": "7082", "ts": "2020-03-30T11:46:08.814Z" },
+ { "val": "7082", "ts": "2020-03-30T11:46:17.478Z" },
+ { "val": "7082", "ts": "2020-03-30T11:46:27.478Z" },
+ { "val": "7082", "ts": "2020-03-30T11:46:38.152Z" },
+ { "val": "7083", "ts": "2020-03-30T11:46:48.192Z" },
+ { "val": "7084", "ts": "2020-03-30T11:47:40.871Z" },
+ { "val": "7084", "ts": "2020-03-30T11:47:49.966Z" },
+ { "val": "7084", "ts": "2020-03-30T11:47:57.478Z" },
+ { "val": "7084", "ts": "2020-03-30T11:48:07.478Z" },
+ { "val": "7084", "ts": "2020-03-30T11:48:23.279Z" },
+ { "val": "7084", "ts": "2020-03-30T11:48:29.018Z" },
+ { "val": "7084", "ts": "2020-03-30T11:48:37.478Z" },
+ { "val": "7085", "ts": "2020-03-30T11:48:47.478Z" },
+ { "val": "7085", "ts": "2020-03-30T11:48:57.478Z" },
+ { "val": "7085", "ts": "2020-03-30T11:49:07.478Z" },
+ { "val": "7085", "ts": "2020-03-30T11:49:17.478Z" },
+ { "val": "7085", "ts": "2020-03-30T11:49:27.478Z" },
+ { "val": "7085", "ts": "2020-03-30T11:49:37.478Z" },
+ { "val": "7086", "ts": "2020-03-30T11:49:47.478Z" },
+ { "val": "7086", "ts": "2020-03-30T11:49:57.850Z" },
+ { "val": "7086", "ts": "2020-03-30T11:50:07.478Z" },
+ { "val": "7086", "ts": "2020-03-30T11:50:20.887Z" },
+ { "val": "7087", "ts": "2020-03-30T11:51:12.729Z" },
+ { "val": "7087", "ts": "2020-03-30T11:51:19.914Z" },
+ { "val": "7087", "ts": "2020-03-30T11:51:27.478Z" },
+ { "val": "7087", "ts": "2020-03-30T11:51:37.478Z" },
+ { "val": "7088", "ts": "2020-03-30T11:51:47.478Z" },
+ { "val": "7088", "ts": "2020-03-30T11:51:57.478Z" },
+ { "val": "7088", "ts": "2020-03-30T11:52:07.478Z" },
+ { "val": "7088", "ts": "2020-03-30T11:52:17.478Z" },
+ { "val": "7088", "ts": "2020-03-30T11:52:29.869Z" },
+ { "val": "7088", "ts": "2020-03-30T11:52:38.976Z" },
+ { "val": "7089", "ts": "2020-03-30T11:52:47.478Z" },
+ { "val": "7089", "ts": "2020-03-30T11:52:57.478Z" },
+ { "val": "7089", "ts": "2020-03-30T11:53:07.478Z" },
+ { "val": "7089", "ts": "2020-03-30T11:53:17.906Z" },
+ { "val": "7089", "ts": "2020-03-30T11:53:27.478Z" },
+ { "val": "7090", "ts": "2020-03-30T11:54:17.478Z" },
+ { "val": "7090", "ts": "2020-03-30T11:54:27.478Z" },
+ { "val": "7090", "ts": "2020-03-30T11:54:37.478Z" },
+ { "val": "7091", "ts": "2020-03-30T11:54:51.214Z" },
+ { "val": "7091", "ts": "2020-03-30T11:54:58.985Z" },
+ { "val": "7091", "ts": "2020-03-30T11:55:08.548Z" },
+ { "val": "7091", "ts": "2020-03-30T11:55:19.762Z" },
+ { "val": "7091", "ts": "2020-03-30T11:55:27.478Z" },
+ { "val": "7091", "ts": "2020-03-30T11:55:39.009Z" },
+ { "val": "7092", "ts": "2020-03-30T11:55:47.478Z" },
+ { "val": "7092", "ts": "2020-03-30T11:56:01.507Z" },
+ { "val": "7092", "ts": "2020-03-30T11:56:12.995Z" },
+ { "val": "7092", "ts": "2020-03-30T11:56:24.892Z" },
+ { "val": "7092", "ts": "2020-03-30T11:56:38.410Z" },
+ { "val": "7093", "ts": "2020-03-30T11:56:47.478Z" },
+ { "val": "7093", "ts": "2020-03-30T11:56:58.786Z" },
+ { "val": "7093", "ts": "2020-03-30T11:57:07.478Z" },
+ { "val": "7093", "ts": "2020-03-30T11:57:17.478Z" },
+ { "val": "7093", "ts": "2020-03-30T11:57:31.283Z" },
+ { "val": "7093", "ts": "2020-03-30T11:57:39.113Z" },
+ { "val": "7094", "ts": "2020-03-30T11:57:48.864Z" },
+ { "val": "7094", "ts": "2020-03-30T11:57:57.478Z" }
+ ]
+ }
+]`
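
The JSON above follows the shape that parser.Series unmarshals. A minimal sketch using a local mirror of that shape (the struct and field names here are assumptions matching the JSON keys, not the real parser types):

package main

import (
	"encoding/json"
	"fmt"
)

type datapoint struct {
	Val string `json:"val"`
	TS  string `json:"ts"`
}

type series struct {
	Start      string      `json:"start"`
	End        string      `json:"end"`
	Tags       [][2]string `json:"tags"`
	Datapoints []datapoint `json:"datapoints"`
}

func main() {
	blob := `[{"start":"2020-03-30T11:39:45Z","end":"2020-03-30T11:58:00Z",
		"tags":[["__name__","series_name"]],
		"datapoints":[{"val":"7076","ts":"2020-03-30T11:39:51.288Z"}]}]`
	var out []series
	if err := json.Unmarshal([]byte(blob), &out); err != nil {
		panic(err)
	}
	fmt.Println(out[0].Tags[0][1], len(out[0].Datapoints)) // series_name 1
}
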
+
+func TestIngestSeries(t *testing.T) {
+ opts := parser.Options{
+ EncoderPool: encoderPool,
+ IteratorPools: iterPools,
+ TagOptions: tagOptions,
+ InstrumentOptions: iOpts,
+ }
+
+ req, err := http.NewRequest(http.MethodPost, "", strings.NewReader(seriesStr))
+ require.NoError(t, err)
+
+ recorder := httptest.NewRecorder()
+
+ handler := newHTTPSeriesLoadHandler(opts)
+ handler.ServeHTTP(recorder, req)
+
+ assert.Equal(t, http.StatusOK, recorder.Code)
+
+ iters, err := handler.getSeriesIterators("series_name")
+ require.NoError(t, err)
+ require.NotNil(t, iters)
+
+ expectedList := make([]parser.Series, 0, 10)
+ err = json.Unmarshal([]byte(seriesStr), &expectedList)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(expectedList))
+ expected := expectedList[0]
+
+ require.Equal(t, 1, len(iters.Iters()))
+ it := iters.Iters()[0]
+ j := 0
+ for it.Next() {
+ c, _, _ := it.Current()
+ ts := c.Timestamp.UTC()
+ ex := expected.Datapoints[j]
+ assert.Equal(t, ex.Timestamp, ts)
+ assert.Equal(t, float64(ex.Value), c.Value)
+ j++
+ }
+
+ assert.NoError(t, it.Err())
+ assert.Equal(t, expected.Tags, readTags(it))
+ assert.Equal(t, j, len(expected.Datapoints))
+}
+
+func TestClearData(t *testing.T) {
+ opts := parser.Options{
+ EncoderPool: encoderPool,
+ IteratorPools: iterPools,
+ TagOptions: tagOptions,
+ InstrumentOptions: iOpts,
+ }
+
+ req, err := http.NewRequest(http.MethodPost, "", strings.NewReader(seriesStr))
+ require.NoError(t, err)
+
+ recorder := httptest.NewRecorder()
+
+ handler := newHTTPSeriesLoadHandler(opts)
+ handler.ServeHTTP(recorder, req)
+
+ assert.Equal(t, http.StatusOK, recorder.Code)
+
+ iters, err := handler.getSeriesIterators("series_name")
+ require.NoError(t, err)
+ require.Equal(t, 1, len(iters.Iters()))
+
+	// Clear the loaded data via the DELETE method.
+ req, err = http.NewRequest(http.MethodDelete, "", nil)
+ require.NoError(t, err)
+
+ handler.ServeHTTP(recorder, req)
+ assert.Equal(t, http.StatusOK, recorder.Code)
+
+ iters, err = handler.getSeriesIterators("series_name")
+ require.NoError(t, err)
+ require.Nil(t, iters)
+}
+
+func readTags(it encoding.SeriesIterator) parser.Tags {
+ tagIter := it.Tags()
+ tags := make(parser.Tags, 0, tagIter.Len())
+ for tagIter.Next() {
+ tag := tagIter.Current()
+ newTag := parser.NewTag(tag.Name.String(), tag.Value.String())
+ tags = append(tags, newTag)
+ }
+
+ return tags
+}
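
Note that readTags consumes the iterator returned by it.Tags() directly, which is fine once datapoint iteration is complete, whereas extractTags in querier_test.go duplicates the iterator first. A small sketch of the duplicate-before-read pattern, built from the same ident helpers these tests already use:

package main

import (
	"fmt"

	"github.com/m3db/m3/src/x/ident"
)

func main() {
	tags := ident.NewTagsIterator(ident.NewTags(
		ident.Tag{Name: ident.StringID("__name__"), Value: ident.StringID("series_name")},
	))
	dup := tags.Duplicate() // independent cursor; leaves tags untouched
	defer dup.Close()
	for dup.Next() {
		tag := dup.Current()
		fmt.Println(tag.Name.String(), "=", tag.Value.String())
	}
}
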
diff --git a/src/cmd/services/m3coordinator/downsample/downsample_mock.go b/src/cmd/services/m3coordinator/downsample/downsample_mock.go
index 9c5cb347a5..2d04b1a4c8 100644
--- a/src/cmd/services/m3coordinator/downsample/downsample_mock.go
+++ b/src/cmd/services/m3coordinator/downsample/downsample_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/cmd/services/m3coordinator/downsample (interfaces: Downsampler,MetricsAppender,SamplesAppender)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -116,23 +116,23 @@ func (mr *MockMetricsAppenderMockRecorder) Finalize() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Finalize", reflect.TypeOf((*MockMetricsAppender)(nil).Finalize))
}
-// Reset mocks base method
-func (m *MockMetricsAppender) Reset() {
+// NextMetric mocks base method
+func (m *MockMetricsAppender) NextMetric() {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "Reset")
+ m.ctrl.Call(m, "NextMetric")
}
-// Reset indicates an expected call of Reset
-func (mr *MockMetricsAppenderMockRecorder) Reset() *gomock.Call {
+// NextMetric indicates an expected call of NextMetric
+func (mr *MockMetricsAppenderMockRecorder) NextMetric() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockMetricsAppender)(nil).Reset))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextMetric", reflect.TypeOf((*MockMetricsAppender)(nil).NextMetric))
}
// SamplesAppender mocks base method
-func (m *MockMetricsAppender) SamplesAppender(arg0 SampleAppenderOptions) (SamplesAppender, error) {
+func (m *MockMetricsAppender) SamplesAppender(arg0 SampleAppenderOptions) (SamplesAppenderResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SamplesAppender", arg0)
- ret0, _ := ret[0].(SamplesAppender)
+ ret0, _ := ret[0].(SamplesAppenderResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -221,3 +221,17 @@ func (mr *MockSamplesAppenderMockRecorder) AppendGaugeTimedSample(arg0, arg1 int
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendGaugeTimedSample", reflect.TypeOf((*MockSamplesAppender)(nil).AppendGaugeTimedSample), arg0, arg1)
}
+
+// AppendTimerTimedSample mocks base method
+func (m *MockSamplesAppender) AppendTimerTimedSample(arg0 time.Time, arg1 float64) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AppendTimerTimedSample", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AppendTimerTimedSample indicates an expected call of AppendTimerTimedSample
+func (mr *MockSamplesAppenderMockRecorder) AppendTimerTimedSample(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendTimerTimedSample", reflect.TypeOf((*MockSamplesAppender)(nil).AppendTimerTimedSample), arg0, arg1)
+}
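
A sketch of driving the regenerated mock in a test, assuming the standard mockgen constructor NewMockMetricsAppender (the test name is illustrative); it exercises the renamed NextMetric expectation and the new SamplesAppenderResult return value:

package downsample

import (
	"testing"

	"github.com/golang/mock/gomock"
)

func TestMetricsAppenderMockLifecycle(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	appender := NewMockMetricsAppender(ctrl)
	appender.EXPECT().NextMetric()
	appender.EXPECT().AddTag([]byte("__name__"), []byte("foo"))
	appender.EXPECT().
		SamplesAppender(gomock.Any()).
		Return(SamplesAppenderResult{}, nil)
	appender.EXPECT().Finalize()

	appender.NextMetric()
	appender.AddTag([]byte("__name__"), []byte("foo"))
	if _, err := appender.SamplesAppender(SampleAppenderOptions{}); err != nil {
		t.Fatal(err)
	}
	appender.Finalize()
}
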
diff --git a/src/cmd/services/m3coordinator/downsample/downsampler.go b/src/cmd/services/m3coordinator/downsample/downsampler.go
index aa57564ff0..0be8515118 100644
--- a/src/cmd/services/m3coordinator/downsample/downsampler.go
+++ b/src/cmd/services/m3coordinator/downsample/downsampler.go
@@ -23,7 +23,8 @@ package downsample
import (
"time"
- "go.uber.org/zap"
+ "github.com/m3db/m3/src/query/ts"
+
"go.uber.org/zap/zapcore"
)
@@ -35,17 +36,29 @@ type Downsampler interface {
// MetricsAppender is a metrics appender that can build a samples
// appender, only valid to use with a single caller at a time.
type MetricsAppender interface {
+ // NextMetric progresses to building the next metric.
+ NextMetric()
+ // AddTag adds a tag to the current metric being built.
AddTag(name, value []byte)
- SamplesAppender(opts SampleAppenderOptions) (SamplesAppender, error)
- Reset()
+ // SamplesAppender returns a samples appender for the current
+ // metric built with the tags that have been set.
+ SamplesAppender(opts SampleAppenderOptions) (SamplesAppenderResult, error)
+ // Finalize finalizes the entire metrics appender for reuse.
Finalize()
}
+// SamplesAppenderResult is the result from a SamplesAppender call.
+type SamplesAppenderResult struct {
+	// SamplesAppender appends samples for the current metric.
+	SamplesAppender SamplesAppender
+	// IsDropPolicyApplied is true if a drop policy was applied to the
+	// metric, in which case no samples should be appended.
+	IsDropPolicyApplied bool
+}
+
// SampleAppenderOptions defines the options being used when constructing
// the samples appender for a metric.
type SampleAppenderOptions struct {
Override bool
OverrideRules SamplesAppenderOverrideRules
+ MetricType ts.MetricType
}
// SamplesAppenderOverrideRules provides override rules to
@@ -62,14 +75,13 @@ type SamplesAppender interface {
AppendGaugeSample(value float64) error
AppendCounterTimedSample(t time.Time, value int64) error
AppendGaugeTimedSample(t time.Time, value float64) error
+ AppendTimerTimedSample(t time.Time, value float64) error
}
type downsampler struct {
- opts DownsamplerOptions
- agg agg
-
- debugLogging bool
- logger *zap.Logger
+ opts DownsamplerOptions
+ agg agg
+ metricsAppenderOpts metricsAppenderOptions
}
type downsamplerOptions struct {
@@ -88,24 +100,28 @@ func newDownsampler(opts downsamplerOptions) (*downsampler, error) {
debugLogging = true
}
+ metricsAppenderOpts := metricsAppenderOptions{
+ agg: opts.agg.aggregator,
+ clientRemote: opts.agg.clientRemote,
+ defaultStagedMetadatasProtos: opts.agg.defaultStagedMetadatasProtos,
+ clockOpts: opts.agg.clockOpts,
+ tagEncoderPool: opts.agg.pools.tagEncoderPool,
+ matcher: opts.agg.matcher,
+ metricTagsIteratorPool: opts.agg.pools.metricTagsIteratorPool,
+ debugLogging: debugLogging,
+ logger: logger,
+ augmentM3Tags: opts.agg.m3PrefixFilter,
+ }
+
return &downsampler{
- opts: opts.opts,
- agg: opts.agg,
- debugLogging: debugLogging,
- logger: logger,
+ opts: opts.opts,
+ agg: opts.agg,
+ metricsAppenderOpts: metricsAppenderOpts,
}, nil
}
func (d *downsampler) NewMetricsAppender() (MetricsAppender, error) {
- return newMetricsAppender(metricsAppenderOptions{
- agg: d.agg.aggregator,
- clientRemote: d.agg.clientRemote,
- defaultStagedMetadatas: d.agg.defaultStagedMetadatas,
- clockOpts: d.agg.clockOpts,
- tagEncoder: d.agg.pools.tagEncoderPool.Get(),
- matcher: d.agg.matcher,
- metricTagsIteratorPool: d.agg.pools.metricTagsIteratorPool,
- debugLogging: d.debugLogging,
- logger: d.logger,
- }), nil
+ metricsAppender := d.agg.pools.metricsAppenderPool.Get()
+ metricsAppender.reset(d.metricsAppenderOpts)
+ return metricsAppender, nil
}
diff --git a/src/cmd/services/m3coordinator/downsample/downsampler_test.go b/src/cmd/services/m3coordinator/downsample/downsampler_test.go
index c6d41f2849..6d3ea5bfb2 100644
--- a/src/cmd/services/m3coordinator/downsample/downsampler_test.go
+++ b/src/cmd/services/m3coordinator/downsample/downsampler_test.go
@@ -41,13 +41,18 @@ import (
"github.com/m3db/m3/src/metrics/rules"
ruleskv "github.com/m3db/m3/src/metrics/rules/store/kv"
"github.com/m3db/m3/src/metrics/rules/view"
+ "github.com/m3db/m3/src/metrics/transformation"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/storage/mock"
+ "github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/serialize"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@@ -160,10 +165,10 @@ func TestDownsamplerAggregationWithRulesConfigMappingRules(t *testing.T) {
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
- tags: gaugeMetric.tags,
- value: 30,
- attributes: &storage.Attributes{
- MetricsType: storage.AggregatedMetricsType,
+ tags: gaugeMetric.tags,
+ values: []expectedValue{{value: 30}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
@@ -176,7 +181,639 @@ func TestDownsamplerAggregationWithRulesConfigMappingRules(t *testing.T) {
testDownsamplerAggregation(t, testDownsampler)
}
-func TestDownsamplerAggregationWithRulesConfigRollupRules(t *testing.T) {
+func TestDownsamplerAggregationWithRulesConfigMappingRulesPartialReplaceAutoMappingRule(t *testing.T) {
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ nameTag: "foo_metric",
+ "app": "nginx_edge",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
+ },
+ }
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ autoMappingRules: []AutoMappingRule{
+ {
+ Aggregations: []aggregation.Type{aggregation.Sum},
+ Policies: policy.StoragePolicies{
+ policy.MustParseStoragePolicy("2s:24h"),
+ policy.MustParseStoragePolicy("4s:48h"),
+ },
+ },
+ },
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "app:nginx*",
+ Aggregations: []aggregation.Type{aggregation.Max},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 2 * time.Second,
+ Retention: 24 * time.Hour,
+ },
+ },
+ },
+ },
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{
+ // Expect the max to be used and override the default auto
+ // mapping rule for the storage policy 2s:24h.
+ {
+ tags: gaugeMetric.tags,
+ values: []expectedValue{{value: 30}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: 2 * time.Second,
+ Retention: 24 * time.Hour,
+ },
+ },
+ // Expect the sum to still be used for the storage
+ // policy 4s:48h.
+ {
+ tags: gaugeMetric.tags,
+ values: []expectedValue{{value: 60}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: 4 * time.Second,
+ Retention: 48 * time.Hour,
+ },
+ },
+ },
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
+func TestDownsamplerAggregationWithRulesConfigMappingRulesReplaceAutoMappingRule(t *testing.T) {
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ nameTag: "foo_metric",
+ "app": "nginx_edge",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
+ },
+ }
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ autoMappingRules: []AutoMappingRule{
+ {
+ Aggregations: []aggregation.Type{aggregation.Sum},
+ Policies: policy.StoragePolicies{
+ policy.MustParseStoragePolicy("2s:24h"),
+ },
+ },
+ },
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "app:nginx*",
+ Aggregations: []aggregation.Type{aggregation.Max},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 2 * time.Second,
+ Retention: 24 * time.Hour,
+ },
+ },
+ },
+ },
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{
+ // Expect the max to be used and override the default auto
+ // mapping rule for the storage policy 2s:24h.
+ {
+ tags: gaugeMetric.tags,
+ values: []expectedValue{{value: 30}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: 2 * time.Second,
+ Retention: 24 * time.Hour,
+ },
+ },
+ },
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
+func TestDownsamplerAggregationWithRulesConfigMappingRulesNoNameTag(t *testing.T) {
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ "app": "nginx_edge",
+ "endpoint": "health",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
+ },
+ }
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ identTag: "endpoint",
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "app:nginx*",
+ Aggregations: []aggregation.Type{aggregation.Max},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ },
+ },
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{
+ {
+ tags: gaugeMetric.tags,
+ values: []expectedValue{{value: 30}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ },
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
+func TestDownsamplerAggregationWithRulesConfigMappingRulesTypeFilter(t *testing.T) {
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ "app": "nginx_edge",
+ "endpoint": "health",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
+ },
+ }
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ identTag: "endpoint",
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "__m3_type__:counter",
+ Aggregations: []aggregation.Type{aggregation.Max},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ },
+ },
+ },
+ sampleAppenderOpts: &SampleAppenderOptions{
+ MetricType: ts.MetricTypeCounter,
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{
+ {
+ tags: map[string]string{
+ "app": "nginx_edge",
+ "endpoint": "health",
+ },
+ values: []expectedValue{{value: 30}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ },
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
+func TestDownsamplerAggregationWithRulesConfigMappingRulesTypeFilterNoMatch(t *testing.T) {
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ "app": "nginx_edge",
+ "endpoint": "health",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
+ },
+ }
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ identTag: "endpoint",
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "__m3_type__:counter",
+ Aggregations: []aggregation.Type{aggregation.Max},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ },
+ },
+ },
+ sampleAppenderOpts: &SampleAppenderOptions{
+ MetricType: ts.MetricTypeGauge,
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{},
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
+func TestDownsamplerAggregationWithRulesConfigMappingRulesAggregationType(t *testing.T) {
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ "__g0__": "nginx_edge",
+ "__g1__": "health",
+ "__option_id_scheme__": "graphite",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
+ },
+ }
+ tags := []Tag{{Name: "__m3_graphite_aggregation__"}}
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ identTag: "__g2__",
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "__m3_type__:gauge",
+ Aggregations: []aggregation.Type{aggregation.Max},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ Tags: tags,
+ },
+ },
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{
+ {
+ tags: map[string]string{
+ "__g0__": "nginx_edge",
+ "__g1__": "health",
+ "__g2__": "Max",
+ },
+ values: []expectedValue{{value: 30}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ },
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
+func TestDownsamplerAggregationWithRulesConfigMappingRulesMultipleAggregationType(t *testing.T) {
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ "__g0__": "nginx_edge",
+ "__g1__": "health",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
+ },
+ }
+ tags := []Tag{{Name: "__m3_graphite_aggregation__"}}
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ identTag: "__g2__",
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "__m3_type__:gauge",
+ Aggregations: []aggregation.Type{aggregation.Max},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ Tags: tags,
+ },
+ {
+ Filter: "__m3_type__:gauge",
+ Aggregations: []aggregation.Type{aggregation.Sum},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ Tags: tags,
+ },
+ },
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{
+ {
+ tags: map[string]string{
+ "__g0__": "nginx_edge",
+ "__g1__": "health",
+ "__g2__": "Max",
+ },
+ values: []expectedValue{{value: 30}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ {
+ tags: map[string]string{
+ "__g0__": "nginx_edge",
+ "__g1__": "health",
+ "__g2__": "Sum",
+ },
+ values: []expectedValue{{value: 60}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ },
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
+func TestDownsamplerAggregationWithRulesConfigMappingRulesGraphitePrefixAndAggregationTags(t *testing.T) {
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ "__g0__": "nginx_edge",
+ "__g1__": "health",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
+ },
+ }
+ tags := []Tag{
+ {Name: "__m3_graphite_aggregation__"},
+ {Name: "__m3_graphite_prefix__", Value: "stats.counter"},
+ }
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ identTag: "__g4__",
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "__m3_type__:gauge",
+ Aggregations: []aggregation.Type{aggregation.Max},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ Tags: tags,
+ },
+ },
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{
+ {
+ tags: map[string]string{
+ "__g0__": "stats",
+ "__g1__": "counter",
+ "__g2__": "nginx_edge",
+ "__g3__": "health",
+ "__g4__": "Max",
+ },
+ values: []expectedValue{{value: 30}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ },
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
+func TestDownsamplerAggregationWithRulesConfigMappingRulesGraphitePrefixTag(t *testing.T) {
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ "__g0__": "nginx_edge",
+ "__g1__": "health",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
+ },
+ }
+ tags := []Tag{
+ {Name: "__m3_graphite_prefix__", Value: "stats.counter"},
+ }
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ identTag: "__g3__",
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "__m3_type__:gauge",
+ Aggregations: []aggregation.Type{aggregation.Max},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ Tags: tags,
+ },
+ },
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{
+ {
+ tags: map[string]string{
+ "__g0__": "stats",
+ "__g1__": "counter",
+ "__g2__": "nginx_edge",
+ "__g3__": "health",
+ },
+ values: []expectedValue{{value: 30}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ },
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
+func TestDownsamplerAggregationWithRulesConfigMappingRulesAugmentTag(t *testing.T) {
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ "app": "nginx_edge",
+ "endpoint": "health",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
+ },
+ }
+ tags := []Tag{
+ {Name: "datacenter", Value: "abc"},
+ }
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ identTag: "app",
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "app:nginx*",
+ Aggregations: []aggregation.Type{aggregation.Max},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ Tags: tags,
+ },
+ },
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{
+ {
+ tags: map[string]string{
+ "app": "nginx_edge",
+ "endpoint": "health",
+ "datacenter": "abc",
+ },
+ values: []expectedValue{{value: 30}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: 5 * time.Second,
+ Retention: 30 * 24 * time.Hour,
+ },
+ },
+ },
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
+func TestDownsamplerAggregationWithRulesConfigRollupRulesNoNameTag(t *testing.T) {
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ "app": "nginx_edge",
+ "status_code": "500",
+ "endpoint": "/foo/bar",
+ "not_rolled_up": "not_rolled_up_value",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 42},
+ {value: 64, offset: 5 * time.Second},
+ },
+ }
+ res := 5 * time.Second
+ ret := 30 * 24 * time.Hour
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ identTag: "endpoint",
+ rulesConfig: &RulesConfiguration{
+ RollupRules: []RollupRuleConfiguration{
+ {
+ Filter: fmt.Sprintf(
+ "%s:http_requests app:* status_code:* endpoint:*",
+ nameTag),
+ Transforms: []TransformConfiguration{
+ {
+ Transform: &TransformOperationConfiguration{
+ Type: transformation.PerSecond,
+ },
+ },
+ {
+ Rollup: &RollupOperationConfiguration{
+ MetricName: "http_requests_by_status_code",
+ GroupBy: []string{"app", "status_code", "endpoint"},
+ Aggregations: []aggregation.Type{aggregation.Sum},
+ },
+ },
+ },
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: res,
+ Retention: ret,
+ },
+ },
+ },
+ },
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{},
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
+func TestDownsamplerAggregationWithRulesConfigRollupRulesPerSecondSum(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "http_requests",
@@ -185,19 +822,107 @@ func TestDownsamplerAggregationWithRulesConfigRollupRules(t *testing.T) {
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value",
},
- samples: []float64{42, 64},
- // TODO: Make rollup rules work with timestamped samples (like below)
- // instead of only with untimed samples (this requires being able to
- // write staged metadatas instead of a single storage policy for a
- // timed metric).
- // timedSamples: []testGaugeMetricTimedSample{
- // {value: 42}, {value: 64},
- // },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 42},
+ {value: 64, offset: 5 * time.Second},
+ },
+ }
+ res := 5 * time.Second
+ ret := 30 * 24 * time.Hour
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ rulesConfig: &RulesConfiguration{
+ RollupRules: []RollupRuleConfiguration{
+ {
+ Filter: fmt.Sprintf(
+ "%s:http_requests app:* status_code:* endpoint:*",
+ nameTag),
+ Transforms: []TransformConfiguration{
+ {
+ Transform: &TransformOperationConfiguration{
+ Type: transformation.PerSecond,
+ },
+ },
+ {
+ Rollup: &RollupOperationConfiguration{
+ MetricName: "http_requests_by_status_code",
+ GroupBy: []string{"app", "status_code", "endpoint"},
+ Aggregations: []aggregation.Type{aggregation.Sum},
+ },
+ },
+ },
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: res,
+ Retention: ret,
+ },
+ },
+ },
+ },
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{
+ {
+ tags: map[string]string{
+ nameTag: "http_requests_by_status_code",
+ string(rollupTagName): string(rollupTagValue),
+ "app": "nginx_edge",
+ "status_code": "500",
+ "endpoint": "/foo/bar",
+ },
+ values: []expectedValue{{value: 4.4}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: res,
+ Retention: ret,
+ },
+ },
+ },
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
+func TestDownsamplerAggregationWithRulesConfigRollupRulesIncreaseAdd(t *testing.T) {
+ gaugeMetrics := []testGaugeMetric{
+		{
+ tags: map[string]string{
+ nameTag: "http_requests",
+ "app": "nginx_edge",
+ "status_code": "500",
+ "endpoint": "/foo/bar",
+ "not_rolled_up": "not_rolled_up_value_1",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+				{value: 42, offset: 5 * time.Second}, // +42 (should not be counted since it is a reset)
+				// Deliberately no sample at this offset.
+				{value: 12, offset: 15 * time.Second}, // +12 - simulates a reset (should not be counted)
+ {value: 33, offset: 20 * time.Second}, // +21
+ },
+ },
+		{
+ tags: map[string]string{
+ nameTag: "http_requests",
+ "app": "nginx_edge",
+ "status_code": "500",
+ "endpoint": "/foo/bar",
+ "not_rolled_up": "not_rolled_up_value_2",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+				{value: 13, offset: 5 * time.Second}, // +13 (should not be counted since it is a reset)
+				{value: 27, offset: 10 * time.Second}, // +14
+				// Deliberately no sample at this offset.
+ {value: 42, offset: 20 * time.Second}, // +15
+ },
+ },
}
res := 5 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
- instrumentOpts: instrument.NewTestOptions(t),
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
@@ -205,15 +930,99 @@ func TestDownsamplerAggregationWithRulesConfigRollupRules(t *testing.T) {
"%s:http_requests app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
- // TODO: make multi-stage rollup rules work, for some reason
- // when multiple transforms applied the HasRollup detection
- // fails and hence metric is not forwarded for second stage
- // aggregation.
- // {
- // Transform: &TransformOperationConfiguration{
- // Type: transformation.PerSecond,
- // },
- // },
+ {
+ Transform: &TransformOperationConfiguration{
+ Type: transformation.Increase,
+ },
+ },
+ {
+ Rollup: &RollupOperationConfiguration{
+ MetricName: "http_requests_by_status_code",
+ GroupBy: []string{"app", "status_code", "endpoint"},
+ Aggregations: []aggregation.Type{aggregation.Sum},
+ },
+ },
+ {
+ Transform: &TransformOperationConfiguration{
+ Type: transformation.Add,
+ },
+ },
+ },
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: res,
+ Retention: ret,
+ },
+ },
+ },
+ },
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: gaugeMetrics,
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{
+ {
+ tags: map[string]string{
+ nameTag: "http_requests_by_status_code",
+ string(rollupTagName): string(rollupTagValue),
+ "app": "nginx_edge",
+ "status_code": "500",
+ "endpoint": "/foo/bar",
+ },
+ values: []expectedValue{
+ {value: 14},
+ {value: 50, offset: 10 * time.Second},
+ },
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: res,
+ Retention: ret,
+ },
+ },
+ },
+ },
+ })
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
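
The per-sample comments in the test above encode the Increase transform's reset handling: the first sample and any decrease contribute nothing, otherwise the positive delta accumulates. A standalone sketch of that arithmetic:

package main

import "fmt"

func increases(samples []float64) []float64 {
	var out []float64
	for i := 1; i < len(samples); i++ {
		delta := samples[i] - samples[i-1]
		if delta < 0 {
			delta = 0 // counter reset: drop the spurious negative delta
		}
		out = append(out, delta)
	}
	return out
}

func main() {
	fmt.Println(increases([]float64{42, 12, 33})) // [0 21]
	fmt.Println(increases([]float64{13, 27, 42})) // [14 15]
}
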
+
+func TestDownsamplerAggregationWithRulesConfigRollupRuleAndDropPolicy(t *testing.T) {
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ nameTag: "http_requests",
+ "app": "nginx_edge",
+ "status_code": "500",
+ "endpoint": "/foo/bar",
+ "not_rolled_up": "not_rolled_up_value",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 42},
+ {value: 64, offset: 5 * time.Second},
+ },
+ expectDropPolicyApplied: true,
+ }
+ res := 5 * time.Second
+ ret := 30 * 24 * time.Hour
+ filter := fmt.Sprintf("%s:http_requests app:* status_code:* endpoint:*", nameTag)
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: filter,
+ Drop: true,
+ },
+ },
+ RollupRules: []RollupRuleConfiguration{
+ {
+ Filter: filter,
+ Transforms: []TransformConfiguration{
+ {
+ Transform: &TransformOperationConfiguration{
+ Type: transformation.PerSecond,
+ },
+ },
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
@@ -244,9 +1053,9 @@ func TestDownsamplerAggregationWithRulesConfigRollupRules(t *testing.T) {
"status_code": "500",
"endpoint": "/foo/bar",
},
- value: 106,
- attributes: &storage.Attributes{
- MetricsType: storage.AggregatedMetricsType,
+ values: []expectedValue{{value: 4.4}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
@@ -267,12 +1076,6 @@ func TestDownsamplerAggregationWithTimedSamples(t *testing.T) {
timedSamples: true,
})
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
- autoMappingRules: []AutoMappingRule{
- {
- Aggregations: []aggregation.Type{testAggregationType},
- Policies: testAggregationStoragePolicies,
- },
- },
ingest: &testDownsamplerOptionsIngest{
counterMetrics: counterMetrics,
gaugeMetrics: gaugeMetrics,
@@ -280,6 +1083,20 @@ func TestDownsamplerAggregationWithTimedSamples(t *testing.T) {
expect: &testDownsamplerOptionsExpect{
writes: append(counterMetricsExpect, gaugeMetricsExpect...),
},
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "__name__:*",
+ Aggregations: []aggregation.Type{testAggregationType},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 2 * time.Second,
+ Retention: 24 * time.Hour,
+ },
+ },
+ },
+ },
+ },
})
// Test expected output
@@ -288,10 +1105,10 @@ func TestDownsamplerAggregationWithTimedSamples(t *testing.T) {
func TestDownsamplerAggregationWithOverrideRules(t *testing.T) {
counterMetrics, counterMetricsExpect := testCounterMetrics(testCounterMetricsOptions{})
- counterMetricsExpect[0].value = 2
+ counterMetricsExpect[0].values = []expectedValue{{value: 2}}
gaugeMetrics, gaugeMetricsExpect := testGaugeMetrics(testGaugeMetricsOptions{})
- gaugeMetricsExpect[0].value = 5
+ gaugeMetricsExpect[0].values = []expectedValue{{value: 5}}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
sampleAppenderOpts: &SampleAppenderOptions{
@@ -307,10 +1124,18 @@ func TestDownsamplerAggregationWithOverrideRules(t *testing.T) {
},
},
},
- autoMappingRules: []AutoMappingRule{
- {
- Aggregations: []aggregation.Type{testAggregationType},
- Policies: testAggregationStoragePolicies,
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "__name__:*",
+ Aggregations: []aggregation.Type{testAggregationType},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 2 * time.Second,
+ Retention: 24 * time.Hour,
+ },
+ },
+ },
},
},
ingest: &testDownsamplerOptionsIngest{
@@ -327,7 +1152,7 @@ func TestDownsamplerAggregationWithOverrideRules(t *testing.T) {
}
func TestDownsamplerAggregationWithRemoteAggregatorClient(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
// Create mock client
@@ -335,10 +1160,18 @@ func TestDownsamplerAggregationWithRemoteAggregatorClient(t *testing.T) {
remoteClientMock.EXPECT().Init().Return(nil)
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
- autoMappingRules: []AutoMappingRule{
- {
- Aggregations: []aggregation.Type{testAggregationType},
- Policies: testAggregationStoragePolicies,
+ rulesConfig: &RulesConfiguration{
+ MappingRules: []MappingRuleConfiguration{
+ {
+ Filter: "__name__:*",
+ Aggregations: []aggregation.Type{testAggregationType},
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: 2 * time.Second,
+ Retention: 24 * time.Hour,
+ },
+ },
+ },
},
},
remoteClientMock: remoteClientMock,
@@ -349,31 +1182,41 @@ func TestDownsamplerAggregationWithRemoteAggregatorClient(t *testing.T) {
}
type testExpectedWrite struct {
- tags map[string]string
- value float64
- attributes *storage.Attributes
+ tags map[string]string
+	values            []expectedValue // use values when expecting multiple datapoints
+	valueAllowedError float64         // use to allow slightly inexact values due to timing, etc.
+ attributes *storagemetadata.Attributes
+}
+
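+// expectedValue is a single expected datapoint; offset, when non-zero, is the
+// expected distance of the datapoint's timestamp from the first write for the
+// same series (see the verification in testDownsamplerAggregation below).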
+type expectedValue struct {
+ offset time.Duration
+ value float64
}
type testCounterMetric struct {
- tags map[string]string
- samples []int64
- timedSamples []testCounterMetricTimedSample
+ tags map[string]string
+ samples []int64
+ timedSamples []testCounterMetricTimedSample
+ expectDropPolicyApplied bool
}
type testCounterMetricTimedSample struct {
- time time.Time
- value int64
+ time time.Time
+ offset time.Duration
+ value int64
}
type testGaugeMetric struct {
- tags map[string]string
- samples []float64
- timedSamples []testGaugeMetricTimedSample
+ tags map[string]string
+ samples []float64
+ timedSamples []testGaugeMetricTimedSample
+ expectDropPolicyApplied bool
}
type testGaugeMetricTimedSample struct {
- time time.Time
- value float64
+ time time.Time
+ offset time.Duration
+ value float64
}
type testCounterMetricsOptions struct {
@@ -395,8 +1238,8 @@ func testCounterMetrics(opts testCounterMetricsOptions) (
}
}
write := testExpectedWrite{
- tags: metric.tags,
- value: 6,
+ tags: metric.tags,
+ values: []expectedValue{{value: 6}},
}
return []testCounterMetric{metric}, []testExpectedWrite{write}
}
@@ -417,8 +1260,8 @@ func testGaugeMetrics(opts testGaugeMetricsOptions) ([]testGaugeMetric, []testEx
}
}
write := testExpectedWrite{
- tags: metric.tags,
- value: 15,
+ tags: metric.tags,
+ values: []expectedValue{{value: 15}},
}
return []testGaugeMetric{metric}, []testExpectedWrite{write}
}
@@ -453,26 +1296,51 @@ func testDownsamplerAggregation(
logger.Info("wait for test metrics to appear")
logWritesAccumulated := os.Getenv("TEST_LOG_WRITES_ACCUMULATED") == "true"
logWritesAccumulatedTicker := time.NewTicker(time.Second)
+
+ logWritesMatch := os.Getenv("TEST_LOG_WRITES_MATCH") == "true"
+ logWritesMatchTicker := time.NewTicker(time.Second)
+
+ identTag := nameTag
+ if len(testDownsampler.testOpts.identTag) > 0 {
+ identTag = testDownsampler.testOpts.identTag
+ }
+
CheckAllWritesArrivedLoop:
for {
- writes := testDownsampler.storage.Writes()
+ allWrites := testDownsampler.storage.Writes()
if logWritesAccumulated {
select {
case <-logWritesAccumulatedTicker.C:
logger.Info("logging accmulated writes",
- zap.Int("numWrites", len(writes)))
- for _, write := range writes {
+ zap.Int("numAllWrites", len(allWrites)))
+ for _, write := range allWrites {
logger.Info("accumulated write",
- zap.ByteString("tags", write.Tags.ID()),
- zap.Any("datapoints", write.Datapoints))
+ zap.ByteString("tags", write.Tags().ID()),
+ zap.Any("datapoints", write.Datapoints),
+ zap.Any("attributes", write.Attributes))
}
default:
}
}
for _, expectedWrite := range expectedWrites {
- name := expectedWrite.tags[nameTag]
- if _, ok := findWrite(t, writes, name); !ok {
+ name := expectedWrite.tags[identTag]
+ attrs := expectedWrite.attributes
+ writesForNameAndAttrs, _ := findWrites(allWrites, name, identTag, attrs)
+ if len(writesForNameAndAttrs) != len(expectedWrite.values) {
+ if logWritesMatch {
+ select {
+ case <-logWritesMatchTicker.C:
+ logger.Info("continuing wait for accumulated writes",
+ zap.String("name", name),
+ zap.Any("attributes", attrs),
+ zap.Int("numWritesForNameAndAttrs", len(writesForNameAndAttrs)),
+ zap.Int("numExpectedWriteValues", len(expectedWrite.values)),
+ )
+ default:
+ }
+ }
+
time.Sleep(100 * time.Millisecond)
continue CheckAllWritesArrivedLoop
}
@@ -482,29 +1350,57 @@ CheckAllWritesArrivedLoop:
// Verify writes
logger.Info("verify test metrics")
- writes := testDownsampler.storage.Writes()
+ allWrites := testDownsampler.storage.Writes()
if logWritesAccumulated {
logger.Info("logging accmulated writes to verify",
- zap.Int("numWrites", len(writes)))
- for _, write := range writes {
+ zap.Int("numAllWrites", len(allWrites)))
+ for _, write := range allWrites {
logger.Info("accumulated write",
- zap.ByteString("tags", write.Tags.ID()),
- zap.Any("datapoints", write.Datapoints))
+ zap.ByteString("tags", write.Tags().ID()),
+ zap.Any("datapoints", write.Datapoints()))
}
}
for _, expectedWrite := range expectedWrites {
- name := expectedWrite.tags[nameTag]
- value := expectedWrite.value
+ name := expectedWrite.tags[identTag]
+ expectedValues := expectedWrite.values
+ allowedError := expectedWrite.valueAllowedError
- write, found := findWrite(t, writes, name)
+ writesForNameAndAttrs, found := findWrites(allWrites, name, identTag, expectedWrite.attributes)
require.True(t, found)
- assert.Equal(t, expectedWrite.tags, tagsToStringMap(write.Tags))
- require.Equal(t, 1, len(write.Datapoints))
- assert.Equal(t, float64(value), write.Datapoints[0].Value)
+ require.Equal(t, len(expectedValues), len(writesForNameAndAttrs))
+ for i, expectedValue := range expectedValues {
+ write := writesForNameAndAttrs[i]
+
+ assert.Equal(t, expectedWrite.tags, tagsToStringMap(write.Tags()))
+
+ require.Equal(t, 1, len(write.Datapoints()))
+
+ actualValue := write.Datapoints()[0].Value
+ if allowedError == 0 {
+ // Exact match value.
+ assert.Equal(t, expectedValue.value, actualValue)
+ } else {
+ // Fuzzy match value.
+ lower := expectedValue.value - allowedError
+ upper := expectedValue.value + allowedError
+ withinBounds := (lower <= actualValue) && (actualValue <= upper)
+ msg := fmt.Sprintf("expected within: lower=%f, upper=%f, actual=%f",
+ lower, upper, actualValue)
+ assert.True(t, withinBounds, msg)
+ }
+
+ if expectedOffset := expectedValue.offset; expectedOffset > 0 {
+			// Check that the distance between datapoints is as expected (using
+			// the absolute offset from the first write).
+ firstTimestamp := writesForNameAndAttrs[0].Datapoints()[0].Timestamp
+ actualOffset := write.Datapoints()[0].Timestamp.Sub(firstTimestamp)
+ assert.Equal(t, expectedOffset, actualOffset)
+ }
- if attrs := expectedWrite.attributes; attrs != nil {
- assert.Equal(t, *attrs, write.Attributes)
+ if attrs := expectedWrite.attributes; attrs != nil {
+ assert.Equal(t, *attrs, write.Attributes())
+ }
}
}
}
@@ -629,43 +1525,57 @@ func testDownsamplerAggregationIngest(
opts = *testOpts.sampleAppenderOpts
}
for _, metric := range testCounterMetrics {
- appender.Reset()
+ appender.NextMetric()
+
for name, value := range metric.tags {
appender.AddTag([]byte(name), []byte(value))
}
- samplesAppender, err := appender.SamplesAppender(opts)
+ samplesAppenderResult, err := appender.SamplesAppender(opts)
require.NoError(t, err)
+ require.Equal(t, metric.expectDropPolicyApplied,
+ samplesAppenderResult.IsDropPolicyApplied)
+ samplesAppender := samplesAppenderResult.SamplesAppender
for _, sample := range metric.samples {
err = samplesAppender.AppendCounterSample(sample)
require.NoError(t, err)
}
for _, sample := range metric.timedSamples {
- if sample.time.Equal(time.Time{}) {
+ if sample.time.IsZero() {
sample.time = time.Now() // Allow empty time to mean "now"
}
+ if sample.offset > 0 {
+ sample.time = sample.time.Add(sample.offset)
+ }
err = samplesAppender.AppendCounterTimedSample(sample.time, sample.value)
require.NoError(t, err)
}
}
for _, metric := range testGaugeMetrics {
- appender.Reset()
+ appender.NextMetric()
+
for name, value := range metric.tags {
appender.AddTag([]byte(name), []byte(value))
}
- samplesAppender, err := appender.SamplesAppender(opts)
+ samplesAppenderResult, err := appender.SamplesAppender(opts)
require.NoError(t, err)
+ require.Equal(t, metric.expectDropPolicyApplied,
+ samplesAppenderResult.IsDropPolicyApplied)
+ samplesAppender := samplesAppenderResult.SamplesAppender
for _, sample := range metric.samples {
err = samplesAppender.AppendGaugeSample(sample)
require.NoError(t, err)
}
for _, sample := range metric.timedSamples {
- if sample.time.Equal(time.Time{}) {
+ if sample.time.IsZero() {
sample.time = time.Now() // Allow empty time to mean "now"
}
+ if sample.offset > 0 {
+ sample.time = sample.time.Add(sample.offset)
+ }
err = samplesAppender.AppendGaugeTimedSample(sample.time, sample.value)
require.NoError(t, err)
}
@@ -694,6 +1604,7 @@ type testDownsampler struct {
type testDownsamplerOptions struct {
clockOpts clock.Options
instrumentOpts instrument.Options
+ identTag string
// Options for the test
autoMappingRules []AutoMappingRule
@@ -724,7 +1635,8 @@ func newTestDownsampler(t *testing.T, opts testDownsamplerOptions) testDownsampl
clockOpts = opts.clockOpts
}
- instrumentOpts := instrument.NewOptions()
+	// Use test instrument options by default to turn debug logging on.
+ instrumentOpts := instrument.NewTestOptions(t)
if opts.instrumentOpts != nil {
instrumentOpts = opts.instrumentOpts
}
@@ -741,15 +1653,22 @@ func newTestDownsampler(t *testing.T, opts testDownsamplerOptions) testDownsampl
rulesStore := ruleskv.NewStore(rulesKVStore, rulesStoreOpts)
tagEncoderOptions := serialize.NewTagEncoderOptions()
- tagDecoderOptions := serialize.NewTagDecoderOptions()
+ tagDecoderOptions := serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{})
tagEncoderPoolOptions := pool.NewObjectPoolOptions().
+ SetSize(2).
SetInstrumentOptions(instrumentOpts.
SetMetricsScope(instrumentOpts.MetricsScope().
SubScope("tag-encoder-pool")))
tagDecoderPoolOptions := pool.NewObjectPoolOptions().
+ SetSize(2).
SetInstrumentOptions(instrumentOpts.
SetMetricsScope(instrumentOpts.MetricsScope().
SubScope("tag-decoder-pool")))
+ metricsAppenderPoolOptions := pool.NewObjectPoolOptions().
+ SetSize(2).
+ SetInstrumentOptions(instrumentOpts.
+ SetMetricsScope(instrumentOpts.MetricsScope().
+ SubScope("metrics-appender-pool")))
var cfg Configuration
if opts.remoteClientMock != nil {
@@ -764,16 +1683,19 @@ func newTestDownsampler(t *testing.T, opts testDownsamplerOptions) testDownsampl
}
instance, err := cfg.NewDownsampler(DownsamplerOptions{
- Storage: storage,
- ClusterClient: clusterclient.NewMockClient(gomock.NewController(t)),
- RulesKVStore: rulesKVStore,
- AutoMappingRules: opts.autoMappingRules,
- ClockOptions: clockOpts,
- InstrumentOptions: instrumentOpts,
- TagEncoderOptions: tagEncoderOptions,
- TagDecoderOptions: tagDecoderOptions,
- TagEncoderPoolOptions: tagEncoderPoolOptions,
- TagDecoderPoolOptions: tagDecoderPoolOptions,
+ Storage: storage,
+ ClusterClient: clusterclient.NewMockClient(gomock.NewController(t)),
+ RulesKVStore: rulesKVStore,
+ AutoMappingRules: opts.autoMappingRules,
+ ClockOptions: clockOpts,
+ InstrumentOptions: instrumentOpts,
+ TagEncoderOptions: tagEncoderOptions,
+ TagDecoderOptions: tagDecoderOptions,
+ TagEncoderPoolOptions: tagEncoderPoolOptions,
+ TagDecoderPoolOptions: tagDecoderPoolOptions,
+ MetricsAppenderPoolOptions: metricsAppenderPoolOptions,
+ RWOptions: xio.NewOptions(),
+ TagOptions: models.NewTagOptions(),
})
require.NoError(t, err)
@@ -808,8 +1730,12 @@ func newTestID(t *testing.T, tags map[string]string) id.ID {
data, ok := tagEncoder.Data()
require.True(t, ok)
- tagDecoderPool := serialize.NewTagDecoderPool(serialize.NewTagDecoderOptions(),
- pool.NewObjectPoolOptions().SetSize(1))
+ size := 1
+ tagDecoderPool := serialize.NewTagDecoderPool(
+ serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{
+ CheckBytesWrapperPoolSize: &size,
+ }),
+ pool.NewObjectPoolOptions().SetSize(size))
tagDecoderPool.Init()
tagDecoder := tagDecoderPool.Get()
@@ -819,19 +1745,28 @@ func newTestID(t *testing.T, tags map[string]string) id.ID {
return iter
}
-func findWrite(
- t *testing.T,
+func findWrites(
writes []*storage.WriteQuery,
- name string,
-) (*storage.WriteQuery, bool) {
+ name, identTag string,
+ optionalMatchAttrs *storagemetadata.Attributes,
+) ([]*storage.WriteQuery, bool) {
+ var results []*storage.WriteQuery
for _, w := range writes {
- if t, ok := w.Tags.Get([]byte(nameTag)); ok {
- if bytes.Equal(t, []byte(name)) {
- return w, true
+ if t, ok := w.Tags().Get([]byte(identTag)); ok {
+ if !bytes.Equal(t, []byte(name)) {
+ // Does not match name.
+ continue
}
+ if optionalMatchAttrs != nil && w.Attributes() != *optionalMatchAttrs {
+ // Tried to match attributes and not matched.
+ continue
+ }
+
+ // Matches name and all optional lookups.
+ results = append(results, w)
}
}
- return nil, false
+ return results, len(results) > 0
}
func testUpdateMetadata() rules.UpdateMetadata {
diff --git a/src/cmd/services/m3coordinator/downsample/flush_handler.go b/src/cmd/services/m3coordinator/downsample/flush_handler.go
index c164e8b637..cb828696e5 100644
--- a/src/cmd/services/m3coordinator/downsample/flush_handler.go
+++ b/src/cmd/services/m3coordinator/downsample/flush_handler.go
@@ -31,6 +31,7 @@ import (
"github.com/m3db/m3/src/metrics/metric/aggregated"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/x/convert"
"github.com/m3db/m3/src/x/instrument"
@@ -171,20 +172,26 @@ func (w *downsamplerFlushHandlerWriter) Write(
return
}
- err = w.handler.storage.Write(w.ctx, &storage.WriteQuery{
+ writeQuery, err := storage.NewWriteQuery(storage.WriteQueryOptions{
Tags: tags,
Datapoints: ts.Datapoints{ts.Datapoint{
Timestamp: time.Unix(0, mp.TimeNanos),
Value: mp.Value,
}},
Unit: convert.UnitForM3DB(mp.StoragePolicy.Resolution().Precision),
- Attributes: storage.Attributes{
- MetricsType: storage.AggregatedMetricsType,
+ Attributes: storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
Retention: mp.StoragePolicy.Retention().Duration(),
Resolution: mp.StoragePolicy.Resolution().Window,
},
})
if err != nil {
+ logger.Error("downsampler flush error creating write query", zap.Error(err))
+ w.handler.metrics.flushErrors.Inc(1)
+ return
+ }
+
+ if err := w.handler.storage.Write(w.ctx, writeQuery); err != nil {
logger.Error("downsampler flush error failed write", zap.Error(err))
w.handler.metrics.flushErrors.Inc(1)
return
diff --git a/src/cmd/services/m3coordinator/downsample/flush_handler_test.go b/src/cmd/services/m3coordinator/downsample/flush_handler_test.go
index f14996290e..70c6d96f0b 100644
--- a/src/cmd/services/m3coordinator/downsample/flush_handler_test.go
+++ b/src/cmd/services/m3coordinator/downsample/flush_handler_test.go
@@ -22,6 +22,7 @@ package downsample
import (
"bytes"
+ "sync"
"testing"
"github.com/m3db/m3/src/metrics/metric/aggregated"
@@ -29,10 +30,12 @@ import (
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage/mock"
- "github.com/m3db/m3/src/x/serialize"
- xtest "github.com/m3db/m3/src/x/test"
+ "github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
+ "github.com/m3db/m3/src/x/pool"
+ "github.com/m3db/m3/src/x/serialize"
xsync "github.com/m3db/m3/src/x/sync"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@@ -41,7 +44,7 @@ import (
)
func TestDownsamplerFlushHandlerCopiesTags(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := mock.NewMockStorage()
@@ -95,7 +98,7 @@ func TestDownsamplerFlushHandlerCopiesTags(t *testing.T) {
require.Equal(t, 1, len(writes))
// Ensure tag pointers _DO_NOT_ match but equal to same content
- tags := writes[0].Tags.Tags
+ tags := writes[0].Tags().Tags
require.Equal(t, 1, len(tags))
tag := tags[0]
@@ -104,3 +107,106 @@ func TestDownsamplerFlushHandlerCopiesTags(t *testing.T) {
assert.False(t, xtest.ByteSlicesBackedBySameData(tagName, tag.Name))
assert.False(t, xtest.ByteSlicesBackedBySameData(tagValue, tag.Value))
}
+
+func graphiteTags(
+ t *testing.T, first string, encPool serialize.TagEncoderPool) []byte {
+ enc := encPool.Get()
+ defer enc.Finalize()
+
+ err := enc.Encode(ident.MustNewTagStringsIterator(
+ "__g0__", first,
+ "__g1__", "y",
+ "__g2__", "z",
+ string(MetricsOptionIDSchemeTagName), string(GraphiteIDSchemeTagValue),
+ ))
+
+ require.NoError(t, err)
+ data, ok := enc.Data()
+ require.True(t, ok)
+ return append(make([]byte, 0, data.Len()), data.Bytes()...)
+}
+
+func TestDownsamplerFlushHandlerHighConcurrencyNoTagMixing(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ store := mock.NewMockStorage()
+
+ size := 10
+ decodeOpts := serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{
+ CheckBytesWrapperPoolSize: &size,
+ })
+
+ poolOpts := pool.NewObjectPoolOptions()
+ tagDecoderPool := serialize.NewTagDecoderPool(decodeOpts, poolOpts)
+ tagDecoderPool.Init()
+
+ pool := serialize.NewMetricTagsIteratorPool(tagDecoderPool, poolOpts)
+ pool.Init()
+
+ workers := xsync.NewWorkerPool(1)
+ workers.Init()
+
+ instrumentOpts := instrument.NewOptions()
+
+ handler := newDownsamplerFlushHandler(store, pool,
+ workers, models.NewTagOptions(), instrumentOpts)
+ writer, err := handler.NewWriter(tally.NoopScope)
+ require.NoError(t, err)
+
+ encodeOpts := serialize.NewTagEncoderOptions()
+ encPool := serialize.NewTagEncoderPool(encodeOpts, poolOpts)
+ encPool.Init()
+
+ xBytes := graphiteTags(t, "x", encPool)
+ fooBytes := graphiteTags(t, "foo", encPool)
+
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ xData := append(make([]byte, 0, len(xBytes)), xBytes...)
+ fooData := append(make([]byte, 0, len(fooBytes)), fooBytes...)
+ go func() {
+ defer wg.Done()
+ err := writer.Write(aggregated.ChunkedMetricWithStoragePolicy{
+ ChunkedMetric: aggregated.ChunkedMetric{
+ ChunkedID: id.ChunkedID{Data: xData},
+ TimeNanos: 123,
+ Value: 42.42,
+ },
+ StoragePolicy: policy.MustParseStoragePolicy("1s:1d"),
+ })
+ require.NoError(t, err)
+
+ err = writer.Write(aggregated.ChunkedMetricWithStoragePolicy{
+ ChunkedMetric: aggregated.ChunkedMetric{
+ ChunkedID: id.ChunkedID{Data: fooData},
+ TimeNanos: 123,
+ Value: 42.42,
+ },
+ StoragePolicy: policy.MustParseStoragePolicy("1s:1d"),
+ })
+ require.NoError(t, err)
+ }()
+ }
+
+ wg.Wait()
+ // Wait for flush
+ err = writer.Flush()
+ require.NoError(t, err)
+
+	// Inspect the writes
+ writes := store.Writes()
+ require.Equal(t, 200, len(writes))
+
+ seenMap := make(map[string]int, 10)
+ for _, w := range writes {
+ str := w.Tags().String()
+ seenMap[str] = seenMap[str] + 1
+ }
+
+ assert.Equal(t, map[string]int{
+ "__g0__: foo, __g1__: y, __g2__: z": 100,
+ "__g0__: x, __g1__: y, __g2__: z": 100,
+ }, seenMap)
+}
diff --git a/src/cmd/services/m3coordinator/downsample/id_pool_types.go b/src/cmd/services/m3coordinator/downsample/id_pool_types.go
index 9d7b208efb..bef3b71cb1 100644
--- a/src/cmd/services/m3coordinator/downsample/id_pool_types.go
+++ b/src/cmd/services/m3coordinator/downsample/id_pool_types.go
@@ -197,9 +197,7 @@ func (p *rollupIDProvider) Err() error {
return nil
}
-func (p *rollupIDProvider) Close() {
- // No-op
-}
+func (p *rollupIDProvider) Close() {}
func (p *rollupIDProvider) Len() int {
return len(p.tagPairs) + 2
@@ -215,6 +213,10 @@ func (p *rollupIDProvider) Duplicate() ident.TagIterator {
return duplicate
}
+func (p *rollupIDProvider) Rewind() {
+ p.index = -1
+}
+
type rollupIDProviderPool struct {
tagEncoderPool serialize.TagEncoderPool
pool pool.ObjectPool
diff --git a/src/cmd/services/m3coordinator/downsample/metrics_appender.go b/src/cmd/services/m3coordinator/downsample/metrics_appender.go
index 4cf106732b..4f82fbd2ea 100644
--- a/src/cmd/services/m3coordinator/downsample/metrics_appender.go
+++ b/src/cmd/services/m3coordinator/downsample/metrics_appender.go
@@ -23,16 +23,24 @@ package downsample
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
"sort"
"time"
"github.com/m3db/m3/src/aggregator/aggregator"
"github.com/m3db/m3/src/aggregator/client"
+ "github.com/m3db/m3/src/metrics/aggregation"
"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
"github.com/m3db/m3/src/metrics/matcher"
"github.com/m3db/m3/src/metrics/metadata"
+ "github.com/m3db/m3/src/metrics/metric"
+ "github.com/m3db/m3/src/metrics/policy"
+ "github.com/m3db/m3/src/query/graphite/graphite"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/x/clock"
+ "github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/serialize"
"github.com/golang/protobuf/jsonpb"
@@ -40,11 +48,49 @@ import (
"go.uber.org/zap/zapcore"
)
+var errNoTags = errors.New("no tags provided")
+
+type metricsAppenderPool struct {
+ pool pool.ObjectPool
+}
+
+func newMetricsAppenderPool(opts pool.ObjectPoolOptions) *metricsAppenderPool {
+ p := &metricsAppenderPool{
+ pool: pool.NewObjectPool(opts),
+ }
+ p.pool.Init(func() interface{} {
+ return newMetricsAppender(p)
+ })
+ return p
+}
+
+func (p *metricsAppenderPool) Get() *metricsAppender {
+ appender := p.pool.Get().(*metricsAppender)
+ // NB: reset appender.
+ appender.NextMetric()
+ return appender
+}
+
+func (p *metricsAppenderPool) Put(v *metricsAppender) {
+ p.pool.Put(v)
+}
+
type metricsAppender struct {
metricsAppenderOptions
- tags *tags
- multiSamplesAppender *multiSamplesAppender
+ pool *metricsAppenderPool
+
+ multiSamplesAppender *multiSamplesAppender
+ curr metadata.StagedMetadata
+ defaultStagedMetadatasCopies []metadata.StagedMetadatas
+ mappingRuleStoragePolicies []policy.StoragePolicy
+
+ cachedEncoders []serialize.TagEncoder
+ inuseEncoders []serialize.TagEncoder
+
+ originalTags *tags
+ cachedTags []*tags
+ inuseTags []*tags
}
// metricsAppenderOptions will have one of agg or clientRemote set.
@@ -52,46 +98,88 @@ type metricsAppenderOptions struct {
agg aggregator.Aggregator
clientRemote client.Client
- defaultStagedMetadatas []metadata.StagedMetadatas
- tagEncoder serialize.TagEncoder
- matcher matcher.Matcher
- metricTagsIteratorPool serialize.MetricTagsIteratorPool
+ defaultStagedMetadatasProtos []metricpb.StagedMetadatas
+ matcher matcher.Matcher
+ tagEncoderPool serialize.TagEncoderPool
+ metricTagsIteratorPool serialize.MetricTagsIteratorPool
+ augmentM3Tags bool
clockOpts clock.Options
debugLogging bool
logger *zap.Logger
}
-func newMetricsAppender(opts metricsAppenderOptions) *metricsAppender {
+func newMetricsAppender(pool *metricsAppenderPool) *metricsAppender {
return &metricsAppender{
- metricsAppenderOptions: opts,
- tags: newTags(),
- multiSamplesAppender: newMultiSamplesAppender(),
+ pool: pool,
+ multiSamplesAppender: newMultiSamplesAppender(),
+ }
+}
+
+// reset is called when pulled from the pool.
+func (a *metricsAppender) reset(opts metricsAppenderOptions) {
+ a.metricsAppenderOptions = opts
+
+ // Copy over any previous inuse encoders to the cached encoders list.
+ a.resetEncoders()
+
+ // Make sure a.defaultStagedMetadatasCopies is right length.
+ capRequired := len(opts.defaultStagedMetadatasProtos)
+ if cap(a.defaultStagedMetadatasCopies) < capRequired {
+ // Too short, reallocate.
+ slice := make([]metadata.StagedMetadatas, capRequired)
+ a.defaultStagedMetadatasCopies = slice
+ } else {
+ // Has enough capacity, take subslice.
+ slice := a.defaultStagedMetadatasCopies[:capRequired]
+ a.defaultStagedMetadatasCopies = slice
}
}
func (a *metricsAppender) AddTag(name, value []byte) {
- a.tags.append(name, value)
+ if a.originalTags == nil {
+ a.originalTags = a.tags()
+ }
+ a.originalTags.append(name, value)
}
-func (a *metricsAppender) SamplesAppender(opts SampleAppenderOptions) (SamplesAppender, error) {
+func (a *metricsAppender) SamplesAppender(opts SampleAppenderOptions) (SamplesAppenderResult, error) {
+ if a.originalTags == nil {
+ return SamplesAppenderResult{}, errNoTags
+ }
+ tags := a.originalTags
+
+ // Augment tags if necessary.
+ if a.augmentM3Tags {
+ // NB (@shreyas): Add the metric type tag. The tag has the prefix
+ // __m3_. All tags with that prefix are only used for the purpose of
+		// filter matching and are then stripped off before we actually send
+		// to the aggregator.
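+		// Illustratively, a counter would then match a rule filter such as
+		// "__m3_type__:counter" during forward matching; the exact tag
+		// literals are defined by the metric package constants used below.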
+ switch opts.MetricType {
+ case ts.MetricTypeCounter:
+ tags.append(metric.M3TypeTag, metric.M3CounterValue)
+ case ts.MetricTypeGauge:
+ tags.append(metric.M3TypeTag, metric.M3GaugeValue)
+ case ts.MetricTypeTimer:
+ tags.append(metric.M3TypeTag, metric.M3TimerValue)
+ }
+ }
+
// Sort tags
- sort.Sort(a.tags)
+ sort.Sort(tags)
// Encode tags and compute a temporary (unowned) ID
- a.tagEncoder.Reset()
- if err := a.tagEncoder.Encode(a.tags); err != nil {
- return nil, err
+ tagEncoder := a.tagEncoder()
+ if err := tagEncoder.Encode(tags); err != nil {
+ return SamplesAppenderResult{}, err
}
- data, ok := a.tagEncoder.Data()
+ data, ok := tagEncoder.Data()
if !ok {
- return nil, fmt.Errorf("unable to encode tags: names=%v, values=%v",
- a.tags.names, a.tags.values)
+ return SamplesAppenderResult{}, fmt.Errorf("unable to encode tags: names=%v, values=%v",
+ tags.names, tags.values)
}
a.multiSamplesAppender.reset()
unownedID := data.Bytes()
-
// Match policies and rollups and build samples appender
id := a.metricTagsIteratorPool.Get()
id.Reset(unownedID)
@@ -102,49 +190,163 @@ func (a *metricsAppender) SamplesAppender(opts SampleAppenderOptions) (SamplesAp
matchResult := a.matcher.ForwardMatch(id, fromNanos, toNanos)
id.Close()
+ // If we augmented metrics tags before running the forward match, then
+ // filter them out.
+ if a.augmentM3Tags {
+ tags.filterPrefix(metric.M3MetricsPrefix)
+ }
+
+ var dropApplyResult metadata.ApplyOrRemoveDropPoliciesResult
if opts.Override {
+ // Reuse a slice to keep the current staged metadatas we will apply.
+ a.curr.Pipelines = a.curr.Pipelines[:0]
+
for _, rule := range opts.OverrideRules.MappingRules {
stagedMetadatas, err := rule.StagedMetadatas()
if err != nil {
- return nil, err
+ return SamplesAppenderResult{}, err
}
a.debugLogMatch("downsampler applying override mapping rule",
debugLogMatchOptions{Meta: stagedMetadatas})
- a.multiSamplesAppender.addSamplesAppender(samplesAppender{
- agg: a.agg,
- clientRemote: a.clientRemote,
- unownedID: unownedID,
- stagedMetadatas: stagedMetadatas,
- })
+ pipelines := stagedMetadatas[len(stagedMetadatas)-1]
+ a.curr.Pipelines =
+ append(a.curr.Pipelines, pipelines.Pipelines...)
+ }
+
+ if err := a.addSamplesAppenders(tags, a.curr, unownedID); err != nil {
+ return SamplesAppenderResult{}, err
}
} else {
- // Always aggregate any default staged metadats
- for _, stagedMetadatas := range a.defaultStagedMetadatas {
+ // Reuse a slice to keep the current staged metadatas we will apply.
+ a.curr.Pipelines = a.curr.Pipelines[:0]
+
+ // NB(r): First apply mapping rules to see which storage policies
+ // have been applied, any that have been applied as part of
+ // mapping rules that exact match a default storage policy will be
+ // skipped when applying default rules, so as to avoid storing
+ // the same metrics in the same namespace with the same metric
+ // name and tags (i.e. overwriting each other).
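+		// For example, if a mapping rule has already aggregated a metric
+		// with a 10s:48h storage policy and a default staged metadata also
+		// carries 10s:48h, the default entry is filtered out below so the
+		// metric is aggregated only once for that policy.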
+ a.mappingRuleStoragePolicies = a.mappingRuleStoragePolicies[:0]
+
+ mappingRuleStagedMetadatas := matchResult.ForExistingIDAt(nowNanos)
+ if !mappingRuleStagedMetadatas.IsDefault() && len(mappingRuleStagedMetadatas) != 0 {
+ a.debugLogMatch("downsampler applying matched mapping rule",
+ debugLogMatchOptions{Meta: mappingRuleStagedMetadatas})
+
+ // Collect all the current active mapping rules
+ for _, stagedMetadata := range mappingRuleStagedMetadatas {
+ for _, pipe := range stagedMetadata.Pipelines {
+ for _, sp := range pipe.StoragePolicies {
+ a.mappingRuleStoragePolicies =
+ append(a.mappingRuleStoragePolicies, sp)
+ }
+ }
+ }
+
+ // Only sample if going to actually aggregate
+ pipelines := mappingRuleStagedMetadatas[len(mappingRuleStagedMetadatas)-1]
+ a.curr.Pipelines =
+ append(a.curr.Pipelines, pipelines.Pipelines...)
+ }
+
+ // Always aggregate any default staged metadatas (unless
+ // mapping rule has provided an override for a storage policy,
+ // if so then skip aggregating for that storage policy).
+ for idx, stagedMetadatasProto := range a.defaultStagedMetadatasProtos {
+ // NB(r): Need to take copy of default staged metadatas as we
+			// sometimes mutate them.
+ stagedMetadatas := a.defaultStagedMetadatasCopies[idx]
+ err := stagedMetadatas.FromProto(stagedMetadatasProto)
+ if err != nil {
+ return SamplesAppenderResult{},
+ fmt.Errorf("unable to copy default staged metadatas: %v", err)
+ }
+
+ // Save the staged metadatas back to the idx so all slices can be reused.
+ a.defaultStagedMetadatasCopies[idx] = stagedMetadatas
+
+ stagedMetadataBeforeFilter := stagedMetadatas[:]
+ if len(a.mappingRuleStoragePolicies) != 0 {
+ // If mapping rules have applied aggregations for
+ // storage policies then de-dupe so we don't have two
+ // active aggregations for the same storage policy.
+ stagedMetadatasAfterFilter := stagedMetadatas[:0]
+ for _, stagedMetadata := range stagedMetadatas {
+ pipesAfterFilter := stagedMetadata.Pipelines[:0]
+ for _, pipe := range stagedMetadata.Pipelines {
+ storagePoliciesAfterFilter := pipe.StoragePolicies[:0]
+ for _, sp := range pipe.StoragePolicies {
+ // Check aggregation for storage policy not already
+ // set by a mapping rule.
+ matchedByMappingRule := false
+ for _, existing := range a.mappingRuleStoragePolicies {
+ if sp.Equivalent(existing) {
+ matchedByMappingRule = true
+ a.debugLogMatch("downsampler skipping default mapping rule storage policy",
+ debugLogMatchOptions{Meta: stagedMetadataBeforeFilter})
+ break
+ }
+ }
+ if !matchedByMappingRule {
+ // Keep storage policy if not matched by mapping rule.
+ storagePoliciesAfterFilter =
+ append(storagePoliciesAfterFilter, sp)
+ }
+ }
+
+ // Update storage policies slice after filtering.
+ pipe.StoragePolicies = storagePoliciesAfterFilter
+
+ if len(pipe.StoragePolicies) != 0 {
+					// Keep the pipeline if it still has storage policies.
+ pipesAfterFilter = append(pipesAfterFilter, pipe)
+ }
+ }
+
+				// Update pipelines after filtering.
+ stagedMetadata.Pipelines = pipesAfterFilter
+
+ if len(stagedMetadata.Pipelines) != 0 {
+ // Keep staged metadata if still has some pipelines.
+ stagedMetadatasAfterFilter =
+ append(stagedMetadatasAfterFilter, stagedMetadata)
+ }
+ }
+
+ // Finally set the staged metadatas we're keeping
+ // as those that were kept after filtering.
+ stagedMetadatas = stagedMetadatasAfterFilter
+ }
+
+			// Now skip appending if there are no staged metadatas left after
+			// any filtering that was applied.
+ if len(stagedMetadatas) == 0 {
+ a.debugLogMatch("downsampler skipping default mapping rule completely",
+ debugLogMatchOptions{Meta: stagedMetadataBeforeFilter})
+ continue
+ }
+
a.debugLogMatch("downsampler applying default mapping rule",
debugLogMatchOptions{Meta: stagedMetadatas})
- a.multiSamplesAppender.addSamplesAppender(samplesAppender{
- agg: a.agg,
- clientRemote: a.clientRemote,
- unownedID: unownedID,
- stagedMetadatas: stagedMetadatas,
- })
+ pipelines := stagedMetadatas[len(stagedMetadatas)-1]
+ a.curr.Pipelines =
+ append(a.curr.Pipelines, pipelines.Pipelines...)
}
- stagedMetadatas := matchResult.ForExistingIDAt(nowNanos)
- if !stagedMetadatas.IsDefault() && len(stagedMetadatas) != 0 {
- a.debugLogMatch("downsampler applying matched mapping rule",
- debugLogMatchOptions{Meta: stagedMetadatas})
+		// Apply or remove any drop policies.
+ a.curr.Pipelines, dropApplyResult = a.curr.Pipelines.ApplyOrRemoveDropPolicies()
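+		// (Assumed semantics: a drop policy either takes effect, in which
+		// case no effective pipelines remain, or is removed when other
+		// non-drop pipelines also matched; dropApplyResult records which.)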
- // Only sample if going to actually aggregate
- a.multiSamplesAppender.addSamplesAppender(samplesAppender{
- agg: a.agg,
- clientRemote: a.clientRemote,
- unownedID: unownedID,
- stagedMetadatas: stagedMetadatas,
- })
+ if len(a.curr.Pipelines) > 0 && !a.curr.IsDropPolicyApplied() {
+ // Send to downsampler if we have something in the pipeline.
+ a.debugLogMatch("downsampler using built mapping staged metadatas",
+ debugLogMatchOptions{Meta: []metadata.StagedMetadata{a.curr}})
+
+ if err := a.addSamplesAppenders(tags, a.curr, unownedID); err != nil {
+ return SamplesAppenderResult{}, err
+ }
}
numRollups := matchResult.NumNewRollupIDs()
@@ -152,8 +354,7 @@ func (a *metricsAppender) SamplesAppender(opts SampleAppenderOptions) (SamplesAp
rollup := matchResult.ForNewRollupIDsAt(i, nowNanos)
a.debugLogMatch("downsampler applying matched rollup rule",
- debugLogMatchOptions{Meta: stagedMetadatas, RollupID: rollup.ID})
-
+ debugLogMatchOptions{Meta: rollup.Metadatas, RollupID: rollup.ID})
a.multiSamplesAppender.addSamplesAppender(samplesAppender{
agg: a.agg,
clientRemote: a.clientRemote,
@@ -163,12 +364,17 @@ func (a *metricsAppender) SamplesAppender(opts SampleAppenderOptions) (SamplesAp
}
}
- return a.multiSamplesAppender, nil
+ dropPolicyApplied := dropApplyResult != metadata.NoDropPolicyPresentResult
+ return SamplesAppenderResult{
+ SamplesAppender: a.multiSamplesAppender,
+ IsDropPolicyApplied: dropPolicyApplied,
+ }, nil
}
type debugLogMatchOptions struct {
- Meta metadata.StagedMetadatas
- RollupID []byte
+ Meta metadata.StagedMetadatas
+ StoragePolicy policy.StoragePolicy
+ RollupID []byte
}
func (a *metricsAppender) debugLogMatch(str string, opts debugLogMatchOptions) {
@@ -176,7 +382,7 @@ func (a *metricsAppender) debugLogMatch(str string, opts debugLogMatchOptions) {
return
}
fields := []zapcore.Field{
- zap.String("tags", a.tags.String()),
+ zap.String("tags", a.originalTags.String()),
}
if v := opts.RollupID; v != nil {
fields = append(fields, zap.ByteString("rollupID", v))
@@ -184,17 +390,217 @@ func (a *metricsAppender) debugLogMatch(str string, opts debugLogMatchOptions) {
if v := opts.Meta; v != nil {
fields = append(fields, stagedMetadatasLogField(v))
}
+ if v := opts.StoragePolicy; v != policy.EmptyStoragePolicy {
+ fields = append(fields, zap.Stringer("storagePolicy", v))
+ }
a.logger.Debug(str, fields...)
}
-func (a *metricsAppender) Reset() {
- a.tags.names = a.tags.names[:0]
- a.tags.values = a.tags.values[:0]
+func (a *metricsAppender) NextMetric() {
+ // Move the inuse encoders to cached as we should be done with using them.
+ a.resetEncoders()
+ a.resetTags()
}
func (a *metricsAppender) Finalize() {
- a.tagEncoder.Finalize()
- a.tagEncoder = nil
+ // Return to pool.
+ a.pool.Put(a)
+}
+
+func (a *metricsAppender) tagEncoder() serialize.TagEncoder {
+	// Take an encoder from the cached encoder list; if none is present
+	// get one from the pool. Add the returned encoder to the in-use list.
+ var tagEncoder serialize.TagEncoder
+ if len(a.cachedEncoders) == 0 {
+ tagEncoder = a.tagEncoderPool.Get()
+ } else {
+ l := len(a.cachedEncoders)
+ tagEncoder = a.cachedEncoders[l-1]
+ a.cachedEncoders = a.cachedEncoders[:l-1]
+ }
+ a.inuseEncoders = append(a.inuseEncoders, tagEncoder)
+ tagEncoder.Reset()
+ return tagEncoder
+}
+
+func (a *metricsAppender) tags() *tags {
+	// Take a tags struct from the cached list; if none is present
+	// allocate a new one. Add the returned tags to the in-use list.
+ var t *tags
+ if len(a.cachedTags) == 0 {
+ t = newTags()
+ } else {
+ l := len(a.cachedTags)
+ t = a.cachedTags[l-1]
+ a.cachedTags = a.cachedTags[:l-1]
+ }
+ a.inuseTags = append(a.inuseTags, t)
+ t.names = t.names[:0]
+ t.values = t.values[:0]
+ t.reset()
+ return t
+}
+
+func (a *metricsAppender) resetEncoders() {
+ a.cachedEncoders = append(a.cachedEncoders, a.inuseEncoders...)
+ for i := range a.inuseEncoders {
+ a.inuseEncoders[i] = nil
+ }
+ a.inuseEncoders = a.inuseEncoders[:0]
+}
+
+func (a *metricsAppender) resetTags() {
+ a.cachedTags = append(a.cachedTags, a.inuseTags...)
+ for i := range a.inuseTags {
+ a.inuseTags[i] = nil
+ }
+ a.inuseTags = a.inuseTags[:0]
+ a.originalTags = nil
+}
+
+func (a *metricsAppender) addSamplesAppenders(
+ originalTags *tags,
+ stagedMetadata metadata.StagedMetadata,
+ unownedID []byte,
+) error {
+ // Check if any of the pipelines have tags or a graphite prefix to set.
+ var tagsExist bool
+ for _, pipeline := range stagedMetadata.Pipelines {
+ if len(pipeline.Tags) > 0 || len(pipeline.GraphitePrefix) > 0 {
+ tagsExist = true
+ break
+ }
+ }
+
+ // If we do not need to do any tag augmentation then just return.
+ if !a.augmentM3Tags && !tagsExist {
+ a.multiSamplesAppender.addSamplesAppender(samplesAppender{
+ agg: a.agg,
+ clientRemote: a.clientRemote,
+ unownedID: unownedID,
+ stagedMetadatas: []metadata.StagedMetadata{stagedMetadata},
+ })
+ return nil
+ }
+
+ var (
+ pipelines []metadata.PipelineMetadata
+ )
+ for _, pipeline := range stagedMetadata.Pipelines {
+		// For pipelines which have tags to augment we generate and send
+		// separate IDs; other pipelines reuse the original ID.
+ pipeline := pipeline
+ if len(pipeline.Tags) == 0 && len(pipeline.GraphitePrefix) == 0 {
+ pipelines = append(pipelines, pipeline)
+ continue
+ }
+
+ tags := a.augmentTags(originalTags, pipeline.GraphitePrefix, pipeline.Tags, pipeline.AggregationID)
+
+ sm := stagedMetadata
+ sm.Pipelines = []metadata.PipelineMetadata{pipeline}
+
+ appender, err := a.newSamplesAppender(tags, sm)
+ if err != nil {
+ return err
+ }
+ a.multiSamplesAppender.addSamplesAppender(appender)
+ }
+
+ if len(pipelines) == 0 {
+ return nil
+ }
+
+ sm := stagedMetadata
+ sm.Pipelines = pipelines
+
+ appender, err := a.newSamplesAppender(originalTags, sm)
+ if err != nil {
+ return err
+ }
+ a.multiSamplesAppender.addSamplesAppender(appender)
+ return nil
+}
+
+func (a *metricsAppender) newSamplesAppender(
+ tags *tags,
+ sm metadata.StagedMetadata,
+) (samplesAppender, error) {
+ tagEncoder := a.tagEncoder()
+ if err := tagEncoder.Encode(tags); err != nil {
+ return samplesAppender{}, err
+ }
+ data, ok := tagEncoder.Data()
+ if !ok {
+ return samplesAppender{}, fmt.Errorf("unable to encode tags: names=%v, values=%v", tags.names, tags.values)
+ }
+ return samplesAppender{
+ agg: a.agg,
+ clientRemote: a.clientRemote,
+ unownedID: data.Bytes(),
+ stagedMetadatas: []metadata.StagedMetadata{sm},
+ }, nil
+}
+
+func (a *metricsAppender) augmentTags(
+ originalTags *tags,
+ graphitePrefix [][]byte,
+ t []models.Tag,
+ id aggregation.ID,
+) *tags {
+ // Create the prefix tags if any.
+ tags := a.tags()
+ for i, path := range graphitePrefix {
+ // Add the graphite prefix as the initial graphite tags.
+ tags.append(graphite.TagName(i), path)
+ }
+
+ // Make a copy of the tags to augment.
+ prefixes := len(graphitePrefix)
+ for i := range originalTags.names {
+		// If we applied prefixes then we need to parse and modify the
+		// original tags: if the original tag is a graphite tag, add the
+		// number of prefixes to its index and update it.
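+		// For example, with a single graphite prefix the original __g0__
+		// tag becomes __g1__, leaving __g0__ for the prefix.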
+ var (
+ name = originalTags.names[i]
+ value = originalTags.values[i]
+ )
+ if prefixes > 0 {
+ // If the tag seen is a graphite tag then offset it based on number
+ // of prefixes we have seen.
+ if index, ok := graphite.TagIndex(name); ok {
+ name = graphite.TagName(index + prefixes)
+ }
+ }
+ tags.append(name, value)
+ }
+
+ // Add any additional tags we need to.
+ for _, tag := range t {
+		// If the tag is not a special tag, then just add it.
+ if !bytes.HasPrefix(tag.Name, metric.M3MetricsPrefix) {
+ if len(tag.Name) > 0 && len(tag.Value) > 0 {
+ tags.append(tag.Name, tag.Value)
+ }
+ continue
+ }
+
+ // Handle m3 special tags.
+ if bytes.Equal(tag.Name, metric.M3MetricsGraphiteAggregation) {
+ // Add the aggregation tag as the last graphite tag.
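+			// For example, a metric of the form {__g0__:stats __g1__:timer}
+			// with a Sum aggregation would gain __g2__:Sum (mirroring the
+			// MappingRuleConfiguration docs in options.go).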
+ types, err := id.Types()
+ if err != nil || len(types) == 0 {
+ continue
+ }
+ var (
+ count = tags.countPrefix(graphite.Prefix)
+ name = graphite.TagName(count)
+ value = types[0].Bytes()
+ )
+ tags.append(name, value)
+ }
+ }
+ return tags
}
func stagedMetadatasLogField(sm metadata.StagedMetadatas) zapcore.Field {
diff --git a/src/cmd/services/m3coordinator/downsample/metrics_appender_test.go b/src/cmd/services/m3coordinator/downsample/metrics_appender_test.go
new file mode 100644
index 0000000000..af3463c592
--- /dev/null
+++ b/src/cmd/services/m3coordinator/downsample/metrics_appender_test.go
@@ -0,0 +1,156 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package downsample
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/m3db/m3/src/aggregator/aggregator"
+ "github.com/m3db/m3/src/metrics/matcher"
+ "github.com/m3db/m3/src/metrics/metadata"
+ "github.com/m3db/m3/src/metrics/metric/id"
+ "github.com/m3db/m3/src/metrics/metric/unaggregated"
+ "github.com/m3db/m3/src/metrics/rules"
+ "github.com/m3db/m3/src/x/checked"
+ "github.com/m3db/m3/src/x/pool"
+ "github.com/m3db/m3/src/x/serialize"
+ xtest "github.com/m3db/m3/src/x/test"
+
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSamplesAppenderPoolResetsTagsAcrossSamples(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ count := 3
+
+ poolOpts := pool.NewObjectPoolOptions().SetSize(1)
+ appenderPool := newMetricsAppenderPool(poolOpts)
+
+ tagEncoderPool := serialize.NewTagEncoderPool(serialize.NewTagEncoderOptions(),
+ poolOpts)
+ tagEncoderPool.Init()
+
+ size := 1
+ tagDecoderPool := serialize.NewTagDecoderPool(
+ serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{
+ CheckBytesWrapperPoolSize: &size,
+ }), poolOpts)
+ tagDecoderPool.Init()
+
+ metricTagsIteratorPool := serialize.NewMetricTagsIteratorPool(tagDecoderPool, poolOpts)
+ metricTagsIteratorPool.Init()
+
+ for i := 0; i < count; i++ {
+ matcher := matcher.NewMockMatcher(ctrl)
+ matcher.EXPECT().ForwardMatch(gomock.Any(), gomock.Any(), gomock.Any()).
+ DoAndReturn(func(encodedID id.ID, _, _ int64) rules.MatchResult {
+ // NB: ensure tags are cleared correctly between runs.
+ bs := encodedID.Bytes()
+
+ decoder := tagDecoderPool.Get()
+ decoder.Reset(checked.NewBytes(bs, nil))
+
+ var id string
+ for decoder.Next() {
+ tag := decoder.Current()
+ tagStr := fmt.Sprintf("%s-%s", tag.Name.String(), tag.Value.String())
+ if len(id) == 0 {
+ id = tagStr
+ } else {
+ id = fmt.Sprintf("%s,%s", id, tagStr)
+ }
+ }
+
+ decoder.Close()
+ return rules.NewMatchResult(1, 1,
+ metadata.StagedMetadatas{},
+ []rules.IDWithMetadatas{
+ {
+ ID: []byte(id),
+ Metadatas: metadata.StagedMetadatas{},
+ },
+ },
+ )
+ })
+
+ appender := appenderPool.Get()
+ agg := aggregator.NewMockAggregator(ctrl)
+ appender.reset(metricsAppenderOptions{
+ tagEncoderPool: tagEncoderPool,
+ metricTagsIteratorPool: metricTagsIteratorPool,
+ matcher: matcher,
+ agg: agg,
+ })
+ name := []byte(fmt.Sprint("foo", i))
+ value := []byte(fmt.Sprint("bar", i))
+ appender.AddTag(name, value)
+ a, err := appender.SamplesAppender(SampleAppenderOptions{})
+ require.NoError(t, err)
+
+ agg.EXPECT().AddUntimed(gomock.Any(), gomock.Any()).DoAndReturn(
+ func(u unaggregated.MetricUnion, _ metadata.StagedMetadatas) error {
+ if u.CounterVal != int64(i) {
+ return errors.New("wrong counter value")
+ }
+
+ // NB: expected ID is generated into human-readable form
+ // from tags in ForwardMatch mock above.
+ expected := fmt.Sprintf("foo%d-bar%d", i, i)
+ if expected != u.ID.String() {
+ // NB: if this fails, appender is holding state after Finalize.
+ return fmt.Errorf("expected ID %s, got %s", expected, u.ID.String())
+ }
+
+ return nil
+ },
+ )
+
+ require.NoError(t, a.SamplesAppender.AppendCounterSample(int64(i)))
+
+ assert.False(t, a.IsDropPolicyApplied)
+ appender.Finalize()
+ }
+}
+
+func TestSamplesAppenderPoolResetsTagSimple(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ poolOpts := pool.NewObjectPoolOptions().SetSize(1)
+ appenderPool := newMetricsAppenderPool(poolOpts)
+
+ appender := appenderPool.Get()
+ appender.AddTag([]byte("foo"), []byte("bar"))
+ assert.Equal(t, 1, len(appender.originalTags.names))
+ assert.Equal(t, 1, len(appender.originalTags.values))
+ appender.Finalize()
+
+ // NB: getting a new appender from the pool yields a clean appender.
+ appender = appenderPool.Get()
+ assert.Nil(t, appender.originalTags)
+ appender.Finalize()
+}
diff --git a/src/cmd/services/m3coordinator/downsample/options.go b/src/cmd/services/m3coordinator/downsample/options.go
index fdae1af32c..e11fcca469 100644
--- a/src/cmd/services/m3coordinator/downsample/options.go
+++ b/src/cmd/services/m3coordinator/downsample/options.go
@@ -24,6 +24,7 @@ import (
"errors"
"fmt"
"runtime"
+ "strings"
"time"
"github.com/m3db/m3/src/aggregator/aggregator"
@@ -39,12 +40,14 @@ import (
"github.com/m3db/m3/src/metrics/aggregation"
"github.com/m3db/m3/src/metrics/filters"
"github.com/m3db/m3/src/metrics/generated/proto/aggregationpb"
+ "github.com/m3db/m3/src/metrics/generated/proto/metricpb"
"github.com/m3db/m3/src/metrics/generated/proto/pipelinepb"
"github.com/m3db/m3/src/metrics/generated/proto/rulepb"
"github.com/m3db/m3/src/metrics/generated/proto/transformationpb"
"github.com/m3db/m3/src/metrics/matcher"
"github.com/m3db/m3/src/metrics/matcher/cache"
"github.com/m3db/m3/src/metrics/metadata"
+ "github.com/m3db/m3/src/metrics/metric"
"github.com/m3db/m3/src/metrics/metric/aggregated"
"github.com/m3db/m3/src/metrics/metric/id"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
@@ -60,6 +63,7 @@ import (
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/serialize"
xsync "github.com/m3db/m3/src/x/sync"
@@ -82,34 +86,37 @@ const (
var (
numShards = runtime.NumCPU()
- errNoStorage = errors.New("dynamic downsampling enabled with storage not set")
- errNoClusterClient = errors.New("dynamic downsampling enabled with cluster client not set")
- errNoRulesStore = errors.New("dynamic downsampling enabled with rules store not set")
- errNoClockOptions = errors.New("dynamic downsampling enabled with clock options not set")
- errNoInstrumentOptions = errors.New("dynamic downsampling enabled with instrument options not set")
- errNoTagEncoderOptions = errors.New("dynamic downsampling enabled with tag encoder options not set")
- errNoTagDecoderOptions = errors.New("dynamic downsampling enabled with tag decoder options not set")
- errNoTagEncoderPoolOptions = errors.New("dynamic downsampling enabled with tag encoder pool options not set")
- errNoTagDecoderPoolOptions = errors.New("dynamic downsampling enabled with tag decoder pool options not set")
- errRollupRuleNoTransforms = errors.New("rollup rule has no transforms set")
+ errNoStorage = errors.New("downsampling enabled with storage not set")
+ errNoClusterClient = errors.New("downsampling enabled with cluster client not set")
+ errNoRulesStore = errors.New("downsampling enabled with rules store not set")
+ errNoClockOptions = errors.New("downsampling enabled with clock options not set")
+ errNoInstrumentOptions = errors.New("downsampling enabled with instrument options not set")
+ errNoTagEncoderOptions = errors.New("downsampling enabled with tag encoder options not set")
+ errNoTagDecoderOptions = errors.New("downsampling enabled with tag decoder options not set")
+ errNoTagEncoderPoolOptions = errors.New("downsampling enabled with tag encoder pool options not set")
+ errNoTagDecoderPoolOptions = errors.New("downsampling enabled with tag decoder pool options not set")
+ errNoMetricsAppenderPoolOptions = errors.New("downsampling enabled with metrics appender pool options not set")
+ errRollupRuleNoTransforms = errors.New("rollup rule has no transforms set")
)
// DownsamplerOptions is a set of required downsampler options.
type DownsamplerOptions struct {
- Storage storage.Storage
- StorageFlushConcurrency int
- ClusterClient clusterclient.Client
- RulesKVStore kv.Store
- AutoMappingRules []AutoMappingRule
- NameTag string
- ClockOptions clock.Options
- InstrumentOptions instrument.Options
- TagEncoderOptions serialize.TagEncoderOptions
- TagDecoderOptions serialize.TagDecoderOptions
- TagEncoderPoolOptions pool.ObjectPoolOptions
- TagDecoderPoolOptions pool.ObjectPoolOptions
- OpenTimeout time.Duration
- TagOptions models.TagOptions
+ Storage storage.Storage
+ StorageFlushConcurrency int
+ ClusterClient clusterclient.Client
+ RulesKVStore kv.Store
+ AutoMappingRules []AutoMappingRule
+ NameTag string
+ ClockOptions clock.Options
+ InstrumentOptions instrument.Options
+ TagEncoderOptions serialize.TagEncoderOptions
+ TagDecoderOptions serialize.TagDecoderOptions
+ TagEncoderPoolOptions pool.ObjectPoolOptions
+ TagDecoderPoolOptions pool.ObjectPoolOptions
+ OpenTimeout time.Duration
+ TagOptions models.TagOptions
+ MetricsAppenderPoolOptions pool.ObjectPoolOptions
+ RWOptions xio.Options
}
// AutoMappingRule is a mapping rule to apply to metrics.
@@ -168,6 +175,9 @@ func (o DownsamplerOptions) validate() error {
if o.TagDecoderPoolOptions == nil {
return errNoTagDecoderPoolOptions
}
+ if o.MetricsAppenderPoolOptions == nil {
+ return errNoMetricsAppenderPoolOptions
+ }
return nil
}
@@ -177,14 +187,18 @@ type agg struct {
aggregator aggregator.Aggregator
clientRemote client.Client
- defaultStagedMetadatas []metadata.StagedMetadatas
- clockOpts clock.Options
- matcher matcher.Matcher
- pools aggPools
+ defaultStagedMetadatasProtos []metricpb.StagedMetadatas
+ clockOpts clock.Options
+ matcher matcher.Matcher
+ pools aggPools
+ m3PrefixFilter bool
}
// Configuration configurates a downsampler.
type Configuration struct {
+ // Matcher is the configuration for the downsampler matcher.
+ Matcher MatcherConfiguration `yaml:"matcher"`
+
// Rules is a set of downsample rules. If set, this overrides any rules set
// in the KV store (and the rules in KV store are not evaluated at all).
Rules *RulesConfiguration `yaml:"rules"`
@@ -211,6 +225,21 @@ type Configuration struct {
// EntryTTL determines how long an entry remains alive before it may be expired due to inactivity.
EntryTTL time.Duration `yaml:"entryTTL"`
+
+ // DisableAutoMappingRules disables auto mapping rules.
+ DisableAutoMappingRules bool `yaml:"disableAutoMappingRules"`
+}
+
+// MatcherConfiguration is the configuration for the rule matcher.
+type MatcherConfiguration struct {
+ // Cache if non-zero will set the capacity of the rules matching cache.
+ Cache MatcherCacheConfiguration `yaml:"cache"`
+}
+
+// MatcherCacheConfiguration is the configuration for the rule matcher cache.
+type MatcherCacheConfiguration struct {
+ // Capacity if non-zero will set the capacity of the rules matching cache.
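+	// An illustrative YAML sketch (the capacity value is hypothetical):
+	//   matcher:
+	//     cache:
+	//       capacity: 100000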
+ Capacity int `yaml:"capacity"`
}
// RulesConfiguration is a set of rules configuration to use for downsampling.
@@ -265,12 +294,30 @@ type MappingRuleConfiguration struct {
// keeping them with a storage policy.
Drop bool `yaml:"drop"`
+ // Tags are the tags to be added to the metric while applying the mapping
+ // rule. Users are free to add name/value combinations to the metric. The
+ // coordinator also supports certain first class tags which will augment
+ // the metric with coordinator generated tag values.
+ // __m3_graphite_aggregation__ as a tag will augment the metric with an
+ // aggregation tag which is required for graphite. If a metric is of the
+ // form {__g0__:stats __g1__:metric __g2__:timer} and we have configured
+ // a P95 aggregation, this option will add __g3__:P95 to the metric.
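+	// An illustrative sketch of the YAML shape (example names and values
+	// are hypothetical):
+	//   tags:
+	//     - name: "deployment"
+	//       value: "edge"
+	//     - name: "__m3_graphite_aggregation__"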
+ Tags []Tag `yaml:"tags"`
+
// Optional fields follow.
// Name is optional.
Name string `yaml:"name"`
}
+// Tag is structure describing tags as used by mapping rule configuration.
+type Tag struct {
+ // Name is the tag name.
+ Name string `yaml:"name"`
+ // Value is the tag value.
+ Value string `yaml:"value"`
+}
+
// Rule returns the mapping rule for the mapping rule configuration.
func (r MappingRuleConfiguration) Rule() (view.MappingRule, error) {
id := uuid.New()
@@ -292,7 +339,15 @@ func (r MappingRuleConfiguration) Rule() (view.MappingRule, error) {
var drop policy.DropPolicy
if r.Drop {
- drop = policy.DropMust
+ drop = policy.DropIfOnlyMatch
+ }
+
+ tags := make([]models.Tag, 0, len(r.Tags))
+ for _, tag := range r.Tags {
+ tags = append(tags, models.Tag{
+ Name: []byte(tag.Name),
+ Value: []byte(tag.Value),
+ })
}
return view.MappingRule{
@@ -302,6 +357,7 @@ func (r MappingRuleConfiguration) Rule() (view.MappingRule, error) {
AggregationID: aggID,
StoragePolicies: storagePolicies,
DropPolicy: drop,
+ Tags: tags,
}, nil
}
@@ -513,11 +569,13 @@ func (c RemoteAggregatorConfiguration) newClient(
kvClient clusterclient.Client,
clockOpts clock.Options,
instrumentOpts instrument.Options,
+ rwOpts xio.Options,
) (client.Client, error) {
if c.clientOverride != nil {
return c.clientOverride, nil
}
- return c.Client.NewClient(kvClient, clockOpts, instrumentOpts)
+
+ return c.Client.NewClient(kvClient, clockOpts, instrumentOpts, rwOpts)
}
// BufferPastLimitConfiguration specifies a custom buffer past limit
@@ -549,13 +607,14 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
}
var (
- storageFlushConcurrency = defaultStorageFlushConcurrency
- clockOpts = o.ClockOptions
- instrumentOpts = o.InstrumentOptions
- scope = instrumentOpts.MetricsScope()
- logger = instrumentOpts.Logger()
- openTimeout = defaultOpenTimeout
- defaultStagedMetadatas []metadata.StagedMetadatas
+ storageFlushConcurrency = defaultStorageFlushConcurrency
+ clockOpts = o.ClockOptions
+ instrumentOpts = o.InstrumentOptions
+ scope = instrumentOpts.MetricsScope()
+ logger = instrumentOpts.Logger()
+ openTimeout = defaultOpenTimeout
+ defaultStagedMetadatasProtos []metricpb.StagedMetadatas
+ m3PrefixFilter = false
)
if o.StorageFlushConcurrency > 0 {
storageFlushConcurrency = o.StorageFlushConcurrency
@@ -568,7 +627,14 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
if err != nil {
return agg{}, err
}
- defaultStagedMetadatas = append(defaultStagedMetadatas, metadatas)
+
+ var metadatasProto metricpb.StagedMetadatas
+ if err := metadatas.ToProto(&metadatasProto); err != nil {
+ return agg{}, err
+ }
+
+ defaultStagedMetadatasProtos =
+ append(defaultStagedMetadatasProtos, metadatasProto)
}
pools := o.newAggregatorPools()
@@ -616,6 +682,9 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
rs := rules.NewEmptyRuleSet(defaultConfigInMemoryNamespace,
updateMetadata)
for _, mappingRule := range cfg.Rules.MappingRules {
+ if strings.Contains(mappingRule.Filter, metric.M3MetricsPrefixString) {
+ m3PrefixFilter = true
+ }
rule, err := mappingRule.Rule()
if err != nil {
return agg{}, err
@@ -628,6 +697,9 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
}
for _, rollupRule := range cfg.Rules.RollupRules {
+ if strings.Contains(rollupRule.Filter, metric.M3MetricsPrefixString) {
+ m3PrefixFilter = true
+ }
rule, err := rollupRule.Rule()
if err != nil {
return agg{}, err
@@ -650,7 +722,12 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
matcherOpts = matcherOpts.SetKVStore(kvTxnMemStore)
}
- matcher, err := o.newAggregatorMatcher(matcherOpts)
+ matcherCacheCapacity := defaultMatcherCacheCapacity
+ if v := cfg.Matcher.Cache.Capacity; v > 0 {
+ matcherCacheCapacity = v
+ }
+
+ matcher, err := o.newAggregatorMatcher(matcherOpts, matcherCacheCapacity)
if err != nil {
return agg{}, err
}
@@ -658,9 +735,15 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
if remoteAgg := cfg.RemoteAggregator; remoteAgg != nil {
		// If downsampling is set up to use a remote aggregator instead of a
		// local aggregator, set that up instead.
- client, err := remoteAgg.newClient(o.ClusterClient, clockOpts,
- instrumentOpts.SetMetricsScope(instrumentOpts.MetricsScope().
- SubScope("remote-aggregator-client")))
+ scope := instrumentOpts.MetricsScope().SubScope("remote-aggregator-client")
+ iOpts := instrumentOpts.SetMetricsScope(scope)
+ rwOpts := o.RWOptions
+ if rwOpts == nil {
+ logger.Info("no rw options set, using default")
+ rwOpts = xio.NewOptions()
+ }
+
+ client, err := remoteAgg.newClient(o.ClusterClient, clockOpts, iOpts, rwOpts)
if err != nil {
err = fmt.Errorf("could not create remote aggregator client: %v", err)
return agg{}, err
@@ -670,10 +753,11 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
}
return agg{
- clientRemote: client,
- defaultStagedMetadatas: defaultStagedMetadatas,
- matcher: matcher,
- pools: pools,
+ clientRemote: client,
+ defaultStagedMetadatasProtos: defaultStagedMetadatasProtos,
+ matcher: matcher,
+ pools: pools,
+ m3PrefixFilter: m3PrefixFilter,
}, nil
}
@@ -719,10 +803,15 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
return bufferForPastTimedMetric(bufferPastLimits, tile)
}
+ maxAllowedForwardingDelayFn := func(tile time.Duration, numForwardedTimes int) time.Duration {
+ return maxAllowedForwardingDelay(bufferPastLimits, tile, numForwardedTimes)
+ }
+
// Finally construct all options.
aggregatorOpts := aggregator.NewOptions().
SetClockOptions(clockOpts).
SetInstrumentOptions(instrumentOpts).
+ SetDefaultStoragePolicies(nil).
SetMetricPrefix(nil).
SetCounterPrefix(nil).
SetGaugePrefix(nil).
@@ -734,6 +823,7 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
SetFlushHandler(flushHandler).
SetBufferForPastTimedMetricFn(bufferForPastTimedMetricFn).
SetBufferForFutureTimedMetric(defaultBufferFutureTimedMetric).
+ SetMaxAllowedForwardingDelayFn(maxAllowedForwardingDelayFn).
SetVerboseErrors(defaultVerboseErrors)
if cfg.EntryTTL != 0 {
@@ -829,10 +919,11 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
}
return agg{
- aggregator: aggregatorInstance,
- defaultStagedMetadatas: defaultStagedMetadatas,
- matcher: matcher,
- pools: pools,
+ aggregator: aggregatorInstance,
+ defaultStagedMetadatasProtos: defaultStagedMetadatasProtos,
+ matcher: matcher,
+ pools: pools,
+ m3PrefixFilter: m3PrefixFilter,
}, nil
}
@@ -840,6 +931,7 @@ type aggPools struct {
tagEncoderPool serialize.TagEncoderPool
tagDecoderPool serialize.TagDecoderPool
metricTagsIteratorPool serialize.MetricTagsIteratorPool
+ metricsAppenderPool *metricsAppenderPool
}
func (o DownsamplerOptions) newAggregatorPools() aggPools {
@@ -855,10 +947,13 @@ func (o DownsamplerOptions) newAggregatorPools() aggPools {
o.TagDecoderPoolOptions)
metricTagsIteratorPool.Init()
+ metricsAppenderPool := newMetricsAppenderPool(o.MetricsAppenderPoolOptions)
+
return aggPools{
tagEncoderPool: tagEncoderPool,
tagDecoderPool: tagDecoderPool,
metricTagsIteratorPool: metricTagsIteratorPool,
+ metricsAppenderPool: metricsAppenderPool,
}
}
@@ -879,7 +974,7 @@ func (o DownsamplerOptions) newAggregatorRulesOptions(pools aggPools) rules.Opti
NameAndTagsFn: func(id []byte) ([]byte, []byte, error) {
name, err := resolveEncodedTagsNameTag(id, pools.metricTagsIteratorPool,
nameTag)
- if err != nil {
+ if err != nil && err != errNoMetricNameTag {
return nil, nil, err
}
// ID is always the encoded tags for IDs in the downsampler
@@ -915,15 +1010,15 @@ func (o DownsamplerOptions) newAggregatorRulesOptions(pools aggPools) rules.Opti
func (o DownsamplerOptions) newAggregatorMatcher(
opts matcher.Options,
+ capacity int,
) (matcher.Matcher, error) {
cacheOpts := cache.NewOptions().
- SetCapacity(defaultMatcherCacheCapacity).
+ SetCapacity(capacity).
SetClockOptions(opts.ClockOptions()).
SetInstrumentOptions(opts.InstrumentOptions().
SetMetricsScope(opts.InstrumentOptions().MetricsScope().SubScope("matcher-cache")))
cache := cache.NewCache(cacheOpts)
-
return matcher.NewMatcher(cache, opts)
}
@@ -1061,6 +1156,14 @@ func (c *aggregatorLocalAdminClient) WriteTimed(
return c.agg.AddTimed(metric, metadata)
}
+// WriteTimedWithStagedMetadatas writes timed metrics with staged metadatas.
+func (c *aggregatorLocalAdminClient) WriteTimedWithStagedMetadatas(
+ metric aggregated.Metric,
+ metadatas metadata.StagedMetadatas,
+) error {
+ return c.agg.AddTimedWithStagedMetadatas(metric, metadatas)
+}
+
// WriteForwarded writes forwarded metrics.
func (c *aggregatorLocalAdminClient) WriteForwarded(
metric aggregated.ForwardedMetric,
@@ -1069,6 +1172,14 @@ func (c *aggregatorLocalAdminClient) WriteForwarded(
return c.agg.AddForwarded(metric, metadata)
}
+// WritePassthrough writes passthrough metrics.
+func (c *aggregatorLocalAdminClient) WritePassthrough(
+ metric aggregated.Metric,
+ storagePolicy policy.StoragePolicy,
+) error {
+ return c.agg.AddPassthrough(metric, storagePolicy)
+}
+
// Flush flushes any remaining data buffered by the client.
func (c *aggregatorLocalAdminClient) Flush() error {
return nil
@@ -1093,7 +1204,10 @@ var (
}
)
-func bufferForPastTimedMetric(limits []bufferPastLimit, tile time.Duration) time.Duration {
+func bufferForPastTimedMetric(
+ limits []bufferPastLimit,
+ tile time.Duration,
+) time.Duration {
bufferPast := limits[0].bufferPast
for _, limit := range limits {
if tile < limit.upperBound {
@@ -1103,3 +1217,19 @@ func bufferForPastTimedMetric(limits []bufferPastLimit, tile time.Duration) time
}
return bufferPast
}
+
+func maxAllowedForwardingDelay(
+ limits []bufferPastLimit,
+ tile time.Duration,
+ numForwardedTimes int,
+) time.Duration {
+ resolutionForwardDelay := tile * time.Duration(numForwardedTimes)
+ bufferPast := limits[0].bufferPast
+ for _, limit := range limits {
+ if tile < limit.upperBound {
+ return bufferPast + resolutionForwardDelay
+ }
+ bufferPast = limit.bufferPast
+ }
+ return bufferPast + resolutionForwardDelay
+}
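
A worked example of the forwarding-delay math above, with assumed limits
(the real defaults live in the bufferPastLimit variables earlier in this
file):

    limits := []bufferPastLimit{
        {upperBound: 0, bufferPast: 15 * time.Second},                // assumed
        {upperBound: 30 * time.Second, bufferPast: 30 * time.Second}, // assumed
    }
    // tile=10s, numForwardedTimes=2: the 10s tile is below the 30s upper
    // bound of the second entry, so the first entry's bufferPast applies:
    // 15s + 10s*2 = 35s.
    delay := maxAllowedForwardingDelay(limits, 10*time.Second, 2)
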
diff --git a/src/cmd/services/m3coordinator/downsample/samples_appender.go b/src/cmd/services/m3coordinator/downsample/samples_appender.go
index 5e1ab4595f..18b2455be5 100644
--- a/src/cmd/services/m3coordinator/downsample/samples_appender.go
+++ b/src/cmd/services/m3coordinator/downsample/samples_appender.go
@@ -41,6 +41,9 @@ type samplesAppender struct {
stagedMetadatas metadata.StagedMetadatas
}
+// Ensure samplesAppender implements SamplesAppender.
+var _ SamplesAppender = (*samplesAppender)(nil)
+
func (a samplesAppender) AppendCounterSample(value int64) error {
if a.clientRemote != nil {
// Remote client write instead of local aggregation.
@@ -95,28 +98,21 @@ func (a *samplesAppender) AppendGaugeTimedSample(t time.Time, value float64) err
})
}
+func (a *samplesAppender) AppendTimerTimedSample(t time.Time, value float64) error {
+ return a.appendTimedSample(aggregated.Metric{
+ Type: metric.TimerType,
+ ID: a.unownedID,
+ TimeNanos: t.UnixNano(),
+ Value: value,
+ })
+}
+
func (a *samplesAppender) appendTimedSample(sample aggregated.Metric) error {
- var multiErr xerrors.MultiError
- for _, meta := range a.stagedMetadatas {
- for _, pipeline := range meta.Pipelines {
- for _, policy := range pipeline.StoragePolicies {
- metadata := metadata.TimedMetadata{
- AggregationID: pipeline.AggregationID,
- StoragePolicy: policy,
- }
-
- if a.clientRemote != nil {
- // Remote client write instead of local aggregation.
- multiErr = multiErr.Add(a.clientRemote.WriteTimed(sample, metadata))
- continue
- }
-
- // Add timed to local aggregator.
- multiErr = multiErr.Add(a.agg.AddTimed(sample, metadata))
- }
- }
+ if a.clientRemote != nil {
+ return a.clientRemote.WriteTimedWithStagedMetadatas(sample, a.stagedMetadatas)
}
- return multiErr.LastError()
+
+ return a.agg.AddTimedWithStagedMetadatas(sample, a.stagedMetadatas)
}
// Ensure multiSamplesAppender implements SamplesAppender.
@@ -172,3 +168,11 @@ func (a *multiSamplesAppender) AppendGaugeTimedSample(t time.Time, value float64
}
return multiErr.LastError()
}
+
+func (a *multiSamplesAppender) AppendTimerTimedSample(t time.Time, value float64) error {
+ var multiErr xerrors.MultiError
+ for _, appender := range a.appenders {
+ multiErr = multiErr.Add(appender.AppendTimerTimedSample(t, value))
+ }
+ return multiErr.LastError()
+}
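
The appendTimedSample rewrite above replaces the old triple loop, which
fanned each write out into one TimedMetadata per (staged metadata, pipeline,
storage policy), with a single call that hands the full staged metadatas to
the remote client or local aggregator:

    // One call carrying everything; expansion now happens downstream.
    err := agg.AddTimedWithStagedMetadatas(sample, stagedMetadatas)
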
diff --git a/src/cmd/services/m3coordinator/downsample/tags.go b/src/cmd/services/m3coordinator/downsample/tags.go
index 5bcd3100bd..2c5985cb0e 100644
--- a/src/cmd/services/m3coordinator/downsample/tags.go
+++ b/src/cmd/services/m3coordinator/downsample/tags.go
@@ -33,11 +33,13 @@ const (
)
type tags struct {
- names [][]byte
- values [][]byte
- idx int
- nameBuf []byte
- valueBuf []byte
+ names [][]byte
+ values [][]byte
+ idx int
+ nameBuf []byte
+ valueBuf []byte
+ reuseableTagName *ident.ReuseableBytesID
+ reuseableTagValue *ident.ReuseableBytesID
}
// Ensure tags implements TagIterator and sort Interface
@@ -48,9 +50,11 @@ var (
func newTags() *tags {
return &tags{
- names: make([][]byte, 0, initAllocTagsSliceCapacity),
- values: make([][]byte, 0, initAllocTagsSliceCapacity),
- idx: -1,
+ names: make([][]byte, 0, initAllocTagsSliceCapacity),
+ values: make([][]byte, 0, initAllocTagsSliceCapacity),
+ idx: -1,
+ reuseableTagName: ident.NewReuseableBytesID(),
+ reuseableTagValue: ident.NewReuseableBytesID(),
}
}
@@ -59,6 +63,43 @@ func (t *tags) append(name, value []byte) {
t.values = append(t.values, value)
}
+func (t *tags) filterPrefix(prefix []byte) bool {
+ var (
+ modified bool
+ i = 0
+ )
+ for i < len(t.names) {
+ name := t.names[i]
+ // If the tag name has the prefix swap with last element and continue
+		// If the tag name has the prefix, swap it with the last element and
+		// continue looping over the remaining tags.
+ t.Swap(i, len(t.names)-1)
+ t.names = t.names[:len(t.names)-1]
+ t.values = t.values[:len(t.values)-1]
+ modified = true
+ } else {
+ i++
+ }
+ }
+ // Reset the iterator index.
+ t.reset()
+ return modified
+}
+
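A hedged walk-through of the swap-with-last removal above (the tag values
are assumed for illustration):

    t := newTags()
    t.append([]byte("__m3_type__"), []byte("counter"))
    t.append([]byte("app"), []byte("foo"))
    modified := t.filterPrefix([]byte("__m3_"))
    // modified == true and only {app: foo} remains. Swapping with the last
    // element avoids shifting the slice, at the cost of not preserving
    // relative order; the iterator index is reset before returning.
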
+func (t *tags) countPrefix(prefix []byte) int {
+ count := 0
+ for _, name := range t.names {
+ if bytes.HasPrefix(name, prefix) {
+ count++
+ }
+ }
+ return count
+}
+
+func (t *tags) reset() {
+ t.idx = -1
+}
+
func (t *tags) Len() int {
return len(t.names)
}
@@ -90,9 +131,11 @@ func (t *tags) CurrentIndex() int {
func (t *tags) Current() ident.Tag {
t.nameBuf = append(t.nameBuf[:0], t.names[t.idx]...)
t.valueBuf = append(t.valueBuf[:0], t.values[t.idx]...)
+ t.reuseableTagName.Reset(t.nameBuf)
+ t.reuseableTagValue.Reset(t.valueBuf)
return ident.Tag{
- Name: ident.BytesID(t.nameBuf),
- Value: ident.BytesID(t.valueBuf),
+ Name: t.reuseableTagName,
+ Value: t.reuseableTagValue,
}
}
@@ -100,9 +143,7 @@ func (t *tags) Err() error {
return nil
}
-func (t *tags) Close() {
- // No-op
-}
+func (t *tags) Close() {}
func (t *tags) Remaining() int {
if t.idx < 0 {
@@ -115,6 +156,10 @@ func (t *tags) Duplicate() ident.TagIterator {
return &tags{idx: -1, names: t.names, values: t.values}
}
+func (t *tags) Rewind() {
+ t.idx = -1
+}
+
func (t *tags) String() string {
var str strings.Builder
str.WriteString("{")
diff --git a/src/cmd/services/m3coordinator/ingest/m3msg/config.go b/src/cmd/services/m3coordinator/ingest/m3msg/config.go
index 67c06ad46f..9d7c17cce2 100644
--- a/src/cmd/services/m3coordinator/ingest/m3msg/config.go
+++ b/src/cmd/services/m3coordinator/ingest/m3msg/config.go
@@ -21,12 +21,13 @@
package ingestm3msg
import (
+ "github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
- "github.com/m3db/m3/src/x/serialize"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/retry"
"github.com/m3db/m3/src/x/sampler"
+ "github.com/m3db/m3/src/x/serialize"
xsync "github.com/m3db/m3/src/x/sync"
)
@@ -43,9 +44,10 @@ type Configuration struct {
// NewIngester creates an ingester with an appender.
func (cfg Configuration) NewIngester(
appender storage.Appender,
+ tagOptions models.TagOptions,
instrumentOptions instrument.Options,
) (*Ingester, error) {
- opts, err := cfg.newOptions(appender, instrumentOptions)
+ opts, err := cfg.newOptions(appender, tagOptions, instrumentOptions)
if err != nil {
return nil, err
}
@@ -54,6 +56,7 @@ func (cfg Configuration) NewIngester(
func (cfg Configuration) newOptions(
appender storage.Appender,
+ tagOptions models.TagOptions,
instrumentOptions instrument.Options,
) (Options, error) {
scope := instrumentOptions.MetricsScope().Tagged(
@@ -70,7 +73,7 @@ func (cfg Configuration) newOptions(
workers.Init()
tagDecoderPool := serialize.NewTagDecoderPool(
- serialize.NewTagDecoderOptions(),
+ serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
pool.NewObjectPoolOptions().
SetInstrumentOptions(instrumentOptions.
SetMetricsScope(instrumentOptions.MetricsScope().
@@ -82,7 +85,7 @@ func (cfg Configuration) newOptions(
if cfg.LogSampleRate != nil {
logSampleRate = *cfg.LogSampleRate
}
- sampler, err := sampler.NewSampler(logSampleRate)
+ sampler, err := sampler.NewSampler(sampler.Rate(logSampleRate))
if err != nil {
return Options{}, err
}
@@ -90,6 +93,7 @@ func (cfg Configuration) newOptions(
Appender: appender,
Workers: workers,
PoolOptions: cfg.OpPool.NewObjectPoolOptions(instrumentOptions),
+ TagOptions: tagOptions,
TagDecoderPool: tagDecoderPool,
RetryOptions: cfg.Retry.NewOptions(scope),
Sampler: sampler,
diff --git a/src/cmd/services/m3coordinator/ingest/m3msg/ingest.go b/src/cmd/services/m3coordinator/ingest/m3msg/ingest.go
index b5c44fab93..8105bd9b3c 100644
--- a/src/cmd/services/m3coordinator/ingest/m3msg/ingest.go
+++ b/src/cmd/services/m3coordinator/ingest/m3msg/ingest.go
@@ -31,6 +31,7 @@ import (
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/x/convert"
xerrors "github.com/m3db/m3/src/x/errors"
@@ -58,13 +59,19 @@ type Options struct {
}
type ingestMetrics struct {
- ingestError tally.Counter
- ingestSuccess tally.Counter
+ ingestInternalError tally.Counter
+ ingestNonRetryableError tally.Counter
+ ingestSuccess tally.Counter
}
func newIngestMetrics(scope tally.Scope) ingestMetrics {
return ingestMetrics{
- ingestError: scope.Counter("ingest-error"),
+ ingestInternalError: scope.Tagged(map[string]string{
+ "error_type": "internal_error",
+ }).Counter("ingest-error"),
+ ingestNonRetryableError: scope.Tagged(map[string]string{
+ "error_type": "non_retryable_error",
+ }).Counter("ingest-error"),
ingestSuccess: scope.Counter("ingest-success"),
}
}
@@ -100,9 +107,6 @@ func NewIngester(
m: m,
logger: opts.InstrumentOptions.Logger(),
sampler: opts.Sampler,
- q: storage.WriteQuery{
- Tags: models.NewTags(0, tagOpts),
- },
}
op.attemptFn = op.attempt
op.ingestFn = op.ingest
@@ -152,6 +156,8 @@ type ingestOp struct {
value float64
sp policy.StoragePolicy
callback m3msg.Callbackable
+ tags models.Tags
+ datapoints ts.Datapoints
q storage.WriteQuery
}
@@ -164,7 +170,7 @@ func (op *ingestOp) sample() bool {
func (op *ingestOp) ingest() {
if err := op.resetWriteQuery(); err != nil {
- op.m.ingestError.Inc(1)
+ op.m.ingestInternalError.Inc(1)
op.callback.Callback(m3msg.OnRetriableError)
op.p.Put(op)
if op.sample() {
@@ -173,16 +179,25 @@ func (op *ingestOp) ingest() {
return
}
if err := op.r.Attempt(op.attemptFn); err != nil {
- if xerrors.IsNonRetryableError(err) {
+ nonRetryableErr := xerrors.IsNonRetryableError(err)
+ if nonRetryableErr {
op.callback.Callback(m3msg.OnNonRetriableError)
+ op.m.ingestNonRetryableError.Inc(1)
} else {
op.callback.Callback(m3msg.OnRetriableError)
+ op.m.ingestInternalError.Inc(1)
}
- op.m.ingestError.Inc(1)
- op.p.Put(op)
- if op.sample() {
- op.logger.Error("could not write ingest op", zap.Error(err))
+
+		// NB(r): Always log non-retryable errors since they are usually a
+		// very small minority and it is frustrating not to be able to find
+		// them when they go missing (they are usually bad request errors).
+ if nonRetryableErr || op.sample() {
+ op.logger.Error("could not write ingest op",
+ zap.Error(err),
+ zap.Bool("retryableError", !nonRetryableErr))
}
+
+ op.p.Put(op)
return
}
op.m.ingestSuccess.Inc(1)
@@ -199,17 +214,22 @@ func (op *ingestOp) resetWriteQuery() error {
return err
}
op.resetDataPoints()
- op.q.Unit = convert.UnitForM3DB(op.sp.Resolution().Precision)
- op.q.Attributes.MetricsType = storage.AggregatedMetricsType
- op.q.Attributes.Resolution = op.sp.Resolution().Window
- op.q.Attributes.Retention = op.sp.Retention().Duration()
- return nil
+ return op.q.Reset(storage.WriteQueryOptions{
+ Tags: op.tags,
+ Datapoints: op.datapoints,
+ Unit: convert.UnitForM3DB(op.sp.Resolution().Precision),
+ Attributes: storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: op.sp.Resolution().Window,
+ Retention: op.sp.Retention().Duration(),
+ },
+ })
}
func (op *ingestOp) resetTags() error {
op.it.Reset(op.id)
- op.q.Tags.Tags = op.q.Tags.Tags[:0]
- op.q.Tags.Opts = op.tagOpts
+ op.tags.Tags = op.tags.Tags[:0]
+ op.tags.Opts = op.tagOpts
for op.it.Next() {
name, value := op.it.Current()
@@ -219,30 +239,30 @@ func (op *ingestOp) resetTags() error {
// and this tag is interpreted; eventually this needs to be handled more cleanly.
if bytes.Equal(name, downsample.MetricsOptionIDSchemeTagName) {
if bytes.Equal(value, downsample.GraphiteIDSchemeTagValue) &&
- op.q.Tags.Opts.IDSchemeType() != models.TypeGraphite {
+ op.tags.Opts.IDSchemeType() != models.TypeGraphite {
// Restart iteration with graphite tag options parsing
op.it.Reset(op.id)
- op.q.Tags.Tags = op.q.Tags.Tags[:0]
- op.q.Tags.Opts = op.q.Tags.Opts.SetIDSchemeType(models.TypeGraphite)
+ op.tags.Tags = op.tags.Tags[:0]
+ op.tags.Opts = op.tags.Opts.SetIDSchemeType(models.TypeGraphite)
}
// Continue, whether we updated and need to restart iteration,
// or whether we are passing through for the second time.
continue
}
- op.q.Tags = op.q.Tags.AddTagWithoutNormalizing(models.Tag{
+ op.tags = op.tags.AddTagWithoutNormalizing(models.Tag{
Name: name,
Value: value,
}.Clone())
}
- op.q.Tags.Normalize()
+ op.tags.Normalize()
return op.it.Err()
}
func (op *ingestOp) resetDataPoints() {
- if len(op.q.Datapoints) != 1 {
- op.q.Datapoints = make(ts.Datapoints, 1)
+ if len(op.datapoints) != 1 {
+ op.datapoints = make(ts.Datapoints, 1)
}
- op.q.Datapoints[0].Timestamp = time.Unix(0, op.metricNanos)
- op.q.Datapoints[0].Value = op.value
+ op.datapoints[0].Timestamp = time.Unix(0, op.metricNanos)
+ op.datapoints[0].Value = op.value
}
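
The reset methods above keep tags and datapoints on the pooled op and
rebuild the write query in place. A hedged sketch of the contrast with the
one-off constructor used elsewhere in this diff (options elided):

    // One-off path: allocates and validates a new query.
    wq, err := storage.NewWriteQuery(opts)

    // Pooled ingest path: reuses op.q plus the op's tag/datapoint slices,
    // avoiding a WriteQuery allocation per ingested sample.
    err = op.q.Reset(opts)
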
diff --git a/src/cmd/services/m3coordinator/ingest/m3msg/ingest_test.go b/src/cmd/services/m3coordinator/ingest/m3msg/ingest_test.go
index 5118ca1537..22babfd397 100644
--- a/src/cmd/services/m3coordinator/ingest/m3msg/ingest_test.go
+++ b/src/cmd/services/m3coordinator/ingest/m3msg/ingest_test.go
@@ -22,6 +22,7 @@ package ingestm3msg
import (
"context"
+ "errors"
"sync"
"testing"
"time"
@@ -32,15 +33,18 @@ import (
"github.com/m3db/m3/src/msg/consumer"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/ts"
- "github.com/m3db/m3/src/x/serialize"
+ xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
+ "github.com/m3db/m3/src/x/serialize"
xtime "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
+ "github.com/uber-go/tally"
)
func TestIngest(t *testing.T) {
@@ -54,7 +58,8 @@ func TestIngest(t *testing.T) {
},
}
appender := &mockAppender{}
- ingester, err := cfg.NewIngester(appender, instrument.NewOptions())
+ ingester, err := cfg.NewIngester(appender, models.NewTagOptions(),
+ instrument.NewOptions())
require.NoError(t, err)
id := newTestID(t, "__name__", "foo", "app", "bar")
@@ -73,47 +78,92 @@ func TestIngest(t *testing.T) {
time.Sleep(100 * time.Millisecond)
}
- require.Equal(t,
- storage.WriteQuery{
- Annotation: nil,
- Attributes: storage.Attributes{
- MetricsType: storage.AggregatedMetricsType,
- Resolution: time.Minute,
- Retention: 40 * 24 * time.Hour,
+ expected, err := storage.NewWriteQuery(storage.WriteQueryOptions{
+ Annotation: nil,
+ Attributes: storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: time.Minute,
+ Retention: 40 * 24 * time.Hour,
+ },
+ Datapoints: ts.Datapoints{
+ ts.Datapoint{
+ Timestamp: time.Unix(0, metricNanos),
+ Value: val,
},
- Datapoints: ts.Datapoints{
- ts.Datapoint{
- Timestamp: time.Unix(0, metricNanos),
- Value: val,
+ },
+ Tags: models.NewTags(2, nil).AddTags(
+ []models.Tag{
+ models.Tag{
+ Name: []byte("__name__"),
+ Value: []byte("foo"),
},
- },
- Tags: models.NewTags(2, nil).AddTags(
- []models.Tag{
- models.Tag{
- Name: []byte("__name__"),
- Value: []byte("foo"),
- },
- {
- Name: []byte("app"),
- Value: []byte("bar"),
- },
+ {
+ Name: []byte("app"),
+ Value: []byte("bar"),
},
- ),
- Unit: xtime.Second,
- },
- *appender.received[0],
- )
+ },
+ ),
+ Unit: xtime.Second,
+ })
+ require.NoError(t, err)
+
+ require.Equal(t, *expected, *appender.received[0])
// Make sure the op is put back to pool.
op := ingester.p.Get().(*ingestOp)
require.Equal(t, id, op.id)
}
+func TestIngestNonRetryableError(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ cfg := Configuration{
+ WorkerPoolSize: 2,
+ OpPool: pool.ObjectPoolConfiguration{
+ Size: 1,
+ },
+ }
+
+ scope := tally.NewTestScope("", nil)
+ instrumentOpts := instrument.NewOptions().SetMetricsScope(scope)
+
+ nonRetryableError := xerrors.NewNonRetryableError(errors.New("bad request error"))
+ appender := &mockAppender{expectErr: nonRetryableError}
+ ingester, err := cfg.NewIngester(appender, models.NewTagOptions(),
+ instrumentOpts)
+ require.NoError(t, err)
+
+ id := newTestID(t, "__name__", "foo", "app", "bar")
+ metricNanos := int64(1234)
+ val := float64(1)
+ sp := policy.MustParseStoragePolicy("1m:40d")
+ m := consumer.NewMockMessage(ctrl)
+ var wg sync.WaitGroup
+ wg.Add(1)
+ callback := m3msg.NewProtobufCallback(m, protobuf.NewAggregatedDecoder(nil), &wg)
+
+ m.EXPECT().Ack()
+ ingester.Ingest(context.TODO(), id, metricNanos, 0, val, sp, callback)
+
+ for appender.cntErr() != 1 {
+ time.Sleep(100 * time.Millisecond)
+ }
+
+	// Make sure the non-retryable error was counted.
+ counters := scope.Snapshot().Counters()
+
+ counter, ok := counters["errors+component=ingester,type=not-retryable"]
+ require.True(t, ok)
+ require.Equal(t, int64(1), counter.Value())
+}
+
type mockAppender struct {
sync.RWMutex
- expectErr error
- received []*storage.WriteQuery
+ expectErr error
+ receivedErr []*storage.WriteQuery
+ received []*storage.WriteQuery
}
func (m *mockAppender) Write(ctx context.Context, query *storage.WriteQuery) error {
@@ -121,6 +171,7 @@ func (m *mockAppender) Write(ctx context.Context, query *storage.WriteQuery) err
defer m.Unlock()
if m.expectErr != nil {
+ m.receivedErr = append(m.receivedErr, query)
return m.expectErr
}
m.received = append(m.received, query)
@@ -134,6 +185,13 @@ func (m *mockAppender) cnt() int {
return len(m.received)
}
+func (m *mockAppender) cntErr() int {
+ m.Lock()
+ defer m.Unlock()
+
+ return len(m.receivedErr)
+}
+
func newTestID(t *testing.T, tags ...string) []byte {
tagEncoderPool := serialize.NewTagEncoderPool(serialize.NewTagEncoderOptions(),
pool.NewObjectPoolOptions().SetSize(1))
diff --git a/src/cmd/services/m3coordinator/ingest/write.go b/src/cmd/services/m3coordinator/ingest/write.go
index dc8a5717ce..9889c23d89 100644
--- a/src/cmd/services/m3coordinator/ingest/write.go
+++ b/src/cmd/services/m3coordinator/ingest/write.go
@@ -28,10 +28,14 @@ import (
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/ts"
xerrors "github.com/m3db/m3/src/x/errors"
+ "github.com/m3db/m3/src/x/instrument"
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
+
+ "github.com/uber-go/tally"
)
var (
@@ -41,13 +45,24 @@ var (
}
)
+// IterValue is the value returned by the iterator.
+type IterValue struct {
+ Tags models.Tags
+ Datapoints ts.Datapoints
+ Attributes ts.SeriesAttributes
+ Unit xtime.Unit
+ Metadata ts.Metadata
+ Annotation []byte
+}
+
// DownsampleAndWriteIter is an interface that can be implemented to use
// the WriteBatch method.
type DownsampleAndWriteIter interface {
Next() bool
- Current() (models.Tags, ts.Datapoints, xtime.Unit, []byte)
+ Current() IterValue
Reset() error
Error() error
+ SetCurrentMetadata(ts.Metadata)
}
// DownsamplerAndWriter is the interface for the downsamplerAndWriter which
@@ -88,12 +103,18 @@ type WriteOptions struct {
WriteOverride bool
}
+type downsamplerAndWriterMetrics struct {
+ dropped tally.Counter
+}
+
// downsamplerAndWriter encapsulates the logic for writing data to the downsampler,
// as well as in unaggregated form to storage.
type downsamplerAndWriter struct {
store storage.Storage
downsampler downsample.Downsampler
workerPool xsync.PooledWorkerPool
+
+ metrics downsamplerAndWriterMetrics
}
// NewDownsamplerAndWriter creates a new downsampler and writer.
@@ -101,11 +122,16 @@ func NewDownsamplerAndWriter(
store storage.Storage,
downsampler downsample.Downsampler,
workerPool xsync.PooledWorkerPool,
+ instrumentOpts instrument.Options,
) DownsamplerAndWriter {
+ scope := instrumentOpts.MetricsScope().SubScope("downsampler")
return &downsamplerAndWriter{
store: store,
downsampler: downsampler,
workerPool: workerPool,
+ metrics: downsamplerAndWriterMetrics{
+ dropped: scope.Counter("metrics_dropped"),
+ },
}
}
@@ -117,15 +143,22 @@ func (d *downsamplerAndWriter) Write(
annotation []byte,
overrides WriteOptions,
) error {
- multiErr := xerrors.NewMultiError()
+ var (
+ multiErr = xerrors.NewMultiError()
+ dropUnaggregated bool
+ )
+
if d.shouldDownsample(overrides) {
- err := d.writeToDownsampler(tags, datapoints, unit, overrides)
+ var err error
+ dropUnaggregated, err = d.writeToDownsampler(tags, datapoints, unit, overrides)
if err != nil {
multiErr = multiErr.Add(err)
}
}
- if d.shouldWrite(overrides) {
+ if dropUnaggregated {
+ d.metrics.dropped.Inc(1)
+ } else if d.shouldWrite(overrides) {
err := d.writeToStorage(ctx, tags, datapoints, unit, annotation, overrides)
if err != nil {
multiErr = multiErr.Add(err)
@@ -195,12 +228,14 @@ func (d *downsamplerAndWriter) writeToDownsampler(
datapoints ts.Datapoints,
unit xtime.Unit,
overrides WriteOptions,
-) error {
- // TODO(rartoul): MetricsAppender has a Finalize() method, but it does not actually reuse many
- // resources. If we can pool this properly we can get a nice speedup.
+) (bool, error) {
+ if err := tags.Validate(); err != nil {
+ return false, err
+ }
+
appender, err := d.downsampler.NewMetricsAppender()
if err != nil {
- return err
+ return false, err
}
defer appender.Finalize()
@@ -234,19 +269,19 @@ func (d *downsamplerAndWriter) writeToDownsampler(
}
}
- samplesAppender, err := appender.SamplesAppender(appenderOpts)
+ result, err := appender.SamplesAppender(appenderOpts)
if err != nil {
- return err
+ return false, err
}
for _, dp := range datapoints {
- err := samplesAppender.AppendGaugeTimedSample(dp.Timestamp, dp.Value)
+ err := result.SamplesAppender.AppendGaugeTimedSample(dp.Timestamp, dp.Value)
if err != nil {
- return err
+ return result.IsDropPolicyApplied, err
}
}
- return nil
+ return result.IsDropPolicyApplied, nil
}
func (d *downsamplerAndWriter) writeToStorage(
@@ -259,7 +294,7 @@ func (d *downsamplerAndWriter) writeToStorage(
) error {
storagePolicies, ok := d.writeOverrideStoragePolicies(overrides)
if !ok {
- return d.store.Write(ctx, &storage.WriteQuery{
+ return d.writeWithOptions(ctx, storage.WriteQueryOptions{
Tags: tags,
Datapoints: datapoints,
Unit: unit,
@@ -279,7 +314,7 @@ func (d *downsamplerAndWriter) writeToStorage(
wg.Add(1)
d.workerPool.Go(func() {
- err := d.store.Write(ctx, &storage.WriteQuery{
+ err := d.writeWithOptions(ctx, storage.WriteQueryOptions{
Tags: tags,
Datapoints: datapoints,
Unit: unit,
@@ -299,6 +334,17 @@ func (d *downsamplerAndWriter) writeToStorage(
return multiErr.FinalError()
}
+func (d *downsamplerAndWriter) writeWithOptions(
+ ctx context.Context,
+ opts storage.WriteQueryOptions,
+) error {
+ writeQuery, err := storage.NewWriteQuery(opts)
+ if err != nil {
+ return err
+ }
+ return d.store.Write(ctx, writeQuery)
+}
+
func (d *downsamplerAndWriter) WriteBatch(
ctx context.Context,
iter DownsampleAndWriteIter,
@@ -315,7 +361,24 @@ func (d *downsamplerAndWriter) WriteBatch(
}
)
- if d.shouldWrite(overrides) {
+ if d.shouldDownsample(overrides) {
+ if errs := d.writeAggregatedBatch(iter, overrides); !errs.Empty() {
+			// Iterate through all the errors and add them to the multi error.
+			// It is ok not to use the addError method here as we are still
+			// running single-threaded at this point.
+ for _, err := range errs.Errors() {
+ multiErr = multiErr.Add(err)
+ }
+ }
+ }
+
+ // Reset the iter to write the unaggregated data.
+ resetErr := iter.Reset()
+ if resetErr != nil {
+ addError(resetErr)
+ }
+
+ if d.shouldWrite(overrides) && resetErr == nil {
// Write unaggregated. Spin up all the background goroutines that make
// network requests before we do the synchronous work of writing to the
// downsampler.
@@ -325,16 +388,20 @@ func (d *downsamplerAndWriter) WriteBatch(
}
for iter.Next() {
- tags, datapoints, unit, annotation := iter.Current()
+ value := iter.Current()
+ if value.Metadata.DropUnaggregated {
+ d.metrics.dropped.Inc(1)
+ continue
+ }
for _, p := range storagePolicies {
p := p // Capture for lambda.
wg.Add(1)
d.workerPool.Go(func() {
- err := d.store.Write(ctx, &storage.WriteQuery{
- Tags: tags,
- Datapoints: datapoints,
- Unit: unit,
- Annotation: annotation,
+ err := d.writeWithOptions(ctx, storage.WriteQueryOptions{
+ Tags: value.Tags,
+ Datapoints: value.Datapoints,
+ Unit: value.Unit,
+ Annotation: value.Annotation,
Attributes: storageAttributesFromPolicy(p),
})
if err != nil {
@@ -346,19 +413,6 @@ func (d *downsamplerAndWriter) WriteBatch(
}
}
- // Iter does not need to be synchronized because even though we use it to spawn
- // many goroutines above, the iteration is always synchronous.
- resetErr := iter.Reset()
- if resetErr != nil {
- addError(resetErr)
- }
-
- if d.shouldDownsample(overrides) && resetErr == nil {
- if err := d.writeAggregatedBatch(iter, overrides); err != nil {
- addError(err)
- }
- }
-
wg.Wait()
if multiErr.NumErrors() == 0 {
return nil
@@ -370,22 +424,46 @@ func (d *downsamplerAndWriter) WriteBatch(
func (d *downsamplerAndWriter) writeAggregatedBatch(
iter DownsampleAndWriteIter,
overrides WriteOptions,
-) error {
+) xerrors.MultiError {
+ var multiErr xerrors.MultiError
appender, err := d.downsampler.NewMetricsAppender()
if err != nil {
- return err
+ return multiErr.Add(err)
}
defer appender.Finalize()
for iter.Next() {
- appender.Reset()
- tags, datapoints, _, _ := iter.Current()
- for _, tag := range tags.Tags {
+ appender.NextMetric()
+
+ value := iter.Current()
+ if err := value.Tags.Validate(); err != nil {
+ multiErr = multiErr.Add(err)
+ continue
+ }
+
+ for _, tag := range value.Tags.Tags {
appender.AddTag(tag.Name, tag.Value)
}
- var opts downsample.SampleAppenderOptions
+ if value.Tags.Opts.IDSchemeType() == models.TypeGraphite {
+ // NB(r): This is gross, but if this is a graphite metric then
+ // we are going to set a special tag that means the downsampler
+ // will write a graphite ID. This should really be plumbed
+ // through the downsampler in general, but right now the aggregator
+ // does not allow context to be attached to a metric so when it calls
+ // back the context is lost currently.
+ // TODO_FIX_GRAPHITE_TAGGING: Using this string constant to track
+			// all places worth fixing this hack. There is at least one other
+			// path where data flows back to the coordinator from the aggregator
+			// and this tag is interpreted; eventually this needs to be handled
+			// more cleanly.
+ appender.AddTag(downsample.MetricsOptionIDSchemeTagName,
+ downsample.GraphiteIDSchemeTagValue)
+ }
+
+ opts := downsample.SampleAppenderOptions{
+ MetricType: value.Attributes.Type,
+ }
if downsampleMappingRuleOverrides, ok := d.downsampleOverrideRules(overrides); ok {
opts = downsample.SampleAppenderOptions{
Override: true,
@@ -395,20 +473,34 @@ func (d *downsamplerAndWriter) writeAggregatedBatch(
}
}
- samplesAppender, err := appender.SamplesAppender(opts)
+ result, err := appender.SamplesAppender(opts)
if err != nil {
- return err
+ multiErr = multiErr.Add(err)
+ continue
+ }
+
+ if result.IsDropPolicyApplied {
+ iter.SetCurrentMetadata(ts.Metadata{DropUnaggregated: true})
}
- for _, dp := range datapoints {
- err := samplesAppender.AppendGaugeTimedSample(dp.Timestamp, dp.Value)
+ for _, dp := range value.Datapoints {
+ switch value.Attributes.Type {
+ case ts.MetricTypeGauge:
+ err = result.SamplesAppender.AppendGaugeTimedSample(dp.Timestamp, dp.Value)
+ case ts.MetricTypeCounter:
+ err = result.SamplesAppender.AppendCounterTimedSample(dp.Timestamp, int64(dp.Value))
+ case ts.MetricTypeTimer:
+ err = result.SamplesAppender.AppendTimerTimedSample(dp.Timestamp, dp.Value)
+ }
if err != nil {
- return err
+				// If we see an error, record it and continue with the
+				// next datapoint.
+ multiErr = multiErr.Add(err)
}
}
}
- return iter.Error()
+ return multiErr.Add(iter.Error())
}
func (d *downsamplerAndWriter) Storage() storage.Storage {
@@ -417,14 +509,14 @@ func (d *downsamplerAndWriter) Storage() storage.Storage {
func storageAttributesFromPolicy(
p policy.StoragePolicy,
-) storage.Attributes {
- attributes := storage.Attributes{
- MetricsType: storage.UnaggregatedMetricsType,
+) storagemetadata.Attributes {
+ attributes := storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
}
if p != unaggregatedStoragePolicy {
- attributes = storage.Attributes{
+ attributes = storagemetadata.Attributes{
// Assume all overridden storage policies are for aggregated namespaces.
- MetricsType: storage.AggregatedMetricsType,
+ MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: p.Resolution().Window,
Retention: p.Retention().Duration(),
}
diff --git a/src/cmd/services/m3coordinator/ingest/write_mock.go b/src/cmd/services/m3coordinator/ingest/write_mock.go
index ecb45c3a1e..c168813c91 100644
--- a/src/cmd/services/m3coordinator/ingest/write_mock.go
+++ b/src/cmd/services/m3coordinator/ingest/write_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/cmd/services/m3coordinator/ingest (interfaces: DownsamplerAndWriter)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/cmd/services/m3coordinator/ingest/write_test.go b/src/cmd/services/m3coordinator/ingest/write_test.go
index 5eaf74120a..43281876c1 100644
--- a/src/cmd/services/m3coordinator/ingest/write_test.go
+++ b/src/cmd/services/m3coordinator/ingest/write_test.go
@@ -35,7 +35,9 @@ import (
"github.com/m3db/m3/src/query/storage/m3"
testm3 "github.com/m3db/m3/src/query/test/m3"
"github.com/m3db/m3/src/query/ts"
+ xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
+ "github.com/m3db/m3/src/x/instrument"
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
@@ -79,6 +81,20 @@ var (
},
},
)
+ testBadTags = models.NewTags(3, nil).AddTags([]models.Tag{
+ {
+ Name: []byte("standard_tag"),
+ Value: []byte("standard_tag_value"),
+ },
+ {
+ Name: []byte("duplicate_tag"),
+ Value: []byte("duplicate_tag_value0"),
+ },
+ {
+ Name: []byte("duplicate_tag"),
+ Value: []byte("duplicate_tag_value1"),
+ },
+ })
testDatapoints1 = []ts.Datapoint{
{
@@ -112,9 +128,24 @@ var (
testAnnotation1 = []byte("first")
testAnnotation2 = []byte("second")
+ testAttributesGauge = ts.SeriesAttributes{
+ Type: ts.MetricTypeGauge,
+ }
+ testAttributesCounter = ts.SeriesAttributes{
+ Type: ts.MetricTypeCounter,
+ }
+ testAttributesTimer = ts.SeriesAttributes{
+ Type: ts.MetricTypeTimer,
+ }
+
testEntries = []testIterEntry{
- {tags: testTags1, datapoints: testDatapoints1, annotation: testAnnotation1},
- {tags: testTags2, datapoints: testDatapoints2, annotation: testAnnotation2},
+ {tags: testTags1, datapoints: testDatapoints1, attributes: testAttributesGauge, annotation: testAnnotation1},
+ {tags: testTags2, datapoints: testDatapoints2, attributes: testAttributesGauge, annotation: testAnnotation2},
+ }
+
+ testEntries2 = []testIterEntry{
+ {tags: testTags1, datapoints: testDatapoints1, attributes: testAttributesCounter, annotation: testAnnotation1},
+ {tags: testTags2, datapoints: testDatapoints2, attributes: testAttributesTimer, annotation: testAnnotation2},
}
defaultOverride = WriteOptions{}
@@ -123,20 +154,23 @@ var (
)
type testIter struct {
- idx int
- entries []testIterEntry
+ idx int
+ entries []testIterEntry
+ metadatas []ts.Metadata
}
type testIterEntry struct {
tags models.Tags
datapoints []ts.Datapoint
annotation []byte
+ attributes ts.SeriesAttributes
}
func newTestIter(entries []testIterEntry) *testIter {
return &testIter{
- idx: -1,
- entries: entries,
+ idx: -1,
+ entries: entries,
+ metadatas: make([]ts.Metadata, 10),
}
}
@@ -145,13 +179,26 @@ func (i *testIter) Next() bool {
return i.idx < len(i.entries)
}
-func (i *testIter) Current() (models.Tags, ts.Datapoints, xtime.Unit, []byte) {
+func (i *testIter) Current() IterValue {
if len(i.entries) == 0 || i.idx < 0 || i.idx >= len(i.entries) {
- return models.EmptyTags(), nil, 0, nil
+ return IterValue{
+ Tags: models.EmptyTags(),
+ Attributes: ts.DefaultSeriesAttributes(),
+ }
}
curr := i.entries[i.idx]
- return curr.tags, curr.datapoints, xtime.Second, curr.annotation
+ value := IterValue{
+ Tags: curr.tags,
+ Datapoints: curr.datapoints,
+ Attributes: curr.attributes,
+ Unit: xtime.Second,
+ Annotation: curr.annotation,
+ }
+ if i.idx < len(i.metadatas) {
+ value.Metadata = i.metadatas[i.idx]
+ }
+ return value
}
func (i *testIter) Reset() error {
@@ -163,6 +210,10 @@ func (i *testIter) Error() error {
return nil
}
+func (i *testIter) SetCurrentMetadata(metadata ts.Metadata) {
+ i.metadatas[i.idx] = metadata
+}
+
func TestDownsampleAndWrite(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -178,6 +229,28 @@ func TestDownsampleAndWrite(t *testing.T) {
require.NoError(t, err)
}
+func TestDownsampleAndWriteWithBadTags(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ downAndWrite, _, _ := newTestDownsamplerAndWriter(t, ctrl,
+ testDownsamplerAndWriterOptions{})
+
+ err := downAndWrite.Write(
+ context.Background(), testBadTags, testDatapoints1, xtime.Second, testAnnotation1, defaultOverride)
+ require.Error(t, err)
+
+	// Make sure we get a validation error for the downsample code path
+	// and for the raw unaggregated write code path.
+ multiErr, ok := err.(xerrors.MultiError)
+ require.True(t, ok)
+ require.Equal(t, 2, multiErr.NumErrors())
+ // Make sure all are invalid params errors.
+ for _, err := range multiErr.Errors() {
+ require.True(t, xerrors.IsInvalidParams(err))
+ }
+}
+
func TestDownsampleAndWriteWithDownsampleOverridesAndNoMappingRules(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -237,6 +310,61 @@ func TestDownsampleAndWriteWithDownsampleOverridesAndMappingRules(t *testing.T)
require.NoError(t, err)
}
+func TestDownsampleAndWriteWithDownsampleOverridesAndDropMappingRules(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ downAndWrite, downsampler, _ := newTestDownsamplerAndWriter(t, ctrl,
+ testDownsamplerAndWriterOptions{})
+
+	// We're overriding the downsampling with mapping rules that end up
+	// applying a drop policy, so we expect data to be sent to the
+	// downsampler but nothing to be written to unaggregated storage.
+ mappingRules := []downsample.AutoMappingRule{
+ {
+ Aggregations: []aggregation.Type{aggregation.Mean},
+ Policies: []policy.StoragePolicy{
+ policy.NewStoragePolicy(
+ time.Minute, xtime.Second, 48*time.Hour),
+ },
+ },
+ }
+ overrides := WriteOptions{
+ DownsampleOverride: true,
+ DownsampleMappingRules: mappingRules,
+ }
+
+ expectedSamplesAppenderOptions := downsample.SampleAppenderOptions{
+ Override: true,
+ OverrideRules: downsample.SamplesAppenderOverrideRules{
+ MappingRules: mappingRules,
+ },
+ }
+
+ var (
+ mockSamplesAppender = downsample.NewMockSamplesAppender(ctrl)
+ mockMetricsAppender = downsample.NewMockMetricsAppender(ctrl)
+ )
+
+ mockMetricsAppender.
+ EXPECT().
+ SamplesAppender(expectedSamplesAppenderOptions).
+ Return(downsample.SamplesAppenderResult{SamplesAppender: mockSamplesAppender, IsDropPolicyApplied: true}, nil)
+ for _, tag := range testTags1.Tags {
+ mockMetricsAppender.EXPECT().AddTag(tag.Name, tag.Value)
+ }
+
+ for _, dp := range testDatapoints1 {
+ mockSamplesAppender.EXPECT().AppendGaugeTimedSample(dp.Timestamp, dp.Value)
+ }
+ downsampler.EXPECT().NewMetricsAppender().Return(mockMetricsAppender, nil)
+
+ mockMetricsAppender.EXPECT().Finalize()
+
+ err := downAndWrite.Write(
+ context.Background(), testTags1, testDatapoints1, xtime.Second, testAnnotation1, overrides)
+ require.NoError(t, err)
+}
+
func TestDownsampleAndWriteWithWriteOverridesAndNoStoragePolicies(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -335,7 +463,7 @@ func TestDownsampleAndWriteBatch(t *testing.T) {
mockMetricsAppender.
EXPECT().
SamplesAppender(zeroDownsamplerAppenderOpts).
- Return(mockSamplesAppender, nil).Times(2)
+ Return(downsample.SamplesAppenderResult{SamplesAppender: mockSamplesAppender}, nil).Times(2)
for _, tag := range testTags1.Tags {
mockMetricsAppender.EXPECT().AddTag(tag.Name, tag.Value)
}
@@ -350,7 +478,7 @@ func TestDownsampleAndWriteBatch(t *testing.T) {
}
downsampler.EXPECT().NewMetricsAppender().Return(mockMetricsAppender, nil)
- mockMetricsAppender.EXPECT().Reset().Times(2)
+ mockMetricsAppender.EXPECT().NextMetric().Times(2)
mockMetricsAppender.EXPECT().Finalize()
for _, entry := range testEntries {
@@ -366,6 +494,161 @@ func TestDownsampleAndWriteBatch(t *testing.T) {
require.NoError(t, err)
}
+func TestDownsampleAndWriteBatchBadTags(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ downAndWrite, downsampler, session := newTestDownsamplerAndWriter(t, ctrl,
+ testDownsamplerAndWriterOptions{})
+
+ var (
+ mockSamplesAppender = downsample.NewMockSamplesAppender(ctrl)
+ mockMetricsAppender = downsample.NewMockMetricsAppender(ctrl)
+ )
+
+ entries := []testIterEntry{
+ {tags: testBadTags, datapoints: testDatapoints1, attributes: testAttributesGauge, annotation: testAnnotation1},
+ {tags: testTags2, datapoints: testDatapoints2, attributes: testAttributesGauge, annotation: testAnnotation2},
+ }
+
+ // Only expect to write non-bad tags.
+ mockMetricsAppender.
+ EXPECT().
+ SamplesAppender(zeroDownsamplerAppenderOpts).
+ Return(downsample.SamplesAppenderResult{SamplesAppender: mockSamplesAppender}, nil).Times(1)
+ for _, tag := range testTags2.Tags {
+ mockMetricsAppender.EXPECT().AddTag(tag.Name, tag.Value)
+ }
+ for _, dp := range testDatapoints2 {
+ mockSamplesAppender.EXPECT().AppendGaugeTimedSample(dp.Timestamp, dp.Value)
+ }
+ downsampler.EXPECT().NewMetricsAppender().Return(mockMetricsAppender, nil)
+
+ mockMetricsAppender.EXPECT().NextMetric().Times(2)
+ mockMetricsAppender.EXPECT().Finalize()
+
+ // Only expect to write non-bad tags.
+ for _, entry := range testEntries[1:] {
+ for _, dp := range entry.datapoints {
+ session.EXPECT().WriteTagged(
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), dp.Value, gomock.Any(), entry.annotation,
+ )
+ }
+ }
+
+ iter := newTestIter(entries)
+ err := downAndWrite.WriteBatch(context.Background(), iter, WriteOptions{})
+ require.Error(t, err)
+
+	// Make sure we get a validation error for the downsample code path
+	// and for the raw unaggregated write code path.
+ multiErr, ok := err.(xerrors.MultiError)
+ require.True(t, ok)
+ require.Equal(t, 2, multiErr.NumErrors())
+ // Make sure all are invalid params errors.
+ for _, err := range multiErr.Errors() {
+ require.True(t, xerrors.IsInvalidParams(err))
+ }
+}
+
+func TestDownsampleAndWriteBatchDifferentTypes(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ downAndWrite, downsampler, session := newTestDownsamplerAndWriter(t, ctrl,
+ testDownsamplerAndWriterOptions{})
+
+ var (
+ mockSamplesAppender = downsample.NewMockSamplesAppender(ctrl)
+ mockMetricsAppender = downsample.NewMockMetricsAppender(ctrl)
+ )
+
+ mockMetricsAppender.
+ EXPECT().
+ SamplesAppender(downsample.SampleAppenderOptions{MetricType: ts.MetricTypeCounter}).
+ Return(downsample.SamplesAppenderResult{SamplesAppender: mockSamplesAppender}, nil).Times(1)
+ mockMetricsAppender.
+ EXPECT().
+ SamplesAppender(downsample.SampleAppenderOptions{MetricType: ts.MetricTypeTimer}).
+ Return(downsample.SamplesAppenderResult{SamplesAppender: mockSamplesAppender}, nil).Times(1)
+ for _, tag := range testTags1.Tags {
+ mockMetricsAppender.EXPECT().AddTag(tag.Name, tag.Value)
+ }
+ for _, dp := range testDatapoints1 {
+ mockSamplesAppender.EXPECT().AppendCounterTimedSample(dp.Timestamp, int64(dp.Value))
+ }
+ for _, tag := range testTags2.Tags {
+ mockMetricsAppender.EXPECT().AddTag(tag.Name, tag.Value)
+ }
+ for _, dp := range testDatapoints2 {
+ mockSamplesAppender.EXPECT().AppendTimerTimedSample(dp.Timestamp, dp.Value)
+ }
+ downsampler.EXPECT().NewMetricsAppender().Return(mockMetricsAppender, nil)
+
+ mockMetricsAppender.EXPECT().NextMetric().Times(2)
+ mockMetricsAppender.EXPECT().Finalize()
+
+ for _, entry := range testEntries2 {
+ for _, dp := range entry.datapoints {
+ session.EXPECT().WriteTagged(
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), dp.Value, gomock.Any(), entry.annotation,
+ )
+ }
+ }
+
+ iter := newTestIter(testEntries2)
+ err := downAndWrite.WriteBatch(context.Background(), iter, WriteOptions{})
+ require.NoError(t, err)
+}
+
+func TestDownsampleAndWriteBatchSingleDrop(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ downAndWrite, downsampler, session := newTestDownsamplerAndWriter(t, ctrl,
+ testDownsamplerAndWriterOptions{})
+
+ var (
+ mockSamplesAppender = downsample.NewMockSamplesAppender(ctrl)
+ mockMetricsAppender = downsample.NewMockMetricsAppender(ctrl)
+ )
+
+ mockMetricsAppender.
+ EXPECT().
+ SamplesAppender(zeroDownsamplerAppenderOpts).
+ Return(downsample.SamplesAppenderResult{SamplesAppender: mockSamplesAppender, IsDropPolicyApplied: true}, nil).Times(1)
+ mockMetricsAppender.
+ EXPECT().
+ SamplesAppender(zeroDownsamplerAppenderOpts).
+ Return(downsample.SamplesAppenderResult{SamplesAppender: mockSamplesAppender}, nil).Times(1)
+ for _, tag := range testTags1.Tags {
+ mockMetricsAppender.EXPECT().AddTag(tag.Name, tag.Value)
+ }
+ for _, dp := range testDatapoints1 {
+ mockSamplesAppender.EXPECT().AppendGaugeTimedSample(dp.Timestamp, dp.Value)
+ }
+ for _, tag := range testTags2.Tags {
+ mockMetricsAppender.EXPECT().AddTag(tag.Name, tag.Value)
+ }
+ for _, dp := range testDatapoints2 {
+ mockSamplesAppender.EXPECT().AppendGaugeTimedSample(dp.Timestamp, dp.Value)
+ }
+ downsampler.EXPECT().NewMetricsAppender().Return(mockMetricsAppender, nil)
+
+ mockMetricsAppender.EXPECT().NextMetric().Times(2)
+ mockMetricsAppender.EXPECT().Finalize()
+
+ for _, dp := range testEntries[1].datapoints {
+ session.EXPECT().WriteTagged(
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), dp.Value, gomock.Any(), testEntries[1].annotation,
+ )
+ }
+
+ iter := newTestIter(testEntries)
+ err := downAndWrite.WriteBatch(context.Background(), iter, WriteOptions{})
+ require.NoError(t, err)
+}
+
func TestDownsampleAndWriteBatchNoDownsampler(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -417,21 +700,21 @@ func TestDownsampleAndWriteBatchOverrideDownsampleRules(t *testing.T) {
MappingRules: overrideMappingRules,
},
}).
- Return(mockSamplesAppender, nil)
+ Return(downsample.SamplesAppenderResult{SamplesAppender: mockSamplesAppender}, nil)
entries := testEntries[:1]
for _, entry := range entries {
for _, tag := range entry.tags.Tags {
mockMetricsAppender.EXPECT().AddTag(tag.Name, tag.Value)
}
+ // We will also get the common gauge tag.
for _, dp := range entry.datapoints {
mockSamplesAppender.EXPECT().AppendGaugeTimedSample(dp.Timestamp, dp.Value)
}
}
-
downsampler.EXPECT().NewMetricsAppender().Return(mockMetricsAppender, nil)
- mockMetricsAppender.EXPECT().Reset()
+ mockMetricsAppender.EXPECT().NextMetric()
mockMetricsAppender.EXPECT().Finalize()
iter := newTestIter(entries)
@@ -500,7 +783,7 @@ func expectDefaultDownsampling(
mockMetricsAppender.
EXPECT().
SamplesAppender(downsampleOpts).
- Return(mockSamplesAppender, nil)
+ Return(downsample.SamplesAppenderResult{SamplesAppender: mockSamplesAppender}, nil)
for _, tag := range testTags1.Tags {
mockMetricsAppender.EXPECT().AddTag(tag.Name, tag.Value)
}
@@ -539,7 +822,7 @@ func newTestDownsamplerAndWriter(
storage, session = testm3.NewStorageAndSession(t, ctrl)
}
downsampler := downsample.NewMockDownsampler(ctrl)
- return NewDownsamplerAndWriter(storage, downsampler, testWorkerPool).(*downsamplerAndWriter), downsampler, session
+ return NewDownsamplerAndWriter(storage, downsampler, testWorkerPool, instrument.NewOptions()).(*downsamplerAndWriter), downsampler, session
}
func newTestDownsamplerAndWriterWithAggregatedNamespace(
@@ -550,7 +833,7 @@ func newTestDownsamplerAndWriterWithAggregatedNamespace(
storage, session := testm3.NewStorageAndSessionWithAggregatedNamespaces(
t, ctrl, aggregatedNamespaces)
downsampler := downsample.NewMockDownsampler(ctrl)
- return NewDownsamplerAndWriter(storage, downsampler, testWorkerPool).(*downsamplerAndWriter), downsampler, session
+ return NewDownsamplerAndWriter(storage, downsampler, testWorkerPool, instrument.NewOptions()).(*downsamplerAndWriter), downsampler, session
}
func init() {
diff --git a/src/cmd/services/m3coordinator/server/m3msg/config.go b/src/cmd/services/m3coordinator/server/m3msg/config.go
index e8d1c52fc2..f135b3ae39 100644
--- a/src/cmd/services/m3coordinator/server/m3msg/config.go
+++ b/src/cmd/services/m3coordinator/server/m3msg/config.go
@@ -23,6 +23,7 @@ package m3msg
import (
"github.com/m3db/m3/src/msg/consumer"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/server"
)
@@ -42,6 +43,7 @@ type Configuration struct {
// NewServer creates a new server.
func (c Configuration) NewServer(
writeFn WriteFn,
+ rwOpts xio.Options,
iOpts instrument.Options,
) (server.Server, error) {
scope := iOpts.MetricsScope().Tagged(map[string]string{"server": "m3msg"})
@@ -50,6 +52,8 @@ func (c Configuration) NewServer(
"component": "consumer",
})),
)
+
+ cOpts = cOpts.SetDecoderOptions(cOpts.DecoderOptions().SetRWOptions(rwOpts))
h, err := c.Handler.newHandler(writeFn, cOpts, iOpts.SetMetricsScope(scope))
if err != nil {
return nil, err
diff --git a/src/cmd/services/m3coordinator/server/m3msg/protobuf_handler_test.go b/src/cmd/services/m3coordinator/server/m3msg/protobuf_handler_test.go
index e56956205b..b67319836f 100644
--- a/src/cmd/services/m3coordinator/server/m3msg/protobuf_handler_test.go
+++ b/src/cmd/services/m3coordinator/server/m3msg/protobuf_handler_test.go
@@ -45,7 +45,7 @@ var (
validStoragePolicy = policy.MustParseStoragePolicy("1m:40d")
)
-func TestM3msgServerWithProtobufHandler(t *testing.T) {
+func TestM3MsgServerWithProtobufHandler(t *testing.T) {
l, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
@@ -87,7 +87,7 @@ func TestM3msgServerWithProtobufHandler(t *testing.T) {
require.NoError(t, err)
var a msgpb.Ack
- dec := proto.NewDecoder(conn, opts.DecoderOptions())
+ dec := proto.NewDecoder(conn, opts.DecoderOptions(), 10)
require.NoError(t, dec.Decode(&a))
require.Equal(t, 1, w.ingested())
diff --git a/src/cmd/services/m3dbnode/config/bootstrap.go b/src/cmd/services/m3dbnode/config/bootstrap.go
index 68e10af65f..db32771f67 100644
--- a/src/cmd/services/m3dbnode/config/bootstrap.go
+++ b/src/cmd/services/m3dbnode/config/bootstrap.go
@@ -27,6 +27,7 @@ import (
"github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/fs/migration"
"github.com/m3db/m3/src/dbnode/storage"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
@@ -58,6 +59,9 @@ type BootstrapConfiguration struct {
// Commitlog bootstrapper configuration.
Commitlog *BootstrapCommitlogConfiguration `yaml:"commitlog"`
+ // Peers bootstrapper configuration.
+ Peers *BootstrapPeersConfiguration `yaml:"peers"`
+
// CacheSeriesMetadata determines whether individual bootstrappers cache
// series metadata across all calls (namespaces / shards / blocks).
CacheSeriesMetadata *bool `yaml:"cacheSeriesMetadata"`
@@ -67,18 +71,51 @@ type BootstrapConfiguration struct {
type BootstrapFilesystemConfiguration struct {
// NumProcessorsPerCPU is the number of processors per CPU.
NumProcessorsPerCPU float64 `yaml:"numProcessorsPerCPU" validate:"min=0.0"`
+
+	// Migration configuration specifies which version, if any, existing
+	// data filesets should be migrated to.
+ Migration *BootstrapMigrationConfiguration `yaml:"migration"`
}
func (c BootstrapFilesystemConfiguration) numCPUs() int {
return int(math.Ceil(float64(c.NumProcessorsPerCPU * float64(runtime.NumCPU()))))
}
+func (c BootstrapFilesystemConfiguration) migration() BootstrapMigrationConfiguration {
+ if cfg := c.Migration; cfg != nil {
+ return *cfg
+ }
+ return BootstrapMigrationConfiguration{}
+}
+
func newDefaultBootstrapFilesystemConfiguration() BootstrapFilesystemConfiguration {
return BootstrapFilesystemConfiguration{
NumProcessorsPerCPU: defaultNumProcessorsPerCPU,
+ Migration: &BootstrapMigrationConfiguration{},
}
}
+// BootstrapMigrationConfiguration specifies configuration for data migrations during bootstrapping.
+type BootstrapMigrationConfiguration struct {
+ // TargetMigrationVersion indicates that we should attempt to upgrade filesets to
+ // what's expected of the specified version.
+ TargetMigrationVersion migration.MigrationVersion `yaml:"targetMigrationVersion"`
+
+ // Concurrency sets the number of concurrent workers performing migrations.
+ Concurrency int `yaml:"concurrency"`
+}
+
+// NewOptions generates migration.Options from the configuration.
+func (m BootstrapMigrationConfiguration) NewOptions() migration.Options {
+ opts := migration.NewOptions().SetTargetMigrationVersion(m.TargetMigrationVersion)
+
+ if m.Concurrency > 0 {
+ opts = opts.SetConcurrency(m.Concurrency)
+ }
+
+ return opts
+}
+
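
A sketch of how these migration options might look in m3dbnode YAML, nested under bootstrap.fs as in the config test fixture; the "1.1" version identifier is an assumption about the values migration.MigrationVersion accepts:

    db:
      bootstrap:
        fs:
          migration:
            targetMigrationVersion: "1.1" # assumed version identifier
            concurrency: 4
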
// BootstrapCommitlogConfiguration specifies config for the commitlog bootstrapper.
type BootstrapCommitlogConfiguration struct {
// ReturnUnfulfilledForCorruptCommitLogFiles controls whether the commitlog bootstrapper
@@ -96,6 +133,25 @@ func newDefaultBootstrapCommitlogConfiguration() BootstrapCommitlogConfiguration
}
}
+// BootstrapPeersConfiguration specifies config for the peers bootstrapper.
+type BootstrapPeersConfiguration struct {
+ // StreamShardConcurrency controls how many shards to stream in parallel
+ // for in-memory data being streamed between peers (most recent block).
+ // Defaults to: numCPU.
+ StreamShardConcurrency int `yaml:"streamShardConcurrency"`
+ // StreamPersistShardConcurrency controls how many shards to stream in
+ // parallel for historical data being streamed between peers (historical blocks).
+ // Defaults to: numCPU / 2.
+ StreamPersistShardConcurrency int `yaml:"streamPersistShardConcurrency"`
+}
+
+func newDefaultBootstrapPeersConfiguration() BootstrapPeersConfiguration {
+ return BootstrapPeersConfiguration{
+ StreamShardConcurrency: peers.DefaultShardConcurrency,
+ StreamPersistShardConcurrency: peers.DefaultShardPersistenceConcurrency,
+ }
+}
+
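
The corresponding YAML, nested under the bootstrap stanza (concurrency values illustrative; zero falls back to the peers package defaults):

    db:
      bootstrap:
        peers:
          streamShardConcurrency: 8
          streamPersistShardConcurrency: 4
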
// BootstrapConfigurationValidator can be used to validate the option sets
// that the bootstrap configuration builds.
// Useful for tests and perhaps verifying same options set across multiple
@@ -159,9 +215,10 @@ func (bsc BootstrapConfiguration) New(
SetPersistManager(opts.PersistManager()).
SetCompactor(compactor).
SetBoostrapDataNumProcessors(fsCfg.numCPUs()).
- SetDatabaseBlockRetrieverManager(opts.DatabaseBlockRetrieverManager()).
SetRuntimeOptionsManager(opts.RuntimeOptionsManager()).
- SetIdentifierPool(opts.IdentifierPool())
+ SetIdentifierPool(opts.IdentifierPool()).
+ SetMigrationOptions(fsCfg.migration().NewOptions()).
+ SetStorageOptions(opts)
if err := validator.ValidateFilesystemBootstrapperOptions(fsbOpts); err != nil {
return nil, err
}
@@ -188,6 +245,7 @@ func (bsc BootstrapConfiguration) New(
return nil, err
}
case peers.PeersBootstrapperName:
+ pCfg := bsc.peersConfig()
pOpts := peers.NewOptions().
SetResultOptions(rsOpts).
SetFilesystemOptions(fsOpts).
@@ -195,9 +253,10 @@ func (bsc BootstrapConfiguration) New(
SetAdminClient(adminClient).
SetPersistManager(opts.PersistManager()).
SetCompactor(compactor).
- SetDatabaseBlockRetrieverManager(opts.DatabaseBlockRetrieverManager()).
SetRuntimeOptionsManager(opts.RuntimeOptionsManager()).
- SetContextPool(opts.ContextPool())
+ SetContextPool(opts.ContextPool()).
+ SetDefaultShardConcurrency(pCfg.StreamShardConcurrency).
+ SetShardPersistenceConcurrency(pCfg.StreamPersistShardConcurrency)
if err := validator.ValidatePeersBootstrapperOptions(pOpts); err != nil {
return nil, err
}
@@ -241,6 +300,13 @@ func (bsc BootstrapConfiguration) commitlogConfig() BootstrapCommitlogConfigurat
return newDefaultBootstrapCommitlogConfiguration()
}
+func (bsc BootstrapConfiguration) peersConfig() BootstrapPeersConfiguration {
+ if cfg := bsc.Peers; cfg != nil {
+ return *cfg
+ }
+ return newDefaultBootstrapPeersConfiguration()
+}
+
type bootstrapConfigurationValidator struct {
}
diff --git a/src/cmd/services/m3dbnode/config/cache.go b/src/cmd/services/m3dbnode/config/cache.go
index 4e1fdae0e9..cce694fb6e 100644
--- a/src/cmd/services/m3dbnode/config/cache.go
+++ b/src/cmd/services/m3dbnode/config/cache.go
@@ -23,7 +23,7 @@ package config
import "github.com/m3db/m3/src/dbnode/storage/series"
var (
- defaultPostingsListCacheSize = 2 << 14 // 32,768
+ defaultPostingsListCacheSize = 2 << 11 // 4096
defaultPostingsListCacheRegexp = true
defaultPostingsListCacheTerms = true
)
diff --git a/src/cmd/services/m3dbnode/config/config.go b/src/cmd/services/m3dbnode/config/config.go
index 4919cc5418..82b7e58797 100644
--- a/src/cmd/services/m3dbnode/config/config.go
+++ b/src/cmd/services/m3dbnode/config/config.go
@@ -156,7 +156,7 @@ type DBConfiguration struct {
// Limits contains configuration for limits that can be applied to M3DB for the purposes
// of applying back-pressure or protecting the db nodes.
- Limits Limits `yaml:"limits"`
+ Limits LimitsConfiguration `yaml:"limits"`
// TChannel exposes TChannel config options.
TChannel *TChannelConfiguration `yaml:"tchannel"`
diff --git a/src/cmd/services/m3dbnode/config/config_mock.go b/src/cmd/services/m3dbnode/config/config_mock.go
index a511f9fd67..b0e94f8b4d 100644
--- a/src/cmd/services/m3dbnode/config/config_mock.go
+++ b/src/cmd/services/m3dbnode/config/config_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/cmd/services/m3dbnode/config (interfaces: BootstrapConfigurationValidator)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/cmd/services/m3dbnode/config/config_test.go b/src/cmd/services/m3dbnode/config/config_test.go
index d9c92b5296..c1c2b0a235 100644
--- a/src/cmd/services/m3dbnode/config/config_test.go
+++ b/src/cmd/services/m3dbnode/config/config_test.go
@@ -398,6 +398,7 @@ func TestConfiguration(t *testing.T) {
maxRetries: 3
forever: null
jitter: true
+ logErrorSampleRate: 0
backgroundHealthCheckFailLimit: 4
backgroundHealthCheckFailThrottleFactor: 0.5
hashing:
@@ -406,6 +407,9 @@ func TestConfiguration(t *testing.T) {
asyncWriteWorkerPoolSize: null
asyncWriteMaxConcurrency: null
useV2BatchAPIs: null
+ writeTimestampOffset: null
+ fetchSeriesBlocksBatchConcurrency: null
+ fetchSeriesBlocksBatchSize: null
gcPercentage: 100
writeNewSeriesLimitPerSecond: 1048576
writeNewSeriesBackoffDuration: 2ms
@@ -417,8 +421,10 @@ func TestConfiguration(t *testing.T) {
- noop-all
fs:
numProcessorsPerCPU: 0.42
+ migration: null
commitlog:
returnUnfulfilledForCorruptCommitLogFiles: false
+ peers: null
cacheSeriesMetadata: null
blockRetrieve: null
cache:
@@ -711,9 +717,11 @@ func TestConfiguration(t *testing.T) {
reconnect_period: 0s
meta_event_reporting_enabled: false
limits:
+ maxRecentlyQueriedSeriesBlocks: null
maxOutstandingWriteRequests: 0
maxOutstandingReadRequests: 0
maxOutstandingRepairedBytes: 0
+ maxEncodersPerBlock: 0
tchannel: null
coordinator: null
`
diff --git a/src/cmd/services/m3dbnode/config/limits.go b/src/cmd/services/m3dbnode/config/limits.go
index 9b7e593720..f2edac16ba 100644
--- a/src/cmd/services/m3dbnode/config/limits.go
+++ b/src/cmd/services/m3dbnode/config/limits.go
@@ -20,14 +20,22 @@
package config
-// Limits contains configuration for configurable limits that can be applied to M3DB.
-type Limits struct {
+import "time"
+
+// LimitsConfiguration contains configuration for configurable limits that can be applied to M3DB.
+type LimitsConfiguration struct {
+ // MaxRecentlyQueriedSeriesBlocks sets the upper limit on the number of time
+ // series blocks queried within a given lookback period. Queries issued while
+ // this max is surpassed encounter an error.
+ MaxRecentlyQueriedSeriesBlocks *MaxRecentlyQueriedSeriesBlocksConfiguration `yaml:"maxRecentlyQueriedSeriesBlocks"`
+
// MaxOutstandingWriteRequests controls the maximum number of outstanding write requests
// that the server will allow before it begins rejecting requests. Note that this value
// is independent of the number of values that are being written (due to variable batch
// size from the client) but is still very useful for enforcing backpressure due to the fact
// that all writes within a single RPC are single-threaded.
MaxOutstandingWriteRequests int `yaml:"maxOutstandingWriteRequests" validate:"min=0"`
+
// MaxOutstandingReadRequests controls the maximum number of outstanding read requests that
// the server will allow before it begins rejecting requests. Just like MaxOutstandingWriteRequests
// this value is independent of the number of time series being read.
@@ -39,4 +47,22 @@ type Limits struct {
// process would pause until some of the repaired bytes had been persisted to disk (and subsequently
// evicted from memory) at which point it would resume.
MaxOutstandingRepairedBytes int64 `yaml:"maxOutstandingRepairedBytes" validate:"min=0"`
+
+ // MaxEncodersPerBlock is the maximum number of encoders permitted in a block.
+ // When there are too many encoders, merging them (during a tick) puts a high
+ // load on the CPU, which can prevent other DB operations.
+ // A setting of 0 means there is no maximum.
+ MaxEncodersPerBlock int `yaml:"maxEncodersPerBlock" validate:"min=0"`
+}
+
+// MaxRecentlyQueriedSeriesBlocksConfiguration sets the upper limit on the
+// number of time series blocks queried within a given lookback period. Queries
+// issued while this max is surpassed encounter an error.
+type MaxRecentlyQueriedSeriesBlocksConfiguration struct {
+ // Value sets the max recently queried time series blocks for the given
+ // time window.
+ Value int64 `yaml:"value" validate:"min=0"`
+ // Lookback is the window of time over which the max number of queried
+ // time series blocks is enforced.
+ Lookback time.Duration `yaml:"lookback" validate:"min=0"`
}
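
Expressed in YAML, using the struct tags above (numbers illustrative):

    db:
      limits:
        maxRecentlyQueriedSeriesBlocks:
          value: 1000000
          lookback: 5s
        maxOutstandingWriteRequests: 0
        maxOutstandingReadRequests: 0
        maxOutstandingRepairedBytes: 0
        maxEncodersPerBlock: 0
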
diff --git a/src/cmd/services/m3dbnode/config/pooling.go b/src/cmd/services/m3dbnode/config/pooling.go
index 29c117c50a..584397a86b 100644
--- a/src/cmd/services/m3dbnode/config/pooling.go
+++ b/src/cmd/services/m3dbnode/config/pooling.go
@@ -75,7 +75,7 @@ var (
// concurrent query, so as long as there is no more than
// the number of concurrent index queries than the size
// specified here the maps should be recycled.
- size: 512,
+ size: 256,
refillLowWaterMark: 0,
refillHighWaterMark: 0,
},
@@ -90,22 +90,22 @@ var (
},
"tagDecoder": defaultPoolPolicy,
"context": poolPolicyDefault{
- size: 131072,
+ size: 32768,
refillLowWaterMark: 0,
refillHighWaterMark: 0,
},
"series": poolPolicyDefault{
- size: 262144,
+ size: 65536,
refillLowWaterMark: defaultRefillLowWaterMark,
refillHighWaterMark: defaultRefillHighWaterMark,
},
"block": poolPolicyDefault{
- size: 262144,
+ size: 65536,
refillLowWaterMark: defaultRefillLowWaterMark,
refillHighWaterMark: defaultRefillHighWaterMark,
},
"encoder": poolPolicyDefault{
- size: 262144,
+ size: 65536,
refillLowWaterMark: defaultRefillLowWaterMark,
refillHighWaterMark: defaultRefillHighWaterMark,
},
@@ -113,7 +113,7 @@ var (
// NB(r): Note this has to be bigger than context pool by
// big fraction (by factor of say 4) since each context
// usually uses a fair few closers.
- size: 524288,
+ size: 262144,
refillLowWaterMark: 0,
refillHighWaterMark: 0,
},
@@ -138,7 +138,7 @@ var (
refillHighWaterMark: defaultRefillHighWaterMark,
},
"identifier": poolPolicyDefault{
- size: 262144,
+ size: 65536,
refillLowWaterMark: defaultRefillLowWaterMark,
refillHighWaterMark: defaultRefillHighWaterMark,
},
diff --git a/src/cmd/services/m3em_agent/agentmain/agent.go b/src/cmd/services/m3em_agent/agentmain/agent.go
index c2d5f11306..53298f4327 100644
--- a/src/cmd/services/m3em_agent/agentmain/agent.go
+++ b/src/cmd/services/m3em_agent/agentmain/agent.go
@@ -93,7 +93,7 @@ func Run() {
iopts := instrument.NewOptions().
SetLogger(logger).
SetMetricsScope(scope).
- SetMetricsSamplingRate(conf.Metrics.SampleRate)
+ SetTimerOptions(instrument.TimerOptions{StandardSampleRate: conf.Metrics.SampleRate})
agentOpts := agent.NewOptions(iopts).
SetWorkingDirectory(conf.Agent.WorkingDir).
diff --git a/src/cmd/services/m3nsch_server/main/main.go b/src/cmd/services/m3nsch_server/main/main.go
index fb58880e6a..79eeba0012 100644
--- a/src/cmd/services/m3nsch_server/main/main.go
+++ b/src/cmd/services/m3nsch_server/main/main.go
@@ -89,7 +89,7 @@ func main() {
NewOptions().
SetLogger(rawLogger).
SetMetricsScope(scope).
- SetMetricsSamplingRate(conf.Metrics.SamplingRate)
+ SetTimerOptions(instrument.TimerOptions{StandardSampleRate: conf.Metrics.SampleRate()})
datumRegistry := datums.NewDefaultRegistry(conf.M3nsch.NumPointsPerDatum)
agentOpts := agent.NewOptions(iopts).
SetConcurrency(conf.M3nsch.Concurrency).
diff --git a/src/cmd/services/m3query/config/config.go b/src/cmd/services/m3query/config/config.go
index 00001fbd89..1bc81e6f4c 100644
--- a/src/cmd/services/m3query/config/config.go
+++ b/src/cmd/services/m3query/config/config.go
@@ -35,6 +35,8 @@ import (
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/m3"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
xconfig "github.com/m3db/m3/src/x/config"
"github.com/m3db/m3/src/x/config/listenaddress"
"github.com/m3db/m3/src/x/cost"
@@ -52,11 +54,20 @@ const (
GRPCStorageType BackendStorageType = "grpc"
// M3DBStorageType is for m3db backend.
M3DBStorageType BackendStorageType = "m3db"
+ // NoopEtcdStorageType is for a noop backend which returns empty results for
+ // any query and blackholes any writes, but requires that a valid etcd cluster
+ // is defined and can be connected to. Primarily used for standalone
+ // coordinators used only to serve m3admin APIs.
+ NoopEtcdStorageType BackendStorageType = "noop-etcd"
defaultCarbonIngesterListenAddress = "0.0.0.0:7204"
errNoIDGenerationScheme = "error: a recent breaking change means that an ID " +
"generation scheme is required in coordinator configuration settings. " +
"More information is available here: %s"
+
+ defaultQueryTimeout = 30 * time.Second
+
+ defaultPrometheusMaxSamplesPerQuery = 100000000
)
var (
@@ -65,7 +76,8 @@ var (
defaultCarbonIngesterAggregationType = aggregation.Mean
- defaultStorageQueryLimit = 10000
+ defaultStorageQuerySeriesLimit = 10000
+ defaultStorageQueryDocsLimit = 0 // Default OFF.
)
// Configuration is the configuration for the query service.
@@ -124,6 +136,9 @@ type Configuration struct {
// Carbon is the carbon configuration.
Carbon *CarbonConfiguration `yaml:"carbon"`
+ // Query is the query configuration.
+ Query QueryConfiguration `yaml:"query"`
+
// Limits specifies limits on per-query resource usage.
Limits LimitsConfiguration `yaml:"limits"`
@@ -144,6 +159,9 @@ type Configuration struct {
// stanza not able to startup the binary since we parse YAML in strict mode
// by default).
DeprecatedCache CacheConfiguration `yaml:"cache"`
+
+ // MultiProcess is the multi-process configuration.
+ MultiProcess MultiProcessConfiguration `yaml:"multiProcess"`
}
// WriteForwardingConfiguration is the write forwarding configuration.
@@ -190,18 +208,102 @@ type ResultOptions struct {
KeepNans bool `yaml:"keepNans"`
}
+// QueryConfiguration is the query configuration.
+type QueryConfiguration struct {
+ // Timeout is the query timeout.
+ Timeout *time.Duration `yaml:"timeout"`
+ // DefaultEngine is the default query engine.
+ DefaultEngine string `yaml:"defaultEngine"`
+ // ConsolidationConfiguration are configs for consolidating fetched queries.
+ ConsolidationConfiguration ConsolidationConfiguration `yaml:"consolidation"`
+ // Prometheus is the prometheus client configuration.
+ Prometheus PrometheusQueryConfiguration `yaml:"prometheus"`
+ // RestrictTags is an optional configuration that can be set to restrict
+ // all queries by certain tags.
+ RestrictTags *RestrictTagsConfiguration `yaml:"restrictTags"`
+}
+
+// TimeoutOrDefault returns the configured timeout or default value.
+func (c QueryConfiguration) TimeoutOrDefault() time.Duration {
+ if v := c.Timeout; v != nil {
+ return *v
+ }
+ return defaultQueryTimeout
+}
+
+// RestrictTagsAsStorageRestrictByTag returns restrict tags as
+// storage options to restrict all queries by default.
+func (c QueryConfiguration) RestrictTagsAsStorageRestrictByTag() (*storage.RestrictByTag, bool, error) {
+ if c.RestrictTags == nil {
+ return nil, false, nil
+ }
+
+ var (
+ cfg = *c.RestrictTags
+ result = handleroptions.StringTagOptions{
+ Restrict: make([]handleroptions.StringMatch, 0, len(cfg.Restrict)),
+ Strip: cfg.Strip,
+ }
+ )
+ for _, elem := range cfg.Restrict {
+ value := handleroptions.StringMatch(elem)
+ result.Restrict = append(result.Restrict, value)
+ }
+
+ opts, err := result.StorageOptions()
+ if err != nil {
+ return nil, false, err
+ }
+
+ return opts, true, nil
+}
+
+// RestrictTagsConfiguration applies tag restriction to all queries.
+type RestrictTagsConfiguration struct {
+ Restrict []StringMatch `yaml:"match"`
+ Strip []string `yaml:"strip"`
+}
+
+// StringMatch is an easy to use representation of models.Matcher.
+type StringMatch struct {
+ Name string `yaml:"name"`
+ Type string `yaml:"type"`
+ Value string `yaml:"value"`
+}
+
+// ConsolidationConfiguration are configs for consolidating fetched queries.
+type ConsolidationConfiguration struct {
+ // MatchType determines the options by which series should match.
+ MatchType consolidators.MatchType `yaml:"matchType"`
+}
+
+// PrometheusQueryConfiguration is the prometheus query engine configuration.
+type PrometheusQueryConfiguration struct {
+ // MaxSamplesPerQuery is the limit on fetched samples per query.
+ MaxSamplesPerQuery *int `yaml:"maxSamplesPerQuery"`
+}
+
+// MaxSamplesPerQueryOrDefault returns the max samples per query or default.
+func (c PrometheusQueryConfiguration) MaxSamplesPerQueryOrDefault() int {
+ if v := c.MaxSamplesPerQuery; v != nil {
+ return *v
+ }
+
+ return defaultPrometheusMaxSamplesPerQuery
+}
+
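
A sketch of the new query stanza; the engine, match-type, and matcher strings are assumptions about what the respective parsers accept, and the numbers are illustrative:

    query:
      timeout: 30s
      defaultEngine: prometheus
      consolidation:
        matchType: ids
      prometheus:
        maxSamplesPerQuery: 100000000
      restrictTags:
        match:
          - name: deployment
            type: EQUAL
            value: production
        strip:
          - deployment
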
// LimitsConfiguration represents limitations on resource usage in the query
// instance. Limits are split between per-query and global limits.
type LimitsConfiguration struct {
- // deprecated: use PerQuery.MaxComputedDatapoints instead.
- DeprecatedMaxComputedDatapoints int64 `yaml:"maxComputedDatapoints"`
+ // PerQuery configures limits which apply to each query individually.
+ PerQuery PerQueryLimitsConfiguration `yaml:"perQuery"`
// Global configures limits which apply across all queries running on this
// instance.
Global GlobalLimitsConfiguration `yaml:"global"`
- // PerQuery configures limits which apply to each query individually.
- PerQuery PerQueryLimitsConfiguration `yaml:"perQuery"`
+ // Deprecated: use PerQuery.MaxComputedDatapoints instead.
+ DeprecatedMaxComputedDatapoints int `yaml:"maxComputedDatapoints"`
}
// MaxComputedDatapoints is a getter providing backwards compatibility between
@@ -209,7 +311,7 @@ type LimitsConfiguration struct {
// LimitsConfiguration.PerQuery.PrivateMaxComputedDatapoints. See
// LimitsConfiguration.PerQuery.PrivateMaxComputedDatapoints for a comment on
// the semantics.
-func (lc *LimitsConfiguration) MaxComputedDatapoints() int64 {
+func (lc LimitsConfiguration) MaxComputedDatapoints() int {
if lc.PerQuery.PrivateMaxComputedDatapoints != 0 {
return lc.PerQuery.PrivateMaxComputedDatapoints
}
@@ -220,9 +322,10 @@ func (lc *LimitsConfiguration) MaxComputedDatapoints() int64 {
// GlobalLimitsConfiguration represents limits on resource usage across a query
// instance. Zero or negative values imply no limit.
type GlobalLimitsConfiguration struct {
- // MaxFetchedDatapoints limits the total number of datapoints actually
- // fetched by all queries at any given time.
- MaxFetchedDatapoints int64 `yaml:"maxFetchedDatapoints"`
+ // MaxFetchedDatapoints limits the max number of datapoints allowed to be
+ // used by all queries at any point in time. This is applied at the query
+ // service after the result has been returned by a storage node.
+ MaxFetchedDatapoints int `yaml:"maxFetchedDatapoints"`
}
// AsLimitManagerOptions converts this configuration to
@@ -234,6 +337,24 @@ func (l *GlobalLimitsConfiguration) AsLimitManagerOptions() cost.LimitManagerOpt
// PerQueryLimitsConfiguration represents limits on resource usage within a
// single query. Zero or negative values imply no limit.
type PerQueryLimitsConfiguration struct {
+ // MaxFetchedSeries limits the number of time series returned by any given
+ // individual storage node per query, before the result is returned to the
+ // query service.
+ MaxFetchedSeries int `yaml:"maxFetchedSeries"`
+
+ // MaxFetchedDocs limits the number of index documents matched by any given
+ // individual storage node per query, before the result is returned to the
+ // query service.
+ MaxFetchedDocs int `yaml:"maxFetchedDocs"`
+
+ // RequireExhaustive results in an error if the query exceeds any limit.
+ RequireExhaustive bool `yaml:"requireExhaustive"`
+
+ // MaxFetchedDatapoints limits the max number of datapoints allowed to be
+ // used by a given query. This is applied at the query service after the
+ // result has been returned by a storage node.
+ MaxFetchedDatapoints int `yaml:"maxFetchedDatapoints"`
+
+ // PrivateMaxComputedDatapoints limits the number of datapoints that can be
+ // returned by a query. It's determined purely from the size of the time
+ // range and the step size ((end - start) / step).
@@ -241,14 +362,7 @@ type PerQueryLimitsConfiguration struct {
// N.B.: the hacky "Private" prefix is to indicate that callers should use
// LimitsConfiguration.MaxComputedDatapoints() instead of accessing
// this field directly.
- PrivateMaxComputedDatapoints int64 `yaml:"maxComputedDatapoints"`
-
- // MaxFetchedDatapoints limits the number of datapoints actually used by a
- // given query.
- MaxFetchedDatapoints int64 `yaml:"maxFetchedDatapoints"`
-
- // MaxFetchedSeries limits the number of time series returned by a storage node.
- MaxFetchedSeries int64 `yaml:"maxFetchedSeries"`
+ PrivateMaxComputedDatapoints int `yaml:"maxComputedDatapoints"`
}
// AsLimitManagerOptions converts this configuration to
@@ -257,21 +371,27 @@ func (l *PerQueryLimitsConfiguration) AsLimitManagerOptions() cost.LimitManagerO
return toLimitManagerOptions(l.MaxFetchedDatapoints)
}
-// AsFetchOptionsBuilderOptions converts this configuration to
-// handler.FetchOptionsBuilderOptions.
-func (l *PerQueryLimitsConfiguration) AsFetchOptionsBuilderOptions() handleroptions.FetchOptionsBuilderOptions {
- if l.MaxFetchedSeries <= 0 {
- return handleroptions.FetchOptionsBuilderOptions{
- Limit: defaultStorageQueryLimit,
- }
+// AsFetchOptionsBuilderLimitsOptions converts this configuration to
+// handleroptions.FetchOptionsBuilderLimitsOptions.
+func (l *PerQueryLimitsConfiguration) AsFetchOptionsBuilderLimitsOptions() handleroptions.FetchOptionsBuilderLimitsOptions {
+ seriesLimit := defaultStorageQuerySeriesLimit
+ if v := l.MaxFetchedSeries; v > 0 {
+ seriesLimit = v
+ }
+
+ docsLimit := defaultStorageQueryDocsLimit
+ if v := l.MaxFetchedDocs; v > 0 {
+ docsLimit = v
}
- return handleroptions.FetchOptionsBuilderOptions{
- Limit: int(l.MaxFetchedSeries),
+ return handleroptions.FetchOptionsBuilderLimitsOptions{
+ SeriesLimit: int(seriesLimit),
+ DocsLimit: int(docsLimit),
+ RequireExhaustive: l.RequireExhaustive,
}
}
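
In YAML form (values illustrative; when unset, the series limit defaults to 10000 and the docs limit to 0, i.e. off):

    limits:
      perQuery:
        maxFetchedSeries: 10000
        maxFetchedDocs: 50000
        requireExhaustive: false
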
-func toLimitManagerOptions(limit int64) cost.LimitManagerOptions {
+func toLimitManagerOptions(limit int) cost.LimitManagerOptions {
return cost.NewLimitManagerOptions().SetDefaultLimit(cost.Limit{
Threshold: cost.Cost(limit),
Enabled: limit > 0,
@@ -340,7 +460,7 @@ func (c *CarbonIngesterConfiguration) RulesOrDefault(namespaces m3.ClusterNamesp
// Default to fanning out writes for all metrics to all aggregated namespaces if any exists.
policies := []CarbonIngesterStoragePolicyConfiguration{}
for _, ns := range namespaces {
- if ns.Options().Attributes().MetricsType == storage.AggregatedMetricsType {
+ if ns.Options().Attributes().MetricsType == storagemetadata.AggregatedMetricsType {
policies = append(policies, CarbonIngesterStoragePolicyConfiguration{
Resolution: ns.Options().Attributes().Resolution,
Retention: ns.Options().Attributes().Retention,
@@ -487,6 +607,21 @@ type TagOptionsConfiguration struct {
// Scheme determines the default ID generation scheme. Defaults to TypeLegacy.
Scheme models.IDSchemeType `yaml:"idScheme"`
+
+ // Filters are optional tag filters, removing all series with tags
+ // matching the filter from computations.
+ Filters []TagFilter `yaml:"filters"`
+}
+
+// TagFilter is a tag filter.
+type TagFilter struct {
+ // Values are the values to filter.
+ //
+ // NB: If this is unset, all series containing
+ // a tag with the given `Name` are filtered.
+ Values []string `yaml:"values"`
+ // Name is the tag name.
+ Name string `yaml:"name"`
}
// TagOptionsFromConfig translates tag option configuration into tag options.
@@ -513,6 +648,23 @@ func TagOptionsFromConfig(cfg TagOptionsConfiguration) (models.TagOptions, error
return nil, err
}
+ if cfg.Filters != nil {
+ filters := make([]models.Filter, 0, len(cfg.Filters))
+ for _, filter := range cfg.Filters {
+ values := make([][]byte, 0, len(filter.Values))
+ for _, strVal := range filter.Values {
+ values = append(values, []byte(strVal))
+ }
+
+ filters = append(filters, models.Filter{
+ Name: []byte(filter.Name),
+ Values: values,
+ })
+ }
+
+ opts = opts.SetFilters(filters)
+ }
+
return opts, nil
}
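
A sketch of the new filters under tagOptions (tag names illustrative; omitting values drops every series carrying the named tag):

    tagOptions:
      idScheme: quoted
      filters:
        - name: environment
          values: ["staging", "dev"]
        - name: internal_only
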
@@ -520,3 +672,20 @@ func TagOptionsFromConfig(cfg TagOptionsConfiguration) (models.TagOptions, error
type ExperimentalAPIConfiguration struct {
Enabled bool `yaml:"enabled"`
}
+
+// MultiProcessConfiguration is the multi-process configuration which
+// allows running multiple sub-processes of an instance reusing the
+// same listen ports.
+type MultiProcessConfiguration struct {
+ // Enabled is whether to enable multi-process execution.
+ Enabled bool `yaml:"enabled"`
+ // Count is the number of sub-processes to run; leave zero
+ // to auto-detect based on the number of CPUs.
+ Count int `yaml:"count" validate:"min=0"`
+ // PerCPU is the factor of processes to run per CPU; leave
+ // zero to use the default of 0.5 per CPU (i.e. one process for
+ // every two CPUs).
+ PerCPU float64 `yaml:"perCPU" validate:"min=0.0, max=1.0"`
+ // GoMaxProcs, if set, will explicitly set the child GOMAXPROCS env var.
+ GoMaxProcs int `yaml:"goMaxProcs"`
+}
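
Expressed in YAML (values illustrative):

    multiProcess:
      enabled: true
      count: 0      # zero auto-detects from the CPU count
      perCPU: 0.5   # one sub-process for every two CPUs
      goMaxProcs: 0
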
diff --git a/src/cmd/services/m3query/config/config_test.go b/src/cmd/services/m3query/config/config_test.go
index d77dbe49df..0143abf2fc 100644
--- a/src/cmd/services/m3query/config/config_test.go
+++ b/src/cmd/services/m3query/config/config_test.go
@@ -65,43 +65,57 @@ func TestTagOptionsFromConfig(t *testing.T) {
cfg := TagOptionsConfiguration{
MetricName: name,
Scheme: models.TypeLegacy,
+ Filters: []TagFilter{
+ {Name: "foo", Values: []string{".", "abc"}},
+ {Name: "bar", Values: []string{".*"}},
+ },
}
opts, err := TagOptionsFromConfig(cfg)
require.NoError(t, err)
require.NotNil(t, opts)
assert.Equal(t, []byte(name), opts.MetricName())
+ filters := opts.Filters()
+ exNames := [][]byte{[]byte("foo"), []byte("bar")}
+ exVals := [][]string{{".", "abc"}, {".*"}}
+ require.Equal(t, 2, len(filters))
+ for i, f := range filters {
+ assert.Equal(t, exNames[i], f.Name)
+ for j, v := range f.Values {
+ assert.Equal(t, []byte(exVals[i][j]), v)
+ }
+ }
}
-func TestLimitsConfiguration_AsLimitManagerOptions(t *testing.T) {
+func TestLimitsConfigurationAsLimitManagerOptions(t *testing.T) {
cases := []struct {
- Input interface {
+ input interface {
AsLimitManagerOptions() cost.LimitManagerOptions
}
- ExpectedDefault int64
+ expectedDefault int
}{{
- Input: &PerQueryLimitsConfiguration{
+ input: &PerQueryLimitsConfiguration{
MaxFetchedDatapoints: 5,
},
- ExpectedDefault: 5,
+ expectedDefault: 5,
}, {
- Input: &GlobalLimitsConfiguration{
+ input: &GlobalLimitsConfiguration{
MaxFetchedDatapoints: 6,
},
- ExpectedDefault: 6,
+ expectedDefault: 6,
}}
for _, tc := range cases {
- t.Run(fmt.Sprintf("type_%T", tc.Input), func(t *testing.T) {
- res := tc.Input.AsLimitManagerOptions()
+ t.Run(fmt.Sprintf("type_%T", tc.input), func(t *testing.T) {
+ res := tc.input.AsLimitManagerOptions()
assert.Equal(t, cost.Limit{
- Threshold: cost.Cost(tc.ExpectedDefault),
+ Threshold: cost.Cost(tc.expectedDefault),
Enabled: true,
}, res.DefaultLimit())
})
}
}
-func TestLimitsConfiguration_MaxComputedDatapoints(t *testing.T) {
+func TestLimitsConfigurationMaxComputedDatapoints(t *testing.T) {
t.Run("uses PerQuery value if provided", func(t *testing.T) {
lc := &LimitsConfiguration{
DeprecatedMaxComputedDatapoints: 6,
@@ -110,7 +124,7 @@ func TestLimitsConfiguration_MaxComputedDatapoints(t *testing.T) {
},
}
- assert.Equal(t, int64(5), lc.MaxComputedDatapoints())
+ assert.Equal(t, 5, lc.MaxComputedDatapoints())
})
t.Run("uses deprecated value if PerQuery not provided", func(t *testing.T) {
@@ -118,41 +132,41 @@ func TestLimitsConfiguration_MaxComputedDatapoints(t *testing.T) {
DeprecatedMaxComputedDatapoints: 6,
}
- assert.Equal(t, int64(6), lc.MaxComputedDatapoints())
+ assert.Equal(t, 6, lc.MaxComputedDatapoints())
})
}
func TestToLimitManagerOptions(t *testing.T) {
cases := []struct {
- Name string
- Input int64
- ExpectedLimit cost.Limit
+ name string
+ input int
+ expectedLimit cost.Limit
}{{
- Name: "negative is disabled",
- Input: -5,
- ExpectedLimit: cost.Limit{
+ name: "negative is disabled",
+ input: -5,
+ expectedLimit: cost.Limit{
Threshold: cost.Cost(-5),
Enabled: false,
},
}, {
- Name: "zero is disabled",
- Input: 0,
- ExpectedLimit: cost.Limit{
+ name: "zero is disabled",
+ input: 0,
+ expectedLimit: cost.Limit{
Threshold: cost.Cost(0),
Enabled: false,
},
}, {
- Name: "positive is enabled",
- Input: 5,
- ExpectedLimit: cost.Limit{
+ name: "positive is enabled",
+ input: 5,
+ expectedLimit: cost.Limit{
Threshold: cost.Cost(5),
Enabled: true,
},
}}
for _, tc := range cases {
- t.Run(tc.Name, func(t *testing.T) {
- assert.Equal(t, tc.ExpectedLimit, toLimitManagerOptions(tc.Input).DefaultLimit())
+ t.Run(tc.name, func(t *testing.T) {
+ assert.Equal(t, tc.expectedLimit, toLimitManagerOptions(tc.input).DefaultLimit())
})
}
}
@@ -185,25 +199,25 @@ func TestConfigValidation(t *testing.T) {
// limits configuration
limitsCfgCases := []struct {
- Name string
- Limit int64
+ name string
+ limit int
}{{
- Name: "empty LimitsConfiguration is valid (implies disabled)",
- Limit: 0,
+ name: "empty LimitsConfiguration is valid (implies disabled)",
+ limit: 0,
}, {
- Name: "LimitsConfiguration with positive limit is valid",
- Limit: 5,
+ name: "LimitsConfiguration with positive limit is valid",
+ limit: 5,
}, {
- Name: "LimitsConfiguration with negative limit is valid (implies disabled)",
- Limit: -5,
+ name: "LimitsConfiguration with negative limit is valid (implies disabled)",
+ limit: -5,
}}
for _, tc := range limitsCfgCases {
- t.Run(tc.Name, func(t *testing.T) {
+ t.Run(tc.name, func(t *testing.T) {
cfg := baseCfg(t)
cfg.Limits = LimitsConfiguration{
PerQuery: PerQueryLimitsConfiguration{
- PrivateMaxComputedDatapoints: tc.Limit,
+ PrivateMaxComputedDatapoints: tc.limit,
}}
assert.NoError(t, validator.Validate(cfg))
diff --git a/src/cmd/services/m3ctl/config/config.go b/src/cmd/services/r2ctl/config/config.go
similarity index 100%
rename from src/cmd/services/m3ctl/config/config.go
rename to src/cmd/services/r2ctl/config/config.go
diff --git a/src/cmd/services/m3ctl/config/server.go b/src/cmd/services/r2ctl/config/server.go
similarity index 100%
rename from src/cmd/services/m3ctl/config/server.go
rename to src/cmd/services/r2ctl/config/server.go
diff --git a/src/cmd/services/m3ctl/main/main.go b/src/cmd/services/r2ctl/main/main.go
similarity index 97%
rename from src/cmd/services/m3ctl/main/main.go
rename to src/cmd/services/r2ctl/main/main.go
index 40fe8e1332..347aa5b462 100644
--- a/src/cmd/services/m3ctl/main/main.go
+++ b/src/cmd/services/r2ctl/main/main.go
@@ -29,7 +29,7 @@ import (
"syscall"
"time"
- "github.com/m3db/m3/src/cmd/services/m3ctl/config"
+ "github.com/m3db/m3/src/cmd/services/r2ctl/config"
"github.com/m3db/m3/src/ctl/auth"
"github.com/m3db/m3/src/ctl/server/http"
"github.com/m3db/m3/src/ctl/service/health"
@@ -102,7 +102,7 @@ func main() {
instrumentOpts := instrument.NewOptions().
SetLogger(rawLogger).
SetMetricsScope(scope).
- SetMetricsSamplingRate(cfg.Metrics.SampleRate()).
+ SetTimerOptions(instrument.TimerOptions{StandardSampleRate: cfg.Metrics.SampleRate()}).
SetReportInterval(cfg.Metrics.ReportInterval())
// Create R2 store.
diff --git a/src/cmd/tools/docs_test/main/main.go b/src/cmd/tools/docs_test/main/main.go
index 1079d72a11..2236daf663 100644
--- a/src/cmd/tools/docs_test/main/main.go
+++ b/src/cmd/tools/docs_test/main/main.go
@@ -23,7 +23,6 @@ package main
import (
"crypto/tls"
- "flag"
"fmt"
"io/ioutil"
"log"
@@ -32,26 +31,40 @@ import (
"path"
"path/filepath"
"strings"
+ "time"
"github.com/m3db/m3/src/x/docs"
- "github.com/russross/blackfriday"
+ "github.com/pborman/getopt"
+ "gopkg.in/russross/blackfriday.v2"
)
func main() {
+ // Default link excludes.
+ linkExcludes := []string{
+ // Youtube has some problematic public rate limits.
+ "youtu.be",
+ "youtube.com",
+ }
+
var (
- docsDirArg = flag.String("docsdir", "docs", "The documents directory to test")
+ docsDirArg = getopt.StringLong("docsdir", 'd', "docs", "The documents directory to test")
+ linkSleepArg = getopt.DurationLong("linksleep", 's', time.Second, "Amount to sleep between checking links to avoid 429 rate limits from github")
)
- flag.Parse()
-
- if *docsDirArg == "" {
- flag.Usage()
- os.Exit(1)
+ getopt.ListVarLong(&linkExcludes, "linkexcludes", 'e', "Exclude strings to check links against due to rate limiting/flakiness")
+ getopt.Parse()
+ if len(*docsDirArg) == 0 {
+ getopt.Usage()
+ return
}
docsDir := *docsDirArg
+ linkSleep := *linkSleepArg
- repoPathsValidated := 0
+ var (
+ excluded []string
+ repoPathsValidated int
+ )
resultErr := filepath.Walk(docsDir, func(filePath string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("failed to walk path=%s: %v", filePath, err)
@@ -76,37 +89,50 @@ func main() {
switch node.Type {
case blackfriday.Link:
link := node.LinkData
- url := strings.TrimSpace(string(link.Destination))
+ linkURL := strings.TrimSpace(string(link.Destination))
resolvedPath := ""
- if parse, ok := docs.ParseRepoPathURL(url); ok {
+ if parse, ok := docs.ParseRepoPathURL(linkURL); ok {
// Internal relative local repo path
resolvedPath = parse.RepoPath
- } else if isHTTPOrHTTPS(url) {
+ } else if isHTTPOrHTTPS(linkURL) {
// External link
- resolvedPath = url
+ resolvedPath = linkURL
} else {
// Otherwise should be a direct relative link
// (not repo URL prefixed)
- if v := strings.Index(url, "#"); v != -1 {
+ if v := strings.Index(linkURL, "#"); v != -1 {
// Remove links to subsections of docs
- url = url[:v]
+ linkURL = linkURL[:v]
}
- if len(url) > 0 && url[0] == '/' {
+ if len(linkURL) > 0 && linkURL[0] == '/' {
// This is an absolute link to within the docs directory
- resolvedPath = path.Join(docsDir, url[1:])
+ resolvedPath = path.Join(docsDir, linkURL[1:])
} else {
// We assume this is a relative path from the current doc
- resolvedPath = path.Join(path.Dir(filePath), url)
+ resolvedPath = path.Join(path.Dir(filePath), linkURL)
}
}
checked := checkedLink{
document: filePath,
- url: url,
+ url: linkURL,
resolvedPath: resolvedPath,
}
if isHTTPOrHTTPS(resolvedPath) {
+ excludeCheck := false
+ for _, str := range linkExcludes {
+ if strings.Contains(resolvedPath, str) {
+ excludeCheck = true
+ break
+ }
+ }
+ if excludeCheck {
+ // Exclude this link and walk to next.
+ excluded = append(excluded, resolvedPath)
+ return blackfriday.GoToNext
+ }
+
// Check external link using HEAD request
client := &http.Client{
Transport: &http.Transport{
@@ -126,6 +152,8 @@ func main() {
if err != nil {
return abort(checkedLinkError(checked, err))
}
+ // Sleep to avoid rate limits.
+ time.Sleep(linkSleep)
} else {
// Check relative path
if _, err := os.Stat(resolvedPath); err != nil {
@@ -145,7 +173,12 @@ func main() {
log.Fatalf("failed validation: %v", resultErr)
}
- log.Printf("validated docs (repoPathsValidated=%d)\n", repoPathsValidated)
+ log.Printf("validated docs (repoPathsValidated=%d, excluded=%d)\n",
+ repoPathsValidated, len(excluded))
+
+ for _, linkURL := range excluded {
+ log.Printf("excluded: %s\n", linkURL)
+ }
}
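
With the switch from flag to getopt the tool takes long or short flags; a hypothetical invocation (paths and exclude strings illustrative):

    go run ./src/cmd/tools/docs_test/main --docsdir docs --linksleep 2s --linkexcludes example.com
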
func isHTTPOrHTTPS(url string) bool {
diff --git a/src/cmd/tools/dtest/docker/harness/carbon_test.go b/src/cmd/tools/dtest/docker/harness/carbon_test.go
new file mode 100644
index 0000000000..199a770a43
--- /dev/null
+++ b/src/cmd/tools/dtest/docker/harness/carbon_test.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package harness
+
+import (
+ "encoding/json"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/cmd/tools/dtest/docker/harness/resources"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// findVerifier returns a verifier that checks the raw response matches the
+// expected string exactly.
+func findVerifier(expected string) resources.GoalStateVerifier {
+ return func(s string, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if s == expected {
+ return nil
+ }
+
+ return fmt.Errorf("%s does not match expected: %s", s, expected)
+ }
+}
+
+// renderVerifier returns a verifier that checks the graphite render response
+// contains a datapoint with the given value.
+func renderVerifier(metric string, v float64) resources.GoalStateVerifier {
+ type graphiteRender struct {
+ Target string `json:"target"`
+ Datapoints [][]float64 `json:"datapoints"`
+ }
+
+ return func(s string, err error) error {
+ if err != nil {
+ return err
+ }
+
+ var render []graphiteRender
+ if err := json.Unmarshal([]byte(s), &render); err != nil {
+ return err
+ }
+
+ if len(render) != 1 {
+ return fmt.Errorf("expected one result, got %d", len(render))
+ }
+
+ for _, dps := range render[0].Datapoints {
+ // NB: graphite presents datapoints as an array [value, timestamp]
+ // i.e. value: dps[0], timestamp: dps[1]
+ if len(dps) != 2 {
+ return fmt.Errorf("expected two values in result, got %d", len(dps))
+ }
+
+ if dps[0] == v {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("could not find point with value %f", v)
+ }
+}
+
+func graphiteQuery(target string, start time.Time) string {
+ from := start.Add(time.Minute * -2).Unix()
+ until := start.Add(time.Minute * 2).Unix()
+ return fmt.Sprintf("api/v1/graphite/render?target=%s&from=%d&until=%d",
+ target, from, until)
+}
+
+func graphiteFind(query string) string {
+ return fmt.Sprintf("api/v1/graphite/metrics/find?query=%s", query)
+}
+
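
For reference, a sketch of the render payload shape renderVerifier accepts; graphite returns datapoints as [value, timestamp] pairs, and the payload below is fabricated for illustration:

    payload := `[{"target":"foo.min.aggregate.baz","datapoints":[[40,1590000000]]}]`
    verify := renderVerifier("foo.min.aggregate.baz", 40)
    err := verify(payload, nil) // nil: a datapoint with value 40 is present
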
+func TestCarbon(t *testing.T) {
+ coord := singleDBNodeDockerResources.Coordinator()
+
+ timestamp := time.Now()
+
+ write := func(metric string, value float64) {
+ assert.NoError(t, coord.WriteCarbon(7204, metric, value, timestamp))
+ }
+
+ read := func(metric string, expected float64) {
+ assert.NoError(t, coord.RunQuery(
+ renderVerifier(metric, expected),
+ graphiteQuery(metric, timestamp)))
+ }
+
+ // NB: since carbon writes are aggregated, it might take up to 10 seconds for
+ // these points to appear in queries. Because of this, write data for all
+ // test cases at once, then query all of them to reduce test duration.
+ aggMetric := "foo.min.aggregate.baz"
+ write(aggMetric, 41)
+ write(aggMetric, 42)
+ write(aggMetric, 40)
+
+ unaggregatedMetric := "foo.min.already-aggregated.baz"
+ write(unaggregatedMetric, 41)
+ write(unaggregatedMetric, 42)
+
+ meanMetric := "foo.min.catch-all.baz"
+ write(meanMetric, 10)
+ write(meanMetric, 20)
+
+ colonMetric := "foo.min:biz.baz.qux"
+ write(colonMetric, 42)
+
+ // Seed some points for find endpoints.
+ findMetrics := []string{
+ "a",
+ "a.bar",
+ "a.biz",
+ "a.biz.cake",
+ "a.bar.caw.daz",
+ "a.bag",
+ "c:bar.c:baz",
+ }
+
+ for _, m := range findMetrics {
+ write(m, 0)
+ }
+
+ // Test for min aggregation. 40 should win since it has min aggregation.
+ read(aggMetric, 40)
+ // Test that unaggregated metrics upsert.
+ read(unaggregatedMetric, 42)
+ // Test that the catch-all rule aggregates with mean ((10 + 20) / 2 = 15).
+ read(meanMetric, 15)
+ // Test that metrics with colons in them are written correctly.
+ read(colonMetric, 42)
+
+ // Test find endpoints with various queries.
+ findResults := map[string]string{
+ "a.b*.caw.*": `[{"id":"a.b*.caw.daz","text":"daz","leaf":1,"expandable":0,"allowChildren":0}]`,
+ "a.b*.c*": `[{"id":"a.b*.cake","text":"cake","leaf":1,"expandable":0,"allowChildren":0},{"id":"a.b*.caw","text":"caw","leaf":0,"expandable":1,"allowChildren":1}]`,
+ "a.b*": `[{"id":"a.bar","text":"bar","leaf":1,"expandable":0,"allowChildren":0},{"id":"a.bar","text":"bar","leaf":0,"expandable":1,"allowChildren":1},{"id":"a.biz","text":"biz","leaf":1,"expandable":0,"allowChildren":0},{"id":"a.biz","text":"biz","leaf":0,"expandable":1,"allowChildren":1},{"id":"a.bag","text":"bag","leaf":1,"expandable":0,"allowChildren":0}]`,
+ "a.ba[rg]": `[{"id":"a.bag","text":"bag","leaf":1,"expandable":0,"allowChildren":0},{"id":"a.bar","text":"bar","leaf":1,"expandable":0,"allowChildren":0},{"id":"a.bar","text":"bar","leaf":0,"expandable":1,"allowChildren":1}]`,
+ "a*": `[{"id":"a","text":"a","leaf":1,"expandable":0,"allowChildren":0},{"id":"a","text":"a","leaf":0,"expandable":1,"allowChildren":1}]`,
+ "c:*": `[{"id":"c:bar","text":"c:bar","leaf":0,"expandable":1,"allowChildren":1}]`,
+ "c:bar.*": `[{"id":"c:bar.c:baz","text":"c:baz","leaf":1,"expandable":0,"allowChildren":0}]`,
+ "x": "[]",
+ "a.d": "[]",
+ "*.*.*.*.*": "[]",
+ }
+
+ for query, ex := range findResults {
+ assert.NoError(t, coord.RunQuery(
+ findVerifier(ex), graphiteFind(query)))
+ }
+}
diff --git a/src/cmd/tools/dtest/docker/harness/cold_write_test.go b/src/cmd/tools/dtest/docker/harness/cold_write_test.go
new file mode 100644
index 0000000000..0531e074a5
--- /dev/null
+++ b/src/cmd/tools/dtest/docker/harness/cold_write_test.go
@@ -0,0 +1,134 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package harness
+
+import (
+ "errors"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/cmd/tools/dtest/docker/harness/resources"
+ "github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type dp struct {
+ t time.Time
+ v float64
+}
+
+func writeReq(ns, id string, dp dp) *rpc.WriteRequest {
+ return &rpc.WriteRequest{
+ NameSpace: ns,
+ ID: id,
+ Datapoint: &rpc.Datapoint{
+ Timestamp: dp.t.Unix(),
+ Value: dp.v,
+ },
+ }
+}
+
+func fetchReq(ns, id string) *rpc.FetchRequest {
+ return &rpc.FetchRequest{
+ NameSpace: ns,
+ ID: id,
+ RangeStart: 0,
+ RangeEnd: time.Now().Unix(),
+ }
+}
+
+// ago returns a timestamp the given number of minutes in the past. Callers
+// pass an untyped integer constant (e.g. ago(20)), which converts to a
+// time.Duration and is scaled by time.Minute here.
+func ago(mins time.Duration) time.Time {
+ return time.Now().Add(time.Minute * -mins)
+}
+
+func verifyFetch(t *testing.T, res *rpc.FetchResult_, exDps ...dp) {
+ dps := res.GetDatapoints()
+ require.Equal(t, len(dps), len(exDps))
+
+ for i, dp := range exDps {
+ other := dps[i]
+ assert.Equal(t, dp.t.Unix(), other.GetTimestamp())
+ assert.Equal(t, dp.v, other.GetValue())
+ }
+}
+
+func hasFileVerifier(filter string) resources.GoalStateVerifier {
+ return func(out string, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if len(filter) == 0 {
+ return nil
+ }
+
+ re := regexp.MustCompile(filter)
+ lines := strings.Split(out, "\n")
+ for _, line := range lines {
+ if re.MatchString(line) {
+ return nil
+ }
+ }
+
+ return errors.New("no matches")
+ }
+}
+
+func TestColdWritesSimple(t *testing.T) {
+ node := singleDBNodeDockerResources.Nodes()[0]
+ warmDp := dp{t: ago(20), v: 12.3456789}
+ req := writeReq(resources.ColdWriteNsName, "foo", warmDp)
+ require.NoError(t, node.WritePoint(req))
+
+ fetch, err := node.Fetch(fetchReq(resources.ColdWriteNsName, "foo"))
+ require.NoError(t, err)
+ verifyFetch(t, fetch, warmDp)
+
+ coldDp := dp{t: ago(120), v: 98.7654321}
+ req = writeReq(resources.ColdWriteNsName, "foo", coldDp)
+ require.NoError(t, node.WritePoint(req))
+
+ fetch, err = node.Fetch(fetchReq(resources.ColdWriteNsName, "foo"))
+ require.NoError(t, err)
+ verifyFetch(t, fetch, coldDp, warmDp)
+
+ err = node.GoalStateExec(hasFileVerifier(".*1-checkpoint.db"),
+ "find",
+ "/var/lib/m3db/data/coldWritesRepairAndNoIndex",
+ "-name",
+ "*1-checkpoint.db")
+
+ assert.NoError(t, err)
+
+ err = node.Restart()
+ require.NoError(t, err)
+
+ err = node.WaitForBootstrap()
+ require.NoError(t, err)
+
+ fetch, err = node.Fetch(fetchReq(resources.ColdWriteNsName, "foo"))
+ require.NoError(t, err)
+ verifyFetch(t, fetch, coldDp, warmDp)
+}
diff --git a/src/cmd/tools/dtest/docker/harness/harness_test.go b/src/cmd/tools/dtest/docker/harness/harness_test.go
new file mode 100644
index 0000000000..c458eef46e
--- /dev/null
+++ b/src/cmd/tools/dtest/docker/harness/harness_test.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package harness
+
+import (
+ "testing"
+
+ "github.com/m3db/m3/src/cmd/tools/dtest/docker/harness/resources"
+)
+
+var singleDBNodeDockerResources resources.DockerResources
+
+func TestMain(m *testing.M) {
+ // TODO(bodu): Add a BK step/tag for dtests; these need to be run on a bare machine.
+
+ //var err error
+ //singleDBNodeDockerResources, err = resources.SetupSingleM3DBNode()
+
+ //if err != nil {
+ // fmt.Println("could not set up db docker containers", err)
+ // os.Exit(1)
+ //}
+
+ //if l := len(singleDBNodeDockerResources.Nodes()); l != 1 {
+ // singleDBNodeDockerResources.Cleanup()
+ // fmt.Println("should only have a single node, have", l)
+ // os.Exit(1)
+ //}
+
+ //code := m.Run()
+ //singleDBNodeDockerResources.Cleanup()
+ //os.Exit(code)
+}
diff --git a/src/cmd/tools/dtest/docker/harness/resources/common.go b/src/cmd/tools/dtest/docker/harness/resources/common.go
new file mode 100644
index 0000000000..6263ed7eb0
--- /dev/null
+++ b/src/cmd/tools/dtest/docker/harness/resources/common.go
@@ -0,0 +1,201 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package resources
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/gogo/protobuf/jsonpb"
+ "github.com/gogo/protobuf/proto"
+ dockertest "github.com/ory/dockertest"
+ dc "github.com/ory/dockertest/docker"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+var (
+ networkName = "d-test"
+ volumeName = "d-test"
+
+ errClosed = errors.New("container has been closed")
+)
+
+func zapMethod(s string) zapcore.Field { return zap.String("method", s) }
+
+type dockerResourceOptions struct {
+ overrideDefaults bool
+ source string
+ containerName string
+ dockerFile string
+ portList []int
+ mounts []string
+ iOpts instrument.Options
+}
+
+// NB: this will fill unset fields with given default values.
+func (o dockerResourceOptions) withDefaults(
+ defaultOpts dockerResourceOptions) dockerResourceOptions {
+ if o.overrideDefaults {
+ return o
+ }
+
+ if len(o.source) == 0 {
+ o.source = defaultOpts.source
+ }
+
+ if len(o.containerName) == 0 {
+ o.containerName = defaultOpts.containerName
+ }
+
+ if len(o.dockerFile) == 0 {
+ o.dockerFile = defaultOpts.dockerFile
+ }
+
+ if len(o.portList) == 0 {
+ o.portList = defaultOpts.portList
+ }
+
+ if len(o.mounts) == 0 {
+ o.mounts = defaultOpts.mounts
+ }
+
+ if o.iOpts == nil {
+ o.iOpts = defaultOpts.iOpts
+ }
+
+ return o
+}
+
+func newOptions(name string) *dockertest.RunOptions {
+ return &dockertest.RunOptions{
+ Name: name,
+ NetworkID: networkName,
+ }
+}
+
+func setupNetwork(pool *dockertest.Pool) error {
+ networks, err := pool.Client.ListNetworks()
+ if err != nil {
+ return err
+ }
+
+ for _, n := range networks {
+ if n.Name == networkName {
+ if err := pool.Client.RemoveNetwork(networkName); err != nil {
+ return err
+ }
+
+ break
+ }
+ }
+
+ _, err = pool.Client.CreateNetwork(dc.CreateNetworkOptions{Name: networkName})
+ return err
+}
+
+func setupVolume(pool *dockertest.Pool) error {
+ volumes, err := pool.Client.ListVolumes(dc.ListVolumesOptions{})
+ if err != nil {
+ return err
+ }
+
+ for _, v := range volumes {
+ if volumeName == v.Name {
+ if err := pool.Client.RemoveVolume(volumeName); err != nil {
+ return err
+ }
+
+ break
+ }
+ }
+
+ _, err = pool.Client.CreateVolume(dc.CreateVolumeOptions{
+ Name: volumeName,
+ })
+
+ return err
+}
+
+func exposePorts(
+ opts *dockertest.RunOptions,
+ portList []int,
+) *dockertest.RunOptions {
+ ports := make(map[dc.Port][]dc.PortBinding, len(portList))
+ for _, p := range portList {
+ port := fmt.Sprintf("%d", p)
+
+ portRepresentation := dc.Port(fmt.Sprintf("%s/tcp", port))
+ binding := dc.PortBinding{HostIP: "0.0.0.0", HostPort: port}
+ entry, found := ports[portRepresentation]
+ if !found {
+ entry = []dc.PortBinding{binding}
+ } else {
+ entry = append(entry, binding)
+ }
+
+ ports[portRepresentation] = entry
+ }
+
+ opts.PortBindings = ports
+ return opts
+}
+
+func getDockerfile(file string) string {
+ src, _ := os.Getwd()
+ return fmt.Sprintf("%s/%s", src, file)
+}
+
+func toResponse(
+ resp *http.Response,
+ response proto.Message,
+ logger *zap.Logger,
+) error {
+ defer resp.Body.Close()
+
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ logger.Error("could not read body", zap.Error(err))
+ return err
+ }
+ if resp.StatusCode/100 != 2 {
+ logger.Error("status code not 2xx",
+ zap.Int("status code", resp.StatusCode),
+ zap.String("status", resp.Status))
+ return fmt.Errorf("status code %d", resp.StatusCode)
+ }
+
+ err = jsonpb.Unmarshal(bytes.NewReader(b), response)
+ if err != nil {
+ logger.Error("unable to unmarshal response",
+ zap.Error(err),
+ zap.Any("response", response))
+ return err
+ }
+
+ return nil
+}
diff --git a/src/cmd/tools/dtest/docker/harness/resources/config/m3coordinator.Dockerfile b/src/cmd/tools/dtest/docker/harness/resources/config/m3coordinator.Dockerfile
new file mode 100644
index 0000000000..994ed23c8b
--- /dev/null
+++ b/src/cmd/tools/dtest/docker/harness/resources/config/m3coordinator.Dockerfile
@@ -0,0 +1,12 @@
+FROM alpine:latest AS builder
+LABEL maintainer="The M3DB Authors "
+
+RUN mkdir -p /bin
+RUN mkdir -p /etc/m3coordinator
+ADD ./m3coordinator /bin/
+ADD ./m3coordinator.yml /etc/m3coordinator.yml
+
+EXPOSE 7201/tcp 7203/tcp 7204/tcp
+
+ENTRYPOINT [ "/bin/m3coordinator" ]
+CMD [ "-f", "/etc/m3coordinator.yml" ]
diff --git a/src/cmd/tools/dtest/docker/harness/resources/config/m3coordinator.yml b/src/cmd/tools/dtest/docker/harness/resources/config/m3coordinator.yml
new file mode 100644
index 0000000000..25da9de16f
--- /dev/null
+++ b/src/cmd/tools/dtest/docker/harness/resources/config/m3coordinator.yml
@@ -0,0 +1,62 @@
+listenAddress:
+ value: "0.0.0.0:7201"
+
+logging:
+ level: info
+
+metrics:
+ scope:
+ prefix: "coordinator"
+ prometheus:
+ handlerPath: /metrics
+ listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
+ sanitization: prometheus
+ samplingRate: 1.0
+ extended: none
+
+clusters:
+ - namespaces:
+ - namespace: aggregated
+ type: aggregated
+ retention: 10h
+ resolution: 5s
+ - namespace: default
+ type: unaggregated
+ retention: 48h
+ client:
+ config:
+ service:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - dbnode01:2379
+ writeConsistencyLevel: majority
+ readConsistencyLevel: unstrict_majority
+
+carbon:
+ ingester:
+ listenAddress: "0.0.0.0:7204"
+ rules:
+ - pattern: .*min.aggregate.*
+ aggregation:
+ type: min
+ policies:
+ - resolution: 5s
+ retention: 10h
+ - pattern: .*already-aggregated.*
+ aggregation:
+ enabled: false
+ policies:
+ - resolution: 5s
+ retention: 10h
+ - pattern: .*
+ policies:
+ - resolution: 5s
+ retention: 10h
+
+tagOptions:
+ idScheme: quoted
diff --git a/src/cmd/tools/dtest/docker/harness/resources/config/m3dbnode.Dockerfile b/src/cmd/tools/dtest/docker/harness/resources/config/m3dbnode.Dockerfile
new file mode 100644
index 0000000000..b5bf62d1a4
--- /dev/null
+++ b/src/cmd/tools/dtest/docker/harness/resources/config/m3dbnode.Dockerfile
@@ -0,0 +1,12 @@
+FROM alpine:latest AS builder
+LABEL maintainer="The M3DB Authors "
+
+RUN mkdir -p /bin
+RUN mkdir -p /etc/m3dbnode
+ADD ./m3dbnode /bin/
+ADD ./m3dbnode.yml /etc/m3dbnode/m3dbnode.yml
+
+EXPOSE 2379/tcp 2380/tcp 7201/tcp 7203/tcp 9000-9004/tcp
+
+ENTRYPOINT [ "/bin/m3dbnode" ]
+CMD [ "-f", "/etc/m3dbnode/m3dbnode.yml" ]
diff --git a/src/cmd/tools/dtest/docker/harness/resources/config/m3dbnode.yml b/src/cmd/tools/dtest/docker/harness/resources/config/m3dbnode.yml
new file mode 100644
index 0000000000..6970916644
--- /dev/null
+++ b/src/cmd/tools/dtest/docker/harness/resources/config/m3dbnode.yml
@@ -0,0 +1,106 @@
+coordinator:
+ listenAddress:
+ value: "0.0.0.0:7201"
+
+ local:
+ namespaces:
+ - namespace: default
+ type: unaggregated
+ retention: 48h
+
+ logging:
+ level: info
+
+ metrics:
+ scope:
+ prefix: "coordinator"
+ prometheus:
+ handlerPath: /metrics
+ listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
+ sanitization: prometheus
+ samplingRate: 1.0
+ extended: none
+
+ limits:
+ maxComputedDatapoints: 10000
+
+ tagOptions:
+ # Configuration setting for generating metric IDs from tags.
+ idScheme: quoted
+
+db:
+ logging:
+ level: info
+
+ metrics:
+ prometheus:
+ handlerPath: /metrics
+ sanitization: prometheus
+ samplingRate: 1.0
+ extended: detailed
+
+ listenAddress: 0.0.0.0:9000
+ clusterListenAddress: 0.0.0.0:9001
+ httpNodeListenAddress: 0.0.0.0:9002
+ httpClusterListenAddress: 0.0.0.0:9003
+ debugListenAddress: 0.0.0.0:9004
+
+ hostID:
+ resolver: config
+ value: m3db_local
+
+ client:
+ writeConsistencyLevel: majority
+ readConsistencyLevel: unstrict_majority
+
+ gcPercentage: 100
+
+ writeNewSeriesAsync: true
+ writeNewSeriesLimitPerSecond: 1048576
+ writeNewSeriesBackoffDuration: 2ms
+
+ bootstrap:
+ bootstrappers:
+ - filesystem
+ - commitlog
+ - peers
+ - uninitialized_topology
+ commitlog:
+ returnUnfulfilledForCorruptCommitLogFiles: false
+
+ cache:
+ series:
+ policy: lru
+ postingsList:
+ size: 262144
+
+ commitlog:
+ flushMaxBytes: 524288
+ flushEvery: 1s
+ queue:
+ calculationType: fixed
+ size: 2097152
+
+ fs:
+ filePathPrefix: /var/lib/m3db
+
+ config:
+ service:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - 127.0.0.1:2379
+ seedNodes:
+ initialCluster:
+ - hostID: m3db_local
+ endpoint: http://127.0.0.1:2380
+
+ # un-comment the lines below to enable Jaeger tracing. See https://www.jaegertracing.io/docs/1.9/getting-started/
+ # for quick local setup (which this config will send data to).
+
+ # tracing:
+ # backend: jaeger
diff --git a/src/cmd/tools/dtest/docker/harness/resources/coordinator.go b/src/cmd/tools/dtest/docker/harness/resources/coordinator.go
new file mode 100644
index 0000000000..00da24e3e1
--- /dev/null
+++ b/src/cmd/tools/dtest/docker/harness/resources/coordinator.go
@@ -0,0 +1,393 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package resources
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/m3db/m3/src/query/generated/proto/admin"
+
+ dockertest "github.com/ory/dockertest"
+ "go.uber.org/zap"
+)
+
+const (
+ defaultCoordinatorSource = "coordinator"
+ defaultCoordinatorName = "coord01"
+ defaultCoordinatorDockerfile = "resources/config/m3coordinator.Dockerfile"
+)
+
+var (
+ defaultCoordinatorList = []int{7201, 7203, 7204}
+
+ defaultCoordinatorOptions = dockerResourceOptions{
+ source: defaultCoordinatorSource,
+ containerName: defaultCoordinatorName,
+		dockerFile:    getDockerfile(defaultCoordinatorDockerfile),
+ portList: defaultCoordinatorList,
+ }
+)
+
+// Coordinator is a wrapper for a coordinator. It provides a wrapper on HTTP
+// endpoints that expose cluster management APIs as well as read and write
+// endpoints for series data.
+// TODO: consider having this work on underlying structures.
+type Coordinator interface {
+ Admin
+
+ // WriteCarbon writes a carbon metric datapoint at a given time.
+ WriteCarbon(port int, metric string, v float64, t time.Time) error
+ // RunQuery runs the given query with a given verification function.
+ RunQuery(verifier GoalStateVerifier, query string) error
+}
+
+// Admin is a wrapper for admin functions.
+type Admin interface {
+ // GetNamespace gets namespaces.
+ GetNamespace() (admin.NamespaceGetResponse, error)
+ // WaitForNamespace blocks until the given namespace is enabled.
+ // NB: if the name string is empty, this will instead
+ // check for a successful response.
+ WaitForNamespace(name string) error
+ // AddNamespace adds a namespace.
+ AddNamespace(admin.NamespaceAddRequest) (admin.NamespaceGetResponse, error)
+ // CreateDatabase creates a database.
+ CreateDatabase(admin.DatabaseCreateRequest) (admin.DatabaseCreateResponse, error)
+ // GetPlacement gets placements.
+ GetPlacement() (admin.PlacementGetResponse, error)
+ // WaitForInstances blocks until the given instance is available.
+ WaitForInstances(ids []string) error
+ // Close closes the wrapper and releases any held resources, including
+ // deleting docker containers.
+ Close() error
+}
+
+type coordinator struct {
+ resource *dockerResource
+}
+
+func newDockerHTTPCoordinator(
+ pool *dockertest.Pool,
+ opts dockerResourceOptions,
+) (Coordinator, error) {
+ opts = opts.withDefaults(defaultCoordinatorOptions)
+ opts.mounts = []string{"/etc/m3coordinator/"}
+
+ resource, err := newDockerResource(pool, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return &coordinator{
+ resource: resource,
+ }, nil
+}
+
+func (c *coordinator) GetNamespace() (admin.NamespaceGetResponse, error) {
+ if c.resource.closed {
+ return admin.NamespaceGetResponse{}, errClosed
+ }
+
+ url := c.resource.getURL(7201, "api/v1/namespace")
+ logger := c.resource.logger.With(
+ zapMethod("getNamespace"), zap.String("url", url))
+
+ resp, err := http.Get(url)
+ if err != nil {
+ logger.Error("failed get", zap.Error(err))
+ return admin.NamespaceGetResponse{}, err
+ }
+
+ var response admin.NamespaceGetResponse
+ if err := toResponse(resp, &response, logger); err != nil {
+ return admin.NamespaceGetResponse{}, err
+ }
+
+ return response, nil
+}
+
+func (c *coordinator) GetPlacement() (admin.PlacementGetResponse, error) {
+ if c.resource.closed {
+ return admin.PlacementGetResponse{}, errClosed
+ }
+
+ url := c.resource.getURL(7201, "api/v1/placement")
+ logger := c.resource.logger.With(
+ zapMethod("getPlacement"), zap.String("url", url))
+
+ resp, err := http.Get(url)
+ if err != nil {
+ logger.Error("failed get", zap.Error(err))
+ return admin.PlacementGetResponse{}, err
+ }
+
+ var response admin.PlacementGetResponse
+ if err := toResponse(resp, &response, logger); err != nil {
+ return admin.PlacementGetResponse{}, err
+ }
+
+ return response, nil
+}
+
+func (c *coordinator) WaitForNamespace(name string) error {
+ if c.resource.closed {
+ return errClosed
+ }
+
+ logger := c.resource.logger.With(zapMethod("waitForNamespace"))
+ return c.resource.pool.Retry(func() error {
+ ns, err := c.GetNamespace()
+ if err != nil {
+ return err
+ }
+
+		// If no name passed in, instead just check for success.
+ if len(name) == 0 {
+ return nil
+ }
+
+ nss := ns.GetRegistry().GetNamespaces()
+ namespace, found := nss[name]
+ if !found {
+ err := fmt.Errorf("no namespace with name %s", name)
+ logger.Error("could not get namespace", zap.Error(err))
+ return err
+ }
+
+ enabled := namespace.GetIndexOptions().GetEnabled()
+ if !enabled {
+ err := fmt.Errorf("namespace %s not enabled", name)
+ logger.Error("namespace not enabled", zap.Error(err))
+ return err
+ }
+
+ logger.Info("namespace ready", zap.String("namespace", name))
+ return nil
+ })
+}
+
+func (c *coordinator) WaitForInstances(
+ ids []string,
+) error {
+ if c.resource.closed {
+ return errClosed
+ }
+
+	logger := c.resource.logger.With(zapMethod("waitForInstances"))
+ return c.resource.pool.Retry(func() error {
+ placement, err := c.GetPlacement()
+ if err != nil {
+ logger.Error("retrying get placement", zap.Error(err))
+ return err
+ }
+
+ logger.Info("got placement", zap.Any("placement", placement))
+ instances := placement.GetPlacement().GetInstances()
+ for _, id := range ids {
+			instance, found := instances[id]
+			if !found {
+				err = fmt.Errorf("no instance with id %s", id)
+				logger.Error("could not get instance", zap.Error(err))
+				return err
+			}
+
+			if pID := instance.GetId(); pID != id {
+ err = fmt.Errorf("id mismatch: instance(%s) != placement(%s)", id, pID)
+ logger.Error("could not get instance", zap.Error(err))
+ return err
+ }
+ }
+
+ logger.Info("instances ready")
+ return nil
+ })
+}
+
+func (c *coordinator) CreateDatabase(
+ addRequest admin.DatabaseCreateRequest,
+) (admin.DatabaseCreateResponse, error) {
+ if c.resource.closed {
+ return admin.DatabaseCreateResponse{}, errClosed
+ }
+
+ url := c.resource.getURL(7201, "api/v1/database/create")
+ logger := c.resource.logger.With(
+ zapMethod("createDatabase"), zap.String("url", url),
+ zap.String("request", addRequest.String()))
+
+ b, err := json.Marshal(addRequest)
+ if err != nil {
+ logger.Error("failed to marshal", zap.Error(err))
+ return admin.DatabaseCreateResponse{}, err
+ }
+
+ resp, err := http.Post(url, "application/json", bytes.NewReader(b))
+ if err != nil {
+ logger.Error("failed post", zap.Error(err))
+ return admin.DatabaseCreateResponse{}, err
+ }
+
+ var response admin.DatabaseCreateResponse
+ if err := toResponse(resp, &response, logger); err != nil {
+ logger.Error("failed response", zap.Error(err))
+ return admin.DatabaseCreateResponse{}, err
+ }
+
+ logger.Info("created database")
+ return response, nil
+}
+
+func (c *coordinator) AddNamespace(
+ addRequest admin.NamespaceAddRequest,
+) (admin.NamespaceGetResponse, error) {
+ if c.resource.closed {
+ return admin.NamespaceGetResponse{}, errClosed
+ }
+
+ url := c.resource.getURL(7201, "api/v1/services/m3db/namespace")
+ logger := c.resource.logger.With(
+ zapMethod("addNamespace"), zap.String("url", url),
+ zap.String("request", addRequest.String()))
+
+ b, err := json.Marshal(addRequest)
+ if err != nil {
+ logger.Error("failed to marshal", zap.Error(err))
+ return admin.NamespaceGetResponse{}, err
+ }
+
+ resp, err := http.Post(url, "application/json", bytes.NewReader(b))
+ if err != nil {
+ logger.Error("failed post", zap.Error(err))
+ return admin.NamespaceGetResponse{}, err
+ }
+
+ var response admin.NamespaceGetResponse
+ if err := toResponse(resp, &response, logger); err != nil {
+ return admin.NamespaceGetResponse{}, err
+ }
+
+ return response, nil
+}
+
+func (c *coordinator) WriteCarbon(
+ port int, metric string, v float64, t time.Time,
+) error {
+ if c.resource.closed {
+ return errClosed
+ }
+
+ url := c.resource.resource.GetHostPort(fmt.Sprintf("%d/tcp", port))
+ logger := c.resource.logger.With(
+ zapMethod("writeCarbon"), zap.String("url", url),
+ zap.String("at time", time.Now().String()),
+ zap.String("at ts", t.String()))
+
+ con, err := net.Dial("tcp", url)
+ if err != nil {
+ logger.Error("could not dial", zap.Error(err))
+ return err
+ }
+
+ write := fmt.Sprintf("%s %f %d", metric, v, t.Unix())
+ logger.Info("writing", zap.String("metric", write))
+	n, err := con.Write([]byte(write))
+	if err != nil {
+		logger.Error("could not write", zap.Error(err))
+		return err
+	}
+
+ if n != len(write) {
+ err := fmt.Errorf("wrote %d, wanted %d", n, len(write))
+ logger.Error("write failure", zap.Error(err))
+ return err
+ }
+
+ logger.Info("write success", zap.Int("bytes written", n))
+ return con.Close()
+}
+
+func (c *coordinator) query(
+ verifier GoalStateVerifier, query string,
+) error {
+ if c.resource.closed {
+ return errClosed
+ }
+
+ url := c.resource.getURL(7201, query)
+ logger := c.resource.logger.With(
+ zapMethod("query"), zap.String("url", url))
+ logger.Info("running")
+ resp, err := http.Get(url)
+ if err != nil {
+ logger.Error("failed get", zap.Error(err))
+ return err
+ }
+
+	defer resp.Body.Close()
+	if resp.StatusCode/100 != 2 {
+		logger.Error("status code not 2xx",
+			zap.Int("status code", resp.StatusCode),
+			zap.String("status", resp.Status))
+		return fmt.Errorf("status code %d", resp.StatusCode)
+	}
+
+	b, err := ioutil.ReadAll(resp.Body)
+ return verifier(string(b), err)
+}
+
+func (c *coordinator) RunQuery(
+ verifier GoalStateVerifier, query string,
+) error {
+ if c.resource.closed {
+ return errClosed
+ }
+
+ logger := c.resource.logger.With(zapMethod("runQuery"),
+ zap.String("query", query))
+ err := c.resource.pool.Retry(func() error {
+ err := c.query(verifier, query)
+ if err != nil {
+ logger.Info("retrying", zap.Error(err))
+ }
+
+ return err
+ })
+
+ if err != nil {
+ logger.Error("failed run", zap.Error(err))
+ }
+
+ return err
+}
+
+func (c *coordinator) Close() error {
+ if c.resource.closed {
+ return errClosed
+ }
+
+ return c.resource.close()
+}
diff --git a/src/cmd/tools/dtest/docker/harness/resources/dbnode.go b/src/cmd/tools/dtest/docker/harness/resources/dbnode.go
new file mode 100644
index 0000000000..2041d5f053
--- /dev/null
+++ b/src/cmd/tools/dtest/docker/harness/resources/dbnode.go
@@ -0,0 +1,278 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package resources
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
+ "github.com/m3db/m3/src/dbnode/integration"
+ "github.com/m3db/m3/src/query/generated/proto/admin"
+ xerrors "github.com/m3db/m3/src/x/errors"
+
+ dockertest "github.com/ory/dockertest"
+ "go.uber.org/zap"
+)
+
+const (
+ defaultDBNodeSource = "dbnode"
+ defaultDBNodeContainerName = "dbnode01"
+ defaultDBNodeDockerfile = "resources/config/m3dbnode.Dockerfile"
+)
+
+var (
+ defaultDBNodePortList = []int{2379, 2380, 9000, 9001, 9002, 9003, 9004}
+
+ defaultDBNodeOptions = dockerResourceOptions{
+ source: defaultDBNodeSource,
+ containerName: defaultDBNodeContainerName,
+ dockerFile: getDockerfile(defaultDBNodeDockerfile),
+ portList: defaultDBNodePortList,
+ }
+)
+
+// GoalStateVerifier verifies that the given results are valid.
+type GoalStateVerifier func(string, error) error
+
+// Nodes is a slice of nodes.
+type Nodes []Node
+
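+// waitForHealthy blocks until every node has bootstrapped, checking all
+// nodes concurrently and collecting any errors.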
+func (n Nodes) waitForHealthy() error {
+ var (
+ multiErr xerrors.MultiError
+ mu sync.Mutex
+ wg sync.WaitGroup
+ )
+
+ for _, node := range n {
+ wg.Add(1)
+ node := node
+ go func() {
+ defer wg.Done()
+ err := node.WaitForBootstrap()
+ if err != nil {
+ mu.Lock()
+ multiErr = multiErr.Add(err)
+ mu.Unlock()
+ }
+ }()
+ }
+
+ wg.Wait()
+ return multiErr.FinalError()
+}
+
+// Node is a wrapper for a db node. It provides a wrapper on HTTP
+// endpoints that expose cluster management APIs as well as read and write
+// endpoints for series data.
+// TODO: consider having this work on underlying structures.
+type Node interface {
+ // HostDetails returns this node's host details on the given port.
+ HostDetails(port int) (*admin.Host, error)
+ // Health gives this node's health.
+ Health() (*rpc.NodeHealthResult_, error)
+ // WaitForBootstrap blocks until the node has bootstrapped.
+ WaitForBootstrap() error
+ // WritePoint writes a datapoint to the node directly.
+ WritePoint(req *rpc.WriteRequest) error
+ // Fetch fetches datapoints.
+ Fetch(req *rpc.FetchRequest) (*rpc.FetchResult_, error)
+ // Exec executes the given commands on the node container, returning
+ // stdout and stderr from the container.
+ Exec(commands ...string) (string, error)
+ // GoalStateExec executes the given commands on the node container, retrying
+ // until applying the verifier returns no error or the default timeout.
+ GoalStateExec(verifier GoalStateVerifier, commands ...string) error
+ // Restart restarts this container.
+ Restart() error
+ // Close closes the wrapper and releases any held resources, including
+ // deleting docker containers.
+ Close() error
+}
+
+type dbNode struct {
+ opts dockerResourceOptions
+
+ tchanClient *integration.TestTChannelClient
+ resource *dockerResource
+}
+
+func newDockerHTTPNode(
+ pool *dockertest.Pool,
+ opts dockerResourceOptions,
+) (Node, error) {
+ opts = opts.withDefaults(defaultDBNodeOptions)
+ resource, err := newDockerResource(pool, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ completed := false
+ defer func() {
+ if !completed {
+ resource.close()
+ }
+ }()
+
+ addr := resource.resource.GetHostPort("9000/tcp")
+ tchanClient, err := integration.NewTChannelClient("client", addr)
+ if err != nil {
+ return nil, err
+ }
+
+ resource.logger.Info("set up tchanClient", zap.String("node_addr", addr))
+ completed = true
+ return &dbNode{
+ opts: opts,
+
+ tchanClient: tchanClient,
+ resource: resource,
+ }, nil
+}
+
+func (c *dbNode) HostDetails(p int) (*admin.Host, error) {
+ port, err := c.resource.getPort(p)
+ if err != nil {
+ return nil, err
+ }
+
+ return &admin.Host{
+ Id: "m3db_local",
+ IsolationGroup: "rack-a",
+ Zone: "embedded",
+ Weight: 1024,
+ Address: c.opts.containerName,
+ Port: uint32(port),
+ }, nil
+}
+
+func (c *dbNode) Health() (*rpc.NodeHealthResult_, error) {
+ if c.resource.closed {
+ return nil, errClosed
+ }
+
+ logger := c.resource.logger.With(zapMethod("health"))
+ res, err := c.tchanClient.TChannelClientHealth(timeout)
+ if err != nil {
+ logger.Error("failed get", zap.Error(err), zap.Any("res", res))
+ }
+
+ return res, err
+}
+
+func (c *dbNode) WaitForBootstrap() error {
+ if c.resource.closed {
+ return errClosed
+ }
+
+ logger := c.resource.logger.With(zapMethod("waitForBootstrap"))
+ return c.resource.pool.Retry(func() error {
+ health, err := c.Health()
+ if err != nil {
+ return err
+ }
+
+ if !health.GetBootstrapped() {
+ err = fmt.Errorf("not bootstrapped")
+ logger.Error("could not get health", zap.Error(err))
+ return err
+ }
+
+ return nil
+ })
+}
+
+func (c *dbNode) WritePoint(req *rpc.WriteRequest) error {
+ if c.resource.closed {
+ return errClosed
+ }
+
+ logger := c.resource.logger.With(zapMethod("write"))
+ err := c.tchanClient.TChannelClientWrite(timeout, req)
+ if err != nil {
+ logger.Error("could not write", zap.Error(err))
+ return err
+ }
+
+ logger.Info("wrote")
+ return nil
+}
+
+func (c *dbNode) Fetch(req *rpc.FetchRequest) (*rpc.FetchResult_, error) {
+ if c.resource.closed {
+ return nil, errClosed
+ }
+
+ logger := c.resource.logger.With(zapMethod("fetch"))
+ dps, err := c.tchanClient.TChannelClientFetch(timeout, req)
+ if err != nil {
+ logger.Error("could not fetch", zap.Error(err))
+ return nil, err
+ }
+
+ logger.Info("fetched", zap.Int("num_points", len(dps.GetDatapoints())))
+ return dps, nil
+}
+
+func (c *dbNode) Restart() error {
+ if c.resource.closed {
+ return errClosed
+ }
+
+ cName := c.opts.containerName
+ logger := c.resource.logger.With(zapMethod("restart"))
+ logger.Info("restarting container", zap.String("container", cName))
+ err := c.resource.pool.Client.RestartContainer(cName, 60)
+ if err != nil {
+ logger.Error("could not restart", zap.Error(err))
+ return err
+ }
+
+ return nil
+}
+
+func (c *dbNode) Exec(commands ...string) (string, error) {
+ if c.resource.closed {
+ return "", errClosed
+ }
+
+ return c.resource.exec(commands...)
+}
+
+func (c *dbNode) GoalStateExec(
+ verifier GoalStateVerifier,
+ commands ...string,
+) error {
+ if c.resource.closed {
+ return errClosed
+ }
+
+ return c.resource.goalStateExec(verifier, commands...)
+}
+
+func (c *dbNode) Close() error {
+ if c.resource.closed {
+ return errClosed
+ }
+
+ return c.resource.close()
+}
diff --git a/src/cmd/tools/dtest/docker/harness/resources/docker_resource.go b/src/cmd/tools/dtest/docker/harness/resources/docker_resource.go
new file mode 100644
index 0000000000..27782974ba
--- /dev/null
+++ b/src/cmd/tools/dtest/docker/harness/resources/docker_resource.go
@@ -0,0 +1,188 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package resources
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ dockertest "github.com/ory/dockertest"
+ "github.com/ory/dockertest/docker"
+ dc "github.com/ory/dockertest/docker"
+ "github.com/ory/dockertest/docker/types/mount"
+ "go.uber.org/zap"
+)
+
+type dockerResource struct {
+ closed bool
+
+ logger *zap.Logger
+
+ resource *dockertest.Resource
+ pool *dockertest.Pool
+}
+
+func newDockerResource(
+ pool *dockertest.Pool,
+ resourceOpts dockerResourceOptions,
+) (*dockerResource, error) {
+ var (
+ source = resourceOpts.source
+ containerName = resourceOpts.containerName
+ dockerFile = resourceOpts.dockerFile
+ iOpts = resourceOpts.iOpts
+ portList = resourceOpts.portList
+
+ logger = iOpts.Logger().With(
+ zap.String("source", source),
+ zap.String("container", containerName),
+ )
+ )
+
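+	// Remove any stale container with the same name so the new build and
+	// run starts from a clean slate.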
+ if err := pool.RemoveContainerByName(containerName); err != nil {
+ logger.Error("could not remove container from pool", zap.Error(err))
+ return nil, err
+ }
+
+ opts := exposePorts(newOptions(containerName), portList)
+ logger.Info("building container with options",
+ zap.String("dockerFile", dockerFile), zap.Any("options", opts))
+ resource, err := pool.BuildAndRunWithOptions(dockerFile, opts,
+ func(c *dc.HostConfig) {
+ c.NetworkMode = networkName
+ mounts := make([]dc.HostMount, 0, len(resourceOpts.mounts))
+ for _, m := range resourceOpts.mounts {
+ mounts = append(mounts, dc.HostMount{
+ Target: m,
+ Type: string(mount.TypeTmpfs),
+ })
+ }
+
+ c.Mounts = mounts
+ })
+
+ if err != nil {
+ logger.Error("could not build and run container", zap.Error(err))
+ return nil, err
+ }
+
+ return &dockerResource{
+ logger: logger,
+ resource: resource,
+ pool: pool,
+ }, nil
+}
+
+func (c *dockerResource) getPort(bindPort int) (int, error) {
+ port := c.resource.GetPort(fmt.Sprintf("%d/tcp", bindPort))
+ return strconv.Atoi(port)
+}
+
+func (c *dockerResource) getURL(port int, path string) string {
+ tcpPort := fmt.Sprintf("%d/tcp", port)
+ return fmt.Sprintf("http://%s:%s/%s",
+ c.resource.GetBoundIP(tcpPort), c.resource.GetPort(tcpPort), path)
+}
+
+func (c *dockerResource) exec(commands ...string) (string, error) {
+ if c.closed {
+ return "", errClosed
+ }
+
+ // NB: this is prefixed with a `/` that should be trimmed off.
+ name := strings.TrimLeft(c.resource.Container.Name, "/")
+ logger := c.logger.With(zapMethod("exec"))
+ client := c.pool.Client
+ exec, err := client.CreateExec(docker.CreateExecOptions{
+ AttachStdout: true,
+ AttachStderr: true,
+ Container: name,
+ Cmd: commands,
+ })
+
+ if err != nil {
+ logger.Error("failed generating exec", zap.Error(err))
+ return "", err
+ }
+
+ var outBuf, errBuf bytes.Buffer
+ logger.Info("starting exec",
+ zap.Strings("commands", commands),
+ zap.String("execID", exec.ID))
+ err = client.StartExec(exec.ID, docker.StartExecOptions{
+ OutputStream: &outBuf,
+ ErrorStream: &errBuf,
+ })
+
+ output, bufferErr := outBuf.String(), errBuf.String()
+ logger = logger.With(zap.String("stdout", output),
+ zap.String("stderr", bufferErr))
+
+ if err != nil {
+ logger.Error("failed starting exec",
+ zap.Error(err))
+ return "", err
+ }
+
+ if len(bufferErr) != 0 {
+ err = errors.New(bufferErr)
+ logger.Error("exec failed", zap.Error(err))
+ return "", err
+ }
+
+ logger.Info("succeeded exec")
+ return output, nil
+}
+
+func (c *dockerResource) goalStateExec(
+ verifier GoalStateVerifier,
+ commands ...string,
+) error {
+ if c.closed {
+ return errClosed
+ }
+
+ logger := c.logger.With(zapMethod("goalStateExec"))
+ return c.pool.Retry(func() error {
+ err := verifier(c.exec(commands...))
+ if err != nil {
+ logger.Error("rerunning goal state verification", zap.Error(err))
+ return err
+ }
+
+ logger.Info("goal state verification succeeded")
+ return nil
+ })
+}
+
+func (c *dockerResource) close() error {
+ if c.closed {
+ c.logger.Error("closing closed resource", zap.Error(errClosed))
+ return errClosed
+ }
+
+ c.closed = true
+ c.logger.Info("closing resource")
+ return c.pool.Purge(c.resource)
+}
diff --git a/src/cmd/tools/dtest/docker/harness/resources/harness.go b/src/cmd/tools/dtest/docker/harness/resources/harness.go
new file mode 100644
index 0000000000..728d91e129
--- /dev/null
+++ b/src/cmd/tools/dtest/docker/harness/resources/harness.go
@@ -0,0 +1,247 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package resources
+
+import (
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/generated/proto/namespace"
+ "github.com/m3db/m3/src/query/generated/proto/admin"
+ xerrors "github.com/m3db/m3/src/x/errors"
+ "github.com/m3db/m3/src/x/instrument"
+ dockertest "github.com/ory/dockertest"
+ "go.uber.org/zap"
+)
+
+const (
+ timeout = time.Second * 60
+ retention = "6h"
+
+ // AggName is the name of the aggregated namespace.
+ AggName = "aggregated"
+ // UnaggName is the name of the unaggregated namespace.
+ UnaggName = "default"
+	// ColdWriteNsName is the name of the cold writes namespace.
+ ColdWriteNsName = "coldWritesRepairAndNoIndex"
+)
+
+// DockerResources represents a set of dockerized test components.
+type DockerResources interface {
+ // Cleanup closes and removes all corresponding containers.
+ Cleanup() error
+ // Nodes returns all node resources.
+ Nodes() Nodes
+ // Coordinator returns the coordinator resource.
+ Coordinator() Coordinator
+}
+
+type dockerResources struct {
+ coordinator Coordinator
+ nodes Nodes
+
+ pool *dockertest.Pool
+}
+
+// SetupSingleM3DBNode creates docker resources representing a setup with a
+// single DB node.
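+//
+// A sketch of typical test usage (hypothetical, for illustration):
+//
+//	resources, err := SetupSingleM3DBNode()
+//	if err != nil {
+//		// handle error
+//	}
+//	defer resources.Cleanup()
+//	coordinator := resources.Coordinator()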
+func SetupSingleM3DBNode() (DockerResources, error) {
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ return nil, err
+ }
+
+ pool.MaxWait = timeout
+ err = setupNetwork(pool)
+ if err != nil {
+ return nil, err
+ }
+
+ err = setupVolume(pool)
+ if err != nil {
+ return nil, err
+ }
+
+ iOpts := instrument.NewOptions()
+ dbNode, err := newDockerHTTPNode(pool, dockerResourceOptions{
+ iOpts: iOpts,
+ })
+
+ success := false
+ dbNodes := Nodes{dbNode}
+ defer func() {
+ // NB: only defer close in the failure case, otherwise calling function
+ // is responsible for closing the resources.
+ if !success {
+ for _, dbNode := range dbNodes {
+ if dbNode != nil {
+ dbNode.Close()
+ }
+ }
+ }
+ }()
+
+ if err != nil {
+ return nil, err
+ }
+
+ coordinator, err := newDockerHTTPCoordinator(pool, dockerResourceOptions{
+ iOpts: iOpts,
+ })
+
+ defer func() {
+ // NB: only defer close in the failure case, otherwise calling function
+ // is responsible for closing the resources.
+ if !success && coordinator != nil {
+ coordinator.Close()
+ }
+ }()
+
+ if err != nil {
+ return nil, err
+ }
+
+ logger := iOpts.Logger().With(zap.String("source", "harness"))
+ hosts := make([]*admin.Host, 0, len(dbNodes))
+ ids := make([]string, 0, len(dbNodes))
+ for _, n := range dbNodes {
+ h, err := n.HostDetails(9000)
+ if err != nil {
+ logger.Error("could not get host details", zap.Error(err))
+ return nil, err
+ }
+
+ hosts = append(hosts, h)
+ ids = append(ids, h.GetId())
+ }
+
+ var (
+ aggDatabase = admin.DatabaseCreateRequest{
+ Type: "cluster",
+ NamespaceName: AggName,
+ RetentionTime: retention,
+ NumShards: 4,
+ ReplicationFactor: 1,
+ Hosts: hosts,
+ }
+
+ unaggDatabase = admin.DatabaseCreateRequest{
+ NamespaceName: UnaggName,
+ RetentionTime: retention,
+ }
+
+ coldWriteNamespace = admin.NamespaceAddRequest{
+ Name: ColdWriteNsName,
+ Options: &namespace.NamespaceOptions{
+ BootstrapEnabled: true,
+ FlushEnabled: true,
+ WritesToCommitLog: true,
+ CleanupEnabled: true,
+ SnapshotEnabled: true,
+ RepairEnabled: true,
+ ColdWritesEnabled: true,
+ RetentionOptions: &namespace.RetentionOptions{
+ RetentionPeriodNanos: int64(4 * time.Hour),
+ BlockSizeNanos: int64(time.Hour),
+ BufferFutureNanos: int64(time.Minute * 10),
+ BufferPastNanos: int64(time.Minute * 10),
+ BlockDataExpiry: true,
+ BlockDataExpiryAfterNotAccessPeriodNanos: int64(time.Minute * 5),
+ },
+ },
+ }
+ )
+
+ logger.Info("waiting for coordinator")
+ if err := coordinator.WaitForNamespace(""); err != nil {
+ return nil, err
+ }
+
+ logger.Info("creating database", zap.Any("request", aggDatabase))
+ if _, err := coordinator.CreateDatabase(aggDatabase); err != nil {
+ return nil, err
+ }
+
+ logger.Info("waiting for placements", zap.Strings("placement ids", ids))
+ if err := coordinator.WaitForInstances(ids); err != nil {
+ return nil, err
+ }
+
+ logger.Info("waiting for namespace", zap.String("name", AggName))
+ if err := coordinator.WaitForNamespace(AggName); err != nil {
+ return nil, err
+ }
+
+ logger.Info("creating namespace", zap.Any("request", unaggDatabase))
+ if _, err := coordinator.CreateDatabase(unaggDatabase); err != nil {
+ return nil, err
+ }
+
+ logger.Info("waiting for namespace", zap.String("name", UnaggName))
+ if err := coordinator.WaitForNamespace(UnaggName); err != nil {
+ return nil, err
+ }
+
+ logger.Info("creating namespace", zap.Any("request", coldWriteNamespace))
+ if _, err := coordinator.AddNamespace(coldWriteNamespace); err != nil {
+ return nil, err
+ }
+
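+	// NB: the cold-writes namespace has indexing disabled, so waiting on it
+	// directly would never succeed; wait on the unaggregated namespace as a
+	// proxy for the registry update having propagated.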
+ logger.Info("waiting for namespace", zap.String("name", ColdWriteNsName))
+ if err := coordinator.WaitForNamespace(UnaggName); err != nil {
+ return nil, err
+ }
+
+ logger.Info("waiting for healthy")
+ if err := dbNodes.waitForHealthy(); err != nil {
+ return nil, err
+ }
+
+ logger.Info("all healthy")
+ success = true
+ return &dockerResources{
+ coordinator: coordinator,
+ nodes: dbNodes,
+
+ pool: pool,
+	}, nil
+}
+
+func (r *dockerResources) Cleanup() error {
+ if r == nil {
+ return nil
+ }
+
+ var multiErr xerrors.MultiError
+ if r.coordinator != nil {
+ multiErr = multiErr.Add(r.coordinator.Close())
+ }
+
+ for _, dbNode := range r.nodes {
+ if dbNode != nil {
+ multiErr = multiErr.Add(dbNode.Close())
+ }
+ }
+
+ return multiErr.FinalError()
+}
+
+func (r *dockerResources) Nodes() Nodes { return r.nodes }
+func (r *dockerResources) Coordinator() Coordinator { return r.coordinator }
diff --git a/src/cmd/tools/m3ctl/README.md b/src/cmd/tools/m3ctl/README.md
new file mode 100644
index 0000000000..208d54aac4
--- /dev/null
+++ b/src/cmd/tools/m3ctl/README.md
@@ -0,0 +1,82 @@
+# M3DB CLI Tool
+
+This is a CLI tool for cluster introspection and for operational
+tasks such as managing namespaces and placements.
+
+Where configuration data is required, it is provided via YAML.
+
+You can:
+
+* create a database per the simplified database create API
+* list namespaces
+* delete namespaces
+* list placements
+* delete placements
+* add nodes
+* remove nodes
+
+NOTE: This tool can delete namespaces and placements. It can be
+quite hazardous if used without an adequate understanding of your
+M3DB cluster's topology, or of how M3DB works.
+
+## Examples
+
+```sh
+# show help
+m3ctl -h
+# create a database
+m3ctl apply -f ./database/examples/dbcreate.yaml
+# list namespaces
+m3ctl get ns
+# delete a namespace
+m3ctl delete ns -id default
+# list placements
+m3ctl get pl
+# point to some remote and list namespaces
+m3ctl -endpoint http://localhost:7201 get ns
+# check the namespaces in a kubernetes cluster
+# first setup a tunnel via kubectl port-forward ... 7201
+m3ctl -endpoint http://localhost:7201 get ns
+# list the ids of the placements
+m3ctl -endpoint http://localhost:7201 get pl | jq .placement.instances[].id
+```
+
+Some example yaml files for the "apply" subcommand are provided in the yaml/examples directory.
+Here's one to initialize a topology:
+
+```yaml
+---
+operation: init
+num_shards: 64
+replication_factor: 1
+instances:
+ - id: nodeid1
+ isolation_group: isogroup1
+ zone: etcd1
+ weight: 100
+ endpoint: node1:9000
+ hostname: node1
+ port: 9000
+ - id: nodeid2
+ isolation_group: isogroup2
+ zone: etcd1
+ weight: 100
+ endpoint: node2:9000
+ hostname: node2
+ port: 9000
+ - id: nodeid3
+ isolation_group: isogroup3
+ zone: etcd1
+ weight: 100
+ endpoint: node3:9000
+ hostname: node3
+ port: 9000
+```
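+
+As a rough sketch (field values here are illustrative; check the
+yaml/examples directory for the authoritative schema), adding a node
+might look like:
+
+```yaml
+---
+operation: add
+instances:
+  - id: nodeid4
+    isolation_group: isogroup4
+    zone: etcd1
+    weight: 100
+    endpoint: node4:9000
+    hostname: node4
+    port: 9000
+```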
+
+See the examples directories under yaml/examples for more.
+
+## References
+
+ * [Operational guide](https://docs.m3db.io/operational_guide)
+ * [API docs](https://www.m3db.io/openapi/)
diff --git a/src/cmd/tools/m3ctl/apply/apply.go b/src/cmd/tools/m3ctl/apply/apply.go
new file mode 100644
index 0000000000..235020b423
--- /dev/null
+++ b/src/cmd/tools/m3ctl/apply/apply.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package apply
+
+import (
+ "fmt"
+
+ "go.uber.org/zap"
+
+ "github.com/m3db/m3/src/cmd/tools/m3ctl/client"
+ "github.com/m3db/m3/src/cmd/tools/m3ctl/yaml"
+)
+
+// DoApply handles the apply command (in the style of kubectl apply).
+// It loads the given YAML, determines which API endpoint the payload
+// targets, and POSTs it to the backend.
+func DoApply(
+ endpoint string,
+ headers map[string]string,
+ filepath string,
+ logger *zap.Logger,
+) ([]byte, error) {
+ path, data, err := yaml.Load(filepath, logger)
+ if err != nil {
+ return nil, err
+ }
+ url := fmt.Sprintf("%s%s", endpoint, path)
+ return client.DoPost(url, headers, data, logger)
+}
diff --git a/src/cmd/tools/m3ctl/client/checker.go b/src/cmd/tools/m3ctl/client/checker.go
new file mode 100644
index 0000000000..c9bbabf79d
--- /dev/null
+++ b/src/cmd/tools/m3ctl/client/checker.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package client
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+
+ "go.uber.org/zap"
+)
+
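+// checkForAndHandleError logs and returns an error if the response status
+// is not 2xx; it returns nil otherwise.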
+func checkForAndHandleError(url string, resp *http.Response, zl *zap.Logger) error {
+ if resp.StatusCode/100 != 2 {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err == nil {
+ zl.Error("error response",
+ zap.Error(fmt.Errorf("status %d", resp.StatusCode)),
+ zap.String("url", url),
+ zap.ByteString("response", body))
+ } else {
+ zl.Error("error response",
+ zap.Error(fmt.Errorf("status %d", resp.StatusCode)),
+ zap.String("url", url),
+				zap.NamedError("readError", err))
+ }
+ return fmt.Errorf("error response: status=%s, url=%s", resp.Status, url)
+ }
+ return nil
+}
diff --git a/src/cmd/tools/m3ctl/client/http.go b/src/cmd/tools/m3ctl/client/http.go
new file mode 100644
index 0000000000..fa617a591d
--- /dev/null
+++ b/src/cmd/tools/m3ctl/client/http.go
@@ -0,0 +1,130 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package client
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "go.uber.org/zap"
+)
+
+const timeout = 5 * time.Second
+
+// DoGet is the low level call to the backend api for gets.
+func DoGet(
+ url string,
+ headers map[string]string,
+ l *zap.Logger,
+) ([]byte, error) {
+ l.Info("request", zap.String("method", "get"), zap.String("url", url))
+	client := &http.Client{
+		Timeout: timeout,
+	}
+ req, err := http.NewRequest(http.MethodGet, url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ setHeadersWithDefaults(req, headers)
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
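+		// Drain the body before closing so the underlying connection
+		// can be reused.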
+ ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ }()
+ if err := checkForAndHandleError(url, resp, l); err != nil {
+ return nil, err
+ }
+ return ioutil.ReadAll(resp.Body)
+}
+
+// DoPost is the low level call to the backend api for posts.
+func DoPost(
+ url string,
+ headers map[string]string,
+ data io.Reader,
+ l *zap.Logger,
+) ([]byte, error) {
+ l.Info("request", zap.String("method", "post"), zap.String("url", url))
+ client := &http.Client{
+ Timeout: timeout,
+ }
+ req, err := http.NewRequest(http.MethodPost, url, data)
+ if err != nil {
+ return nil, err
+ }
+
+ setHeadersWithDefaults(req, headers)
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ }()
+ if err := checkForAndHandleError(url, resp, l); err != nil {
+ return nil, err
+ }
+ return ioutil.ReadAll(resp.Body)
+}
+
+// DoDelete is the low level call to the backend api for deletes.
+func DoDelete(
+ url string,
+ headers map[string]string,
+ l *zap.Logger,
+) ([]byte, error) {
+ l.Info("request", zap.String("method", "delete"), zap.String("url", url))
+ client := &http.Client{
+ Timeout: timeout,
+ }
+ req, err := http.NewRequest(http.MethodDelete, url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ setHeadersWithDefaults(req, headers)
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ }()
+ if err := checkForAndHandleError(url, resp, l); err != nil {
+ return nil, err
+ }
+ return ioutil.ReadAll(resp.Body)
+}
+
+func setHeadersWithDefaults(req *http.Request, headers map[string]string) {
+ req.Header.Set("Content-Type", "application/json")
+ for k, v := range headers {
+ req.Header.Set(k, v)
+ }
+}
diff --git a/src/cmd/tools/m3ctl/main/main.go b/src/cmd/tools/m3ctl/main/main.go
new file mode 100644
index 0000000000..875ffb0e1d
--- /dev/null
+++ b/src/cmd/tools/m3ctl/main/main.go
@@ -0,0 +1,249 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/m3db/m3/src/cmd/tools/m3ctl/apply"
+ "github.com/m3db/m3/src/cmd/tools/m3ctl/namespaces"
+ "github.com/m3db/m3/src/cmd/tools/m3ctl/placements"
+ "github.com/m3db/m3/src/query/generated/proto/admin"
+
+ "github.com/gogo/protobuf/jsonpb"
+ "github.com/spf13/cobra"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ defaultEndpoint = "http://localhost:7201"
+)
+
+// Defaults are chosen so output is easily consumable by JSON tools like "jq":
+// - Error log level, so nothing is printed unless an error is encountered
+//   and the output can be purely JSON.
+// - No log stack traces, so errors don't overwhelm the output.
+var defaultLoggerOptions = loggerOptions{
+ level: zapcore.ErrorLevel,
+ enableStacktrace: false,
+}
+
+type loggerOptions struct {
+ level zapcore.Level
+ enableStacktrace bool
+}
+
+func mustNewLogger(opts loggerOptions) *zap.Logger {
+ loggerCfg := zap.NewDevelopmentConfig()
+ loggerCfg.Level = zap.NewAtomicLevelAt(opts.level)
+ loggerCfg.DisableStacktrace = !opts.enableStacktrace
+ logger, err := loggerCfg.Build()
+ if err != nil {
+		fmt.Fprintln(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+
+ return logger
+}
+
+func main() {
+ var (
+ debug bool
+ endPoint string
+ headers = make(map[string]string)
+ yamlPath string
+ showAll bool
+ deleteAll bool
+ nodeName string
+ )
+
+ logger := mustNewLogger(defaultLoggerOptions)
+ defer func() {
+ logger.Sync()
+		fmt.Printf("\n") // Print a newline since most command output does not end with one.
+ }()
+
+ rootCmd := &cobra.Command{
+		Use: "m3ctl",
+ }
+
+ getCmd := &cobra.Command{
+ Use: "get",
+ Short: "Get specified resources from the remote",
+ }
+
+ deleteCmd := &cobra.Command{
+ Use: "delete",
+ Short: "Delete specified resources from the remote",
+ }
+
+ applyCmd := &cobra.Command{
+ Use: "apply",
+ Short: "Apply various yamls to remote endpoint",
+ Long: `This will take specific yamls and send them over to the remote
+endpoint. See the yaml/examples directory for examples. Operations such as
+database creation, database init, adding a node, and replacing a node are supported.
+`,
+ Run: func(cmd *cobra.Command, args []string) {
+ fileArg := cmd.LocalFlags().Lookup("file").Value.String()
+ logger.Debug("running command", zap.String("name", cmd.Name()), zap.String("args", fileArg))
+
+ if len(fileArg) == 0 {
+ logger.Fatal("need to specify a path to YAML file")
+ }
+
+ resp, err := apply.DoApply(endPoint, headers, yamlPath, logger)
+ if err != nil {
+ logger.Fatal("apply failed", zap.Error(err))
+ }
+
+ os.Stdout.Write(resp)
+ },
+ }
+
+ getNamespaceCmd := &cobra.Command{
+ Use: "namespace []",
+ Short: "Get the namespaces from the remote endpoint",
+ Aliases: []string{"ns"},
+ Run: func(cmd *cobra.Command, args []string) {
+ logger.Debug("running command", zap.String("command", cmd.Name()))
+
+ resp, err := namespaces.DoGet(endPoint, headers, logger)
+ if err != nil {
+ logger.Fatal("get namespace failed", zap.Error(err))
+ }
+
+ if !showAll {
+ var registry admin.NamespaceGetResponse
+ unmarshaller := &jsonpb.Unmarshaler{AllowUnknownFields: true}
+ reader := bytes.NewReader(resp)
+				if err := unmarshaller.Unmarshal(reader, &registry); err != nil {
+ logger.Fatal("could not unmarshal response", zap.Error(err))
+ }
+ var namespaces []string
+ for k := range registry.Registry.Namespaces {
+ namespaces = append(namespaces, k)
+ }
+ // Keep output consistent and output JSON.
+ if err := json.NewEncoder(os.Stdout).Encode(namespaces); err != nil {
+ logger.Fatal("could not encode output", zap.Error(err))
+ }
+ return
+ }
+
+ os.Stdout.Write(resp)
+ },
+ }
+
+ getPlacementCmd := &cobra.Command{
+ Use: "placement",
+ Short: "Get the placement from the remote endpoint",
+ Aliases: []string{"pl"},
+ Run: func(cmd *cobra.Command, args []string) {
+ logger.Debug("running command", zap.String("command", cmd.Name()))
+
+ resp, err := placements.DoGet(endPoint, headers, logger)
+ if err != nil {
+ logger.Fatal("get placement failed", zap.Error(err))
+ }
+
+ os.Stdout.Write(resp)
+ },
+ }
+
+ deletePlacementCmd := &cobra.Command{
+ Use: "placement",
+ Short: "Delete the placement from the remote endpoint",
+ Aliases: []string{"pl"},
+ Run: func(cmd *cobra.Command, args []string) {
+ logger.Debug("running command", zap.String("command", cmd.Name()))
+
+ resp, err := placements.DoDelete(endPoint, headers, nodeName, deleteAll, logger)
+ if err != nil {
+ logger.Fatal("delete placement failed", zap.Error(err))
+ }
+
+ os.Stdout.Write(resp)
+ },
+ }
+
+ deleteNamespaceCmd := &cobra.Command{
+ Use: "namespace",
+ Short: "Delete the namespace from the remote endpoint",
+ Aliases: []string{"ns"},
+ Run: func(cmd *cobra.Command, args []string) {
+ logger.Debug("running command", zap.String("command", cmd.Name()))
+
+ resp, err := namespaces.DoDelete(endPoint, headers, nodeName, logger)
+ if err != nil {
+ logger.Fatal("delete namespace failed", zap.Error(err))
+ }
+
+ os.Stdout.Write(resp)
+ },
+ }
+
+ rootCmd.AddCommand(getCmd, applyCmd, deleteCmd)
+ getCmd.AddCommand(getNamespaceCmd)
+ getCmd.AddCommand(getPlacementCmd)
+ deleteCmd.AddCommand(deletePlacementCmd)
+ deleteCmd.AddCommand(deleteNamespaceCmd)
+
+ var headersSlice []string
+ rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "d", false, "debug log output level (cannot use JSON output)")
+ rootCmd.PersistentFlags().StringVar(&endPoint, "endpoint", defaultEndpoint, "m3coordinator endpoint URL")
+	rootCmd.PersistentFlags().StringSliceVarP(&headersSlice, "header", "H", []string{}, "headers to append to requests (format 'name: value')")
+	applyCmd.Flags().StringVarP(&yamlPath, "file", "f", "", "path to the YAML file to apply")
+	getNamespaceCmd.Flags().BoolVarP(&showAll, "show-all", "a", false, "show the full namespace response instead of only the names")
+ deletePlacementCmd.Flags().BoolVarP(&deleteAll, "delete-all", "a", false, "delete the entire placement")
+ deleteCmd.PersistentFlags().StringVarP(&nodeName, "name", "n", "", "which namespace or node to delete")
+
+ rootCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
+ // Override logger if debug flag set.
+ if debug {
+ logger = mustNewLogger(loggerOptions{
+ level: zapcore.DebugLevel,
+ enableStacktrace: true,
+ })
+ }
+
+ // Parse headers slice.
+ for _, h := range headersSlice {
+			parts := strings.SplitN(h, ":", 2)
+ if len(parts) != 2 {
+ return fmt.Errorf(
+ "header must be of format 'name: value': actual='%s'", h)
+ }
+
+ name, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])
+ headers[name] = value
+ }
+
+ return nil
+ }
+
+	if err := rootCmd.Execute(); err != nil {
+		os.Exit(1)
+	}
+}
diff --git a/src/cmd/tools/m3ctl/namespaces/delete.go b/src/cmd/tools/m3ctl/namespaces/delete.go
new file mode 100644
index 0000000000..4feafe49eb
--- /dev/null
+++ b/src/cmd/tools/m3ctl/namespaces/delete.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package namespaces
+
+import (
+ "fmt"
+
+ "github.com/m3db/m3/src/cmd/tools/m3ctl/client"
+ "go.uber.org/zap"
+)
+
+// DoDelete calls the namespace delete API on the backend
+func DoDelete(
+ endpoint string,
+ headers map[string]string,
+ nsName string,
+ logger *zap.Logger,
+) ([]byte, error) {
+ url := fmt.Sprintf("%s%s/%s", endpoint, "/api/v1/services/m3db/namespace", nsName)
+ return client.DoDelete(url, headers, logger)
+}
diff --git a/src/cmd/tools/m3ctl/namespaces/get.go b/src/cmd/tools/m3ctl/namespaces/get.go
new file mode 100644
index 0000000000..00efd478e5
--- /dev/null
+++ b/src/cmd/tools/m3ctl/namespaces/get.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package namespaces
+
+import (
+ "fmt"
+
+ "go.uber.org/zap"
+
+ "github.com/m3db/m3/src/cmd/tools/m3ctl/client"
+)
+
+// DoGet calls the namespace get API on the backend
+func DoGet(
+ endpoint string,
+ headers map[string]string,
+ logger *zap.Logger,
+) ([]byte, error) {
+ url := fmt.Sprintf("%s%s?%s", endpoint, DefaultPath, DebugQS)
+ return client.DoGet(url, headers, logger)
+}
diff --git a/src/cmd/tools/m3ctl/namespaces/types.go b/src/cmd/tools/m3ctl/namespaces/types.go
new file mode 100644
index 0000000000..8046270391
--- /dev/null
+++ b/src/cmd/tools/m3ctl/namespaces/types.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package namespaces
+
+const (
+	// DefaultPath is the default URL path for the namespace API calls
+	DefaultPath = "/api/v1/namespace"
+	// DebugQS is the query string that activates debug output in API responses
+	DebugQS = "debug=true"
+)
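
These two constants compose directly into the GET URL used above; a quick sketch of the resulting string (the endpoint is an assumption for illustration):

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/cmd/tools/m3ctl/namespaces"
)

func main() {
	endpoint := "http://localhost:7201" // assumed local coordinator
	url := fmt.Sprintf("%s%s?%s", endpoint, namespaces.DefaultPath, namespaces.DebugQS)
	fmt.Println(url) // http://localhost:7201/api/v1/namespace?debug=true
}
```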
diff --git a/src/cmd/tools/m3ctl/placements/delete.go b/src/cmd/tools/m3ctl/placements/delete.go
new file mode 100644
index 0000000000..1bf29a1aac
--- /dev/null
+++ b/src/cmd/tools/m3ctl/placements/delete.go
@@ -0,0 +1,45 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package placements
+
+import (
+ "fmt"
+
+ "github.com/m3db/m3/src/cmd/tools/m3ctl/client"
+
+ "go.uber.org/zap"
+)
+
+// DoDelete calls the placement delete API on the backend
+func DoDelete(
+ endpoint string,
+ headers map[string]string,
+ nodeName string,
+ deleteEntire bool,
+ logger *zap.Logger,
+) ([]byte, error) {
+ if deleteEntire {
+ url := fmt.Sprintf("%s%s", endpoint, DefaultPath)
+ return client.DoDelete(url, headers, logger)
+ }
+ url := fmt.Sprintf("%s%s/%s", endpoint, DefaultPath, nodeName)
+ return client.DoDelete(url, headers, logger)
+}
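
The deleteEntire flag selects between removing a single node and tearing down the whole placement; a hedged sketch of both call shapes (endpoint and node name are illustrative):

```go
package main

import (
	"go.uber.org/zap"

	"github.com/m3db/m3/src/cmd/tools/m3ctl/placements"
)

func main() {
	logger := zap.NewExample()
	endpoint := "http://localhost:7201" // assumed coordinator address

	// DELETE <endpoint>/api/v1/services/m3db/placement/m3db001 removes one node.
	if _, err := placements.DoDelete(endpoint, nil, "m3db001", false, logger); err != nil {
		logger.Fatal("node delete failed", zap.Error(err))
	}

	// DELETE <endpoint>/api/v1/services/m3db/placement removes the entire placement.
	if _, err := placements.DoDelete(endpoint, nil, "", true, logger); err != nil {
		logger.Fatal("placement delete failed", zap.Error(err))
	}
}
```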
diff --git a/src/cmd/tools/m3ctl/placements/get.go b/src/cmd/tools/m3ctl/placements/get.go
new file mode 100644
index 0000000000..4845a0065f
--- /dev/null
+++ b/src/cmd/tools/m3ctl/placements/get.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package placements
+
+import (
+ "fmt"
+
+ "github.com/m3db/m3/src/cmd/tools/m3ctl/client"
+
+ "go.uber.org/zap"
+)
+
+// DoGet calls the backend get placement API
+func DoGet(
+ endpoint string,
+ headers map[string]string,
+ logger *zap.Logger,
+) ([]byte, error) {
+ url := fmt.Sprintf("%s%s", endpoint, DefaultPath)
+ return client.DoGet(url, headers, logger)
+}
diff --git a/src/cmd/tools/m3ctl/placements/types.go b/src/cmd/tools/m3ctl/placements/types.go
new file mode 100644
index 0000000000..00917c3a34
--- /dev/null
+++ b/src/cmd/tools/m3ctl/placements/types.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package placements
+
+const (
+	// DefaultPath is the default URL path for placement API calls
+ DefaultPath = "/api/v1/services/m3db/placement"
+)
diff --git a/src/cmd/tools/m3ctl/yaml/db_create.proto b/src/cmd/tools/m3ctl/yaml/db_create.proto
new file mode 100644
index 0000000000..5cca79a367
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/db_create.proto
@@ -0,0 +1,8 @@
+syntax = "proto3";
+package yaml;
+
+import "github.com/m3db/m3/src/query/generated/proto/admin/database.proto";
+message DatabaseCreateRequestYaml {
+ string operation = 1;
+ admin.DatabaseCreateRequest request = 2;
+}
diff --git a/src/cmd/tools/m3ctl/yaml/examples/create.yaml b/src/cmd/tools/m3ctl/yaml/examples/create.yaml
new file mode 100644
index 0000000000..0d53cbfe82
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/examples/create.yaml
@@ -0,0 +1,28 @@
+---
+operation: create
+request:
+ type: cluster
+ namespace_name: 1week_namespace
+ retention_time: 168h
+ num_shards: 1024
+ replication_factor: 3
+ hosts:
+ - id: m3db001
+ isolationGroup: us-east1-a
+ zone: embedded
+ weight: 100
+ address: 10.142.0.1
+ port: 9000
+ - id: m3db002
+ isolationGroup: us-east1-b
+ zone: embedded
+ weight: 100
+ address: 10.142.0.2
+ port: 9000
+ - id: m3db003
+ isolationGroup: us-east1-c
+ zone: embedded
+ weight: 100
+ address: 10.142.0.3
+ port: 9000
+
diff --git a/src/cmd/tools/m3ctl/yaml/examples/develdb.yaml b/src/cmd/tools/m3ctl/yaml/examples/develdb.yaml
new file mode 100644
index 0000000000..6b7d774d9e
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/examples/develdb.yaml
@@ -0,0 +1,17 @@
+---
+operation: create
+request:
+ type: cluster
+ namespace_name: default
+ retention_time: 168h
+ num_shards: 64
+ replication_factor: 1
+ hosts:
+ - id: m3db_seed
+ isolation_group: rack-a
+ zone: embedded
+ weight: 1024
+ endpoint: m3db_seed:9000
+ hostname: m3db_seed
+ port: 9000
+
diff --git a/src/cmd/tools/m3ctl/yaml/examples/init.yaml b/src/cmd/tools/m3ctl/yaml/examples/init.yaml
new file mode 100644
index 0000000000..f86723c266
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/examples/init.yaml
@@ -0,0 +1,27 @@
+---
+operation: init
+request:
+ num_shards: 64
+ replication_factor: 1
+ instances:
+ - id: nodeid1
+ isolation_group: isogroup1
+ zone: etcd1
+ weight: 100
+ endpoint: node1:9000
+ hostname: node1
+ port: 9000
+ - id: nodeid2
+ isolation_group: isogroup2
+ zone: etcd1
+ weight: 100
+ endpoint: node2:9000
+ hostname: node2
+ port: 9000
+ - id: nodeid3
+ isolation_group: isogroup3
+ zone: etcd1
+ weight: 100
+ endpoint: node3:9000
+ hostname: node3
+ port: 9000
\ No newline at end of file
diff --git a/src/cmd/tools/m3ctl/yaml/examples/new_node.yaml b/src/cmd/tools/m3ctl/yaml/examples/new_node.yaml
new file mode 100644
index 0000000000..abdccf73bd
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/examples/new_node.yaml
@@ -0,0 +1,11 @@
+---
+operation: newNode
+request:
+ instances:
+ - id: node1
+ isolationGroup: isoGroup1
+ zone: embedded
+ weight: 100
+ endpoint: targetHostname1:9000
+ hostname: newNodeHostname1
+ port: 9000
\ No newline at end of file
diff --git a/src/cmd/tools/m3ctl/yaml/examples/replace_node.yaml b/src/cmd/tools/m3ctl/yaml/examples/replace_node.yaml
new file mode 100644
index 0000000000..37ee51120a
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/examples/replace_node.yaml
@@ -0,0 +1,14 @@
+---
+operation: replaceNode
+request:
+ leavingInstanceIDs:
+ - oldnodeid1
+ candidates:
+ - id: newnodeid1
+ isolationGroup: newnodeisogroup1
+ zone: etcdzone1
+ weight: 100
+ endpoint: node11:9000
+ hostname: node11
+ port: 9000
+
diff --git a/src/cmd/tools/m3ctl/yaml/generated/db_create.pb.go b/src/cmd/tools/m3ctl/yaml/generated/db_create.pb.go
new file mode 100644
index 0000000000..425a5ab05a
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/generated/db_create.pb.go
@@ -0,0 +1,112 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: db_create.proto
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package yaml
+
+import (
+ fmt "fmt"
+ math "math"
+
+ proto "github.com/golang/protobuf/proto"
+ admin "github.com/m3db/m3/src/query/generated/proto/admin"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type DatabaseCreateRequestYaml struct {
+ Operation string `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"`
+ Request *admin.DatabaseCreateRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DatabaseCreateRequestYaml) Reset() { *m = DatabaseCreateRequestYaml{} }
+func (m *DatabaseCreateRequestYaml) String() string { return proto.CompactTextString(m) }
+func (*DatabaseCreateRequestYaml) ProtoMessage() {}
+func (*DatabaseCreateRequestYaml) Descriptor() ([]byte, []int) {
+ return fileDescriptor_57e276f15713f139, []int{0}
+}
+
+func (m *DatabaseCreateRequestYaml) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DatabaseCreateRequestYaml.Unmarshal(m, b)
+}
+func (m *DatabaseCreateRequestYaml) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DatabaseCreateRequestYaml.Marshal(b, m, deterministic)
+}
+func (m *DatabaseCreateRequestYaml) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DatabaseCreateRequestYaml.Merge(m, src)
+}
+func (m *DatabaseCreateRequestYaml) XXX_Size() int {
+ return xxx_messageInfo_DatabaseCreateRequestYaml.Size(m)
+}
+func (m *DatabaseCreateRequestYaml) XXX_DiscardUnknown() {
+ xxx_messageInfo_DatabaseCreateRequestYaml.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DatabaseCreateRequestYaml proto.InternalMessageInfo
+
+func (m *DatabaseCreateRequestYaml) GetOperation() string {
+ if m != nil {
+ return m.Operation
+ }
+ return ""
+}
+
+func (m *DatabaseCreateRequestYaml) GetRequest() *admin.DatabaseCreateRequest {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*DatabaseCreateRequestYaml)(nil), "yaml.DatabaseCreateRequestYaml")
+}
+
+func init() { proto.RegisterFile("db_create.proto", fileDescriptor_57e276f15713f139) }
+
+var fileDescriptor_57e276f15713f139 = []byte{
+ // 177 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8d, 0xb1, 0xca, 0xc2, 0x30,
+ 0x14, 0x46, 0xe9, 0xcf, 0x8f, 0xd2, 0x38, 0x08, 0x9d, 0xaa, 0x74, 0x28, 0x4e, 0x9d, 0x72, 0xc1,
+ 0x82, 0xbb, 0xe8, 0x13, 0x74, 0x73, 0x92, 0x9b, 0xe6, 0x52, 0x0b, 0x4d, 0xd3, 0xa6, 0x37, 0x43,
+ 0xdf, 0x5e, 0x8c, 0x15, 0x17, 0xd7, 0x8f, 0xf3, 0x9d, 0x23, 0xb6, 0x5a, 0xdd, 0x6b, 0x47, 0xc8,
+ 0x24, 0x07, 0x67, 0xd9, 0x26, 0xff, 0x33, 0x9a, 0x6e, 0x7f, 0x6e, 0x5a, 0x7e, 0x78, 0x25, 0x6b,
+ 0x6b, 0xc0, 0x94, 0x5a, 0x81, 0x29, 0x61, 0x72, 0x35, 0x8c, 0x9e, 0xdc, 0x0c, 0x0d, 0xf5, 0xe4,
+ 0x90, 0x49, 0x43, 0xf8, 0x00, 0x6a, 0xd3, 0xf6, 0xa0, 0x91, 0x51, 0xe1, 0xb4, 0x88, 0x0e, 0xa3,
+ 0xd8, 0x5d, 0x97, 0xe5, 0x12, 0x02, 0x15, 0x8d, 0x9e, 0x26, 0xbe, 0xa1, 0xe9, 0x92, 0x4c, 0xc4,
+ 0x76, 0x78, 0x39, 0x5a, 0xdb, 0xa7, 0x51, 0x1e, 0x15, 0x71, 0xf5, 0x1d, 0x92, 0x93, 0x58, 0xbb,
+ 0x37, 0x9c, 0xfe, 0xe5, 0x51, 0xb1, 0x39, 0x66, 0x32, 0x24, 0xe4, 0x4f, 0x61, 0xf5, 0x81, 0xd5,
+ 0x2a, 0x94, 0xcb, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbe, 0x30, 0x42, 0x7b, 0xd5, 0x00, 0x00,
+ 0x00,
+}
diff --git a/src/cmd/tools/m3ctl/yaml/generated/placement.pb.go b/src/cmd/tools/m3ctl/yaml/generated/placement.pb.go
new file mode 100644
index 0000000000..a6144371b8
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/generated/placement.pb.go
@@ -0,0 +1,161 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: placement.proto
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package yaml
+
+import (
+ fmt "fmt"
+ math "math"
+
+ proto "github.com/golang/protobuf/proto"
+ admin "github.com/m3db/m3/src/query/generated/proto/admin"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type PlacementInitRequestYaml struct {
+ Operation string `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"`
+ Request *admin.PlacementInitRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PlacementInitRequestYaml) Reset() { *m = PlacementInitRequestYaml{} }
+func (m *PlacementInitRequestYaml) String() string { return proto.CompactTextString(m) }
+func (*PlacementInitRequestYaml) ProtoMessage() {}
+func (*PlacementInitRequestYaml) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ae0216eeb0d08e49, []int{0}
+}
+
+func (m *PlacementInitRequestYaml) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PlacementInitRequestYaml.Unmarshal(m, b)
+}
+func (m *PlacementInitRequestYaml) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PlacementInitRequestYaml.Marshal(b, m, deterministic)
+}
+func (m *PlacementInitRequestYaml) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PlacementInitRequestYaml.Merge(m, src)
+}
+func (m *PlacementInitRequestYaml) XXX_Size() int {
+ return xxx_messageInfo_PlacementInitRequestYaml.Size(m)
+}
+func (m *PlacementInitRequestYaml) XXX_DiscardUnknown() {
+ xxx_messageInfo_PlacementInitRequestYaml.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PlacementInitRequestYaml proto.InternalMessageInfo
+
+func (m *PlacementInitRequestYaml) GetOperation() string {
+ if m != nil {
+ return m.Operation
+ }
+ return ""
+}
+
+func (m *PlacementInitRequestYaml) GetRequest() *admin.PlacementInitRequest {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+type PlacementReplaceRequestYaml struct {
+ Operation string `protobuf:"bytes,3,opt,name=operation,proto3" json:"operation,omitempty"`
+ Request *admin.PlacementReplaceRequest `protobuf:"bytes,4,opt,name=request,proto3" json:"request,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PlacementReplaceRequestYaml) Reset() { *m = PlacementReplaceRequestYaml{} }
+func (m *PlacementReplaceRequestYaml) String() string { return proto.CompactTextString(m) }
+func (*PlacementReplaceRequestYaml) ProtoMessage() {}
+func (*PlacementReplaceRequestYaml) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ae0216eeb0d08e49, []int{1}
+}
+
+func (m *PlacementReplaceRequestYaml) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PlacementReplaceRequestYaml.Unmarshal(m, b)
+}
+func (m *PlacementReplaceRequestYaml) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PlacementReplaceRequestYaml.Marshal(b, m, deterministic)
+}
+func (m *PlacementReplaceRequestYaml) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PlacementReplaceRequestYaml.Merge(m, src)
+}
+func (m *PlacementReplaceRequestYaml) XXX_Size() int {
+ return xxx_messageInfo_PlacementReplaceRequestYaml.Size(m)
+}
+func (m *PlacementReplaceRequestYaml) XXX_DiscardUnknown() {
+ xxx_messageInfo_PlacementReplaceRequestYaml.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PlacementReplaceRequestYaml proto.InternalMessageInfo
+
+func (m *PlacementReplaceRequestYaml) GetOperation() string {
+ if m != nil {
+ return m.Operation
+ }
+ return ""
+}
+
+func (m *PlacementReplaceRequestYaml) GetRequest() *admin.PlacementReplaceRequest {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*PlacementInitRequestYaml)(nil), "yaml.PlacementInitRequestYaml")
+ proto.RegisterType((*PlacementReplaceRequestYaml)(nil), "yaml.PlacementReplaceRequestYaml")
+}
+
+func init() { proto.RegisterFile("placement.proto", fileDescriptor_ae0216eeb0d08e49) }
+
+var fileDescriptor_ae0216eeb0d08e49 = []byte{
+ // 201 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0xc8, 0x49, 0x4c,
+ 0x4e, 0xcd, 0x4d, 0xcd, 0x2b, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xa9, 0x4c, 0xcc,
+ 0xcd, 0x91, 0x72, 0x4a, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xcf, 0x35,
+ 0x4e, 0x49, 0xd2, 0xcf, 0x35, 0xd6, 0x2f, 0x2e, 0x4a, 0xd6, 0x2f, 0x2c, 0x4d, 0x2d, 0xaa, 0xd4,
+ 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x07, 0xeb, 0xd1, 0x4f, 0x4c, 0xc9,
+ 0xcd, 0xcc, 0xd3, 0x47, 0x33, 0x49, 0x29, 0x9f, 0x4b, 0x22, 0x00, 0x26, 0xe4, 0x99, 0x97, 0x59,
+ 0x12, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x12, 0x99, 0x98, 0x9b, 0x23, 0x24, 0xc3, 0xc5, 0x99,
+ 0x5f, 0x00, 0x32, 0x23, 0x33, 0x3f, 0x4f, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0x21, 0x20,
+ 0x64, 0xca, 0xc5, 0x5e, 0x04, 0x51, 0x2c, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xad, 0x07,
+ 0xb6, 0x42, 0x0f, 0x9b, 0x79, 0x41, 0x30, 0xb5, 0x4a, 0xa5, 0x5c, 0xd2, 0x70, 0x05, 0x41, 0xa9,
+ 0x60, 0xe7, 0xe0, 0xb4, 0x93, 0x19, 0xdd, 0x4e, 0x0b, 0x84, 0x9d, 0x2c, 0x60, 0x3b, 0xe5, 0xd0,
+ 0xed, 0x44, 0x35, 0x12, 0x6e, 0x6d, 0x12, 0x1b, 0xd8, 0xbb, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff,
+ 0xff, 0x5e, 0x24, 0x27, 0x1e, 0x4b, 0x01, 0x00, 0x00,
+}
diff --git a/src/cmd/tools/m3ctl/yaml/load.go b/src/cmd/tools/m3ctl/yaml/load.go
new file mode 100644
index 0000000000..34c93a05a9
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/load.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+
+	"github.com/gogo/protobuf/jsonpb"
+	"github.com/gogo/protobuf/proto"
+
+	"go.uber.org/zap"
+)
+
+// Load reads a yaml representation of an m3 structure
+// and produces an io.Reader of its protocol buffer-JSON encoding.
+//
+// We don't know in advance what the user is trying to do, so we peek
+// at the yaml to determine the intended action, then load it.
+//
+// See the examples directory.
+func Load(path string, zl *zap.Logger) (string, io.Reader, error) {
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return "", nil, err
+ }
+ url, pbmessage, err := peeker(content)
+ if err != nil {
+ return "", nil, err
+ }
+	rv, err := load(pbmessage)
+	if err != nil {
+		return "", nil, err
+	}
+	return url, rv, nil
+}
+
+func load(target proto.Message) (io.Reader, error) {
+	// marshal the message into its protobuf-JSON encoding, which is
+	// what the coordinator API accepts
+	data := bytes.NewBuffer(nil)
+ marshaller := &jsonpb.Marshaler{}
+ if err := marshaller.Marshal(data, target); err != nil {
+ return nil, err
+ }
+ return data, nil
+}
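
Load hands back both the URL path inferred from the yaml's operation and a JSON-encoded request body, so a caller only has to attach an endpoint. A sketch of wiring it to a plain HTTP POST (using net/http directly is an assumption for illustration; the tool itself routes through its client package):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"

	"go.uber.org/zap"

	"github.com/m3db/m3/src/cmd/tools/m3ctl/yaml"
)

func main() {
	logger := zap.NewExample()
	urlPath, body, err := yaml.Load("examples/create.yaml", logger)
	if err != nil {
		logger.Fatal("load failed", zap.Error(err))
	}
	// Assumed local coordinator endpoint.
	resp, err := http.Post("http://localhost:7201"+urlPath, "application/json", body)
	if err != nil {
		logger.Fatal("post failed", zap.Error(err))
	}
	defer resp.Body.Close()
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```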
diff --git a/src/cmd/tools/m3ctl/yaml/peeker.go b/src/cmd/tools/m3ctl/yaml/peeker.go
new file mode 100644
index 0000000000..94699e523c
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/peeker.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package yaml
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+ "github.com/gogo/protobuf/proto"
+
+ "github.com/m3db/m3/src/cmd/tools/m3ctl/placements"
+ "github.com/m3db/m3/src/query/generated/proto/admin"
+)
+
+// peeker looks at the yaml to determine which operation it describes.
+//
+// It returns the url path and the proto.Message for that operation.
+func peeker(data []byte) (string, proto.Message, error) {
+ type peeker struct {
+ Operation string
+ }
+ peek := &peeker{}
+
+	// this does nothing more than unpack the yaml into the private
+	// type above to take a peek at Operation; it's just a peek
+ if err := yaml.Unmarshal(data, &peek); err != nil {
+ return "", nil, err
+ }
+
+ // now the payload is of known type
+ // unmarshal it and return the proto.Message
+ switch peek.Operation {
+ case opCreate:
+ payload := struct{ Request admin.DatabaseCreateRequest }{}
+ if err := yaml.Unmarshal(data, &payload); err != nil {
+ return "", nil, err
+ }
+ return dbcreatePath, &payload.Request, nil
+ case opInit:
+ payload := struct{ Request admin.PlacementInitRequest }{}
+ if err := yaml.Unmarshal(data, &payload); err != nil {
+ return "", nil, err
+ }
+ return fmt.Sprintf("%s/init", placements.DefaultPath), &payload.Request, nil
+ case opReplace:
+ payload := struct{ Request admin.PlacementReplaceRequest }{}
+ if err := yaml.Unmarshal(data, &payload); err != nil {
+ return "", nil, err
+ }
+ return fmt.Sprintf("%s/replace", placements.DefaultPath), &payload.Request, nil
+ case opNewNode:
+ payload := struct{ Request admin.PlacementInitRequest }{}
+ if err := yaml.Unmarshal(data, &payload); err != nil {
+ return "", nil, err
+ }
+		return placements.DefaultPath, &payload.Request, nil
+ default:
+		return "", nil, fmt.Errorf("unknown operation specified in the yaml")
+ }
+}
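
A concrete trace of the dispatch, written as a hypothetical in-package test (the yaml literal is a trimmed version of examples/init.yaml):

```go
package yaml

import "testing"

func TestPeekerInitDispatch(t *testing.T) {
	data := []byte("operation: init\nrequest:\n  num_shards: 64\n  replication_factor: 1\n")
	url, msg, err := peeker(data)
	if err != nil {
		t.Fatalf("peeker failed: %v", err)
	}
	// An init operation selects the placement init path and message type.
	if url != "/api/v1/services/m3db/placement/init" {
		t.Errorf("unexpected url: %s", url)
	}
	t.Logf("message type: %T", msg) // *admin.PlacementInitRequest
}
```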
diff --git a/src/cmd/tools/m3ctl/yaml/peeker_test.go b/src/cmd/tools/m3ctl/yaml/peeker_test.go
new file mode 100644
index 0000000000..f822b4cb4e
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/peeker_test.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package yaml
+
+import (
+ "io/ioutil"
+ "testing"
+
+ "github.com/gogo/protobuf/jsonpb"
+
+ "github.com/m3db/m3/src/query/generated/proto/admin"
+)
+
+func TestPeekerPositive(t *testing.T) {
+ content, err := ioutil.ReadFile("./testdata/basic_create.yaml")
+ if err != nil {
+ t.Fatalf("failed to read yaml test data:%v:\n", err)
+ }
+ urlpath, pbmessage, err := peeker(content)
+ if err != nil {
+		t.Fatalf("peeker failed on valid create yaml test data:%v:\n", err)
+ }
+ if urlpath != dbcreatePath {
+ t.Errorf("urlpath is wrong:expected:%s:got:%s:\n", dbcreatePath, urlpath)
+ }
+ data, err := load(pbmessage)
+ if err != nil {
+ t.Fatalf("failed to encode to protocol:%v:\n", err)
+ }
+ var dest admin.DatabaseCreateRequest
+ unmarshaller := &jsonpb.Unmarshaler{AllowUnknownFields: true}
+ if err := unmarshaller.Unmarshal(data, &dest); err != nil {
+		t.Fatalf("failed to unmarshal encoded create request:%v:\n", err)
+ }
+ t.Logf("dest:%v:\n", dest)
+	if dest.NamespaceName != "default" {
+		t.Errorf("dest NamespaceName does not have the correct value:expected:%v:got:%v:", "default", dest.NamespaceName)
+	}
+	if dest.Type != "cluster" {
+		t.Errorf("dest Type does not have the correct value:expected:%v:got:%v:", "cluster", dest.Type)
+	}
+ if dest.ReplicationFactor != 327 {
+ t.Errorf("in and out ReplicationFactor did not match:expected:%d:got:%d:\n", 327, dest.ReplicationFactor)
+ }
+ if len(dest.Hosts) != 1 {
+ t.Errorf("number of hosts is wrong:expected:%d:got:%d:\n", 1, len(dest.Hosts))
+ }
+	if dest.Hosts[0].Id != "m3db_seed" {
+		t.Errorf("host id is wrong:expected:%s:got:%s:\n", "m3db_seed", dest.Hosts[0].Id)
+	}
+}
+
+func TestPeekerNegative(t *testing.T) {
+ content, err := ioutil.ReadFile("./testdata/unknown_operation.yaml")
+ if err != nil {
+ t.Fatalf("failed to read yaml test data:%v:\n", err)
+ }
+ if _, _, err := peeker(content); err == nil {
+ t.Fatalf("operation selector should have returned an error\n")
+ }
+}
diff --git a/src/cmd/tools/m3ctl/yaml/placement.proto b/src/cmd/tools/m3ctl/yaml/placement.proto
new file mode 100644
index 0000000000..bdf71f77eb
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/placement.proto
@@ -0,0 +1,12 @@
+syntax = "proto3";
+package yaml;
+
+import "github.com/m3db/m3/src/query/generated/proto/admin/placement.proto";
+message PlacementInitRequestYaml {
+ string operation = 1;
+ admin.PlacementInitRequest request = 2;
+}
+message PlacementReplaceRequestYaml {
+ string operation = 3;
+ admin.PlacementReplaceRequest request = 4;
+}
\ No newline at end of file
diff --git a/src/cmd/tools/m3ctl/yaml/testdata/basic_create.yaml b/src/cmd/tools/m3ctl/yaml/testdata/basic_create.yaml
new file mode 100644
index 0000000000..a5a513a481
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/testdata/basic_create.yaml
@@ -0,0 +1,17 @@
+---
+operation: create
+request:
+ type: cluster
+ namespace_name: default
+ retention_time: 168h
+ num_shards: 64
+ replication_factor: 327
+ hosts:
+ - id: m3db_seed
+ isolation_group: rack-a
+ zone: embedded
+ weight: 1024
+ endpoint: m3db_seed:9000
+ hostname: m3db_seed
+ port: 9000
+
diff --git a/src/cmd/tools/m3ctl/yaml/testdata/unknown_operation.yaml b/src/cmd/tools/m3ctl/yaml/testdata/unknown_operation.yaml
new file mode 100644
index 0000000000..c721f32050
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/testdata/unknown_operation.yaml
@@ -0,0 +1,17 @@
+---
+operation: unknown
+request:
+ type: cluster
+ namespace_name: default
+ retention_time: 168h
+ num_shards: 64
+ replication_factor: 327
+ hosts:
+ - id: m3db_seed
+ isolation_group: rack-a
+ zone: embedded
+ weight: 1024
+ endpoint: m3db_seed:9000
+ hostname: m3db_seed
+ port: 9000
+
diff --git a/src/cmd/tools/m3ctl/yaml/types.go b/src/cmd/tools/m3ctl/yaml/types.go
new file mode 100644
index 0000000000..9a70a88e7a
--- /dev/null
+++ b/src/cmd/tools/m3ctl/yaml/types.go
@@ -0,0 +1,29 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package yaml
+
+const (
+ opCreate = "create"
+ opInit = "init"
+ opReplace = "replaceNode"
+ opNewNode = "newNode"
+ dbcreatePath = "/api/v1/database/create"
+)
diff --git a/src/cmd/tools/read_index_files/main/main.go b/src/cmd/tools/read_index_files/main/main.go
index 7f1fc7de36..317dc9e935 100644
--- a/src/cmd/tools/read_index_files/main/main.go
+++ b/src/cmd/tools/read_index_files/main/main.go
@@ -44,12 +44,13 @@ import (
func main() {
var (
- optPathPrefix = getopt.StringLong("path-prefix", 'p', "/var/lib/m3db", "Path prefix [e.g. /var/lib/m3db]")
- optNamespace = getopt.StringLong("namespace", 'n', "metrics", "Namespace [e.g. metrics]")
- optBlockstart = getopt.Int64Long("block-start", 'b', 0, "Block Start Time [in nsec]")
- optVolumeIndex = getopt.Int64Long("volume-index", 'v', 0, "Volume index")
- optLargeFieldLimit = getopt.Int64Long("large-field-limit", 'l', 0, "Large Field Limit (non-zero to display fields with num terms > limit)")
- optOutputIdsPrefix = getopt.StringLong("output-ids-prefix", 'o', "", "If set, it emits all terms for the _m3ninx_id field.")
+ optPathPrefix = getopt.StringLong("path-prefix", 'p', "/var/lib/m3db", "Path prefix [e.g. /var/lib/m3db]")
+ optNamespace = getopt.StringLong("namespace", 'n', "metrics", "Namespace [e.g. metrics]")
+ optBlockstart = getopt.Int64Long("block-start", 'b', 0, "Block Start Time [in nsec]")
+ optVolumeIndex = getopt.Int64Long("volume-index", 'v', 0, "Volume index")
+ optLargeFieldLimit = getopt.Int64Long("large-field-limit", 'l', 0, "Large Field Limit (non-zero to display fields with num terms > limit)")
+ optOutputIdsPrefix = getopt.StringLong("output-ids-prefix", 'o', "", "If set, it emits all terms for the _m3ninx_id field.")
+		optSkipValidateIntegrity = getopt.BoolLong("skip-validate-integrity", 's', "If set, will skip integrity validation on segment open")
)
getopt.Parse()
@@ -66,7 +67,9 @@ func main() {
os.Exit(1)
}
- fsOpts := fs.NewOptions().SetFilePathPrefix(*optPathPrefix)
+ fsOpts := fs.NewOptions().
+ SetFilePathPrefix(*optPathPrefix).
+ SetIndexReaderAutovalidateIndexSegments(!*optSkipValidateIntegrity)
reader, err := fs.NewIndexReader(fsOpts)
if err != nil {
log.Fatalf("could not create new index reader: %v", err)
@@ -89,6 +92,7 @@ func main() {
i := 0
for {
i++
+ log.Info("opening index segment file set")
fileset, err := reader.ReadSegmentFileSet()
if err == io.EOF {
break
@@ -97,6 +101,11 @@ func main() {
log.Fatalf("unable to retrieve fileset: %v", err)
}
+ log.Info("validating index segment file set")
+ if err := reader.Validate(); err != nil {
+ log.Fatalf("error validating segment file set: %v", err)
+ }
+
seg, err := m3ninxpersist.NewSegment(fileset, fsOpts.FSTOptions())
if err != nil {
log.Fatalf("unable to open segment reader: %v", err)
diff --git a/src/cmd/tools/read_index_segments/README.md b/src/cmd/tools/read_index_segments/README.md
new file mode 100644
index 0000000000..b5d0a7afb0
--- /dev/null
+++ b/src/cmd/tools/read_index_segments/README.md
@@ -0,0 +1,16 @@
+# read_index_segments
+
+`read_index_segments` is a utility to read the index segments in a given directory and emit their contents as JSON.
+
+# Usage
+```
+$ git clone git@github.com:m3db/m3.git
+$ make read_index_segments
+$ ./bin/read_index_segments
+Usage: read_index_segments [-o value] [-p value] [parameters ...]
+ -o, --output-file=value
+ Output JSON file of line delimited JSON objects for each
+ segment
+ -p, --path-prefix=value
+ Path prefix [e.g. /var/lib/m3db]
+```
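+
+A hypothetical end-to-end invocation (paths are illustrative):
+```
+$ ./bin/read_index_segments -p /var/lib/m3db -o /tmp/segments.json
+```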
diff --git a/src/cmd/tools/read_index_segments/main/main.go b/src/cmd/tools/read_index_segments/main/main.go
new file mode 100644
index 0000000000..e314c76919
--- /dev/null
+++ b/src/cmd/tools/read_index_segments/main/main.go
@@ -0,0 +1,330 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+	"fmt"
+	"io"
+	golog "log"
+	"os"
+	"time"
+
+	"github.com/m3db/m3/src/dbnode/persist"
+	"github.com/m3db/m3/src/dbnode/persist/fs"
+	"github.com/m3db/m3/src/query/util/json"
+	"github.com/m3db/m3/src/x/ident"
+	"github.com/m3db/m3/src/x/unsafe"
+
+	"github.com/pborman/getopt"
+	"go.uber.org/zap"
+)
+
+func main() {
+ var (
+ optPathPrefix = getopt.StringLong("path-prefix", 'p', "/var/lib/m3db", "Path prefix [e.g. /var/lib/m3db]")
+ optOutputFile = getopt.StringLong("output-file", 'o', "", "Output JSON file of line delimited JSON objects for each segment")
+ )
+ getopt.Parse()
+
+ logConfig := zap.NewDevelopmentConfig()
+ log, err := logConfig.Build()
+ if err != nil {
+ golog.Fatalf("unable to create logger: %+v", err)
+ }
+
+ if *optPathPrefix == "" || *optOutputFile == "" {
+ getopt.Usage()
+ os.Exit(1)
+ }
+
+ run(runOptions{
+ filePathPrefix: *optPathPrefix,
+ outputFilePath: *optOutputFile,
+ log: log,
+ })
+}
+
+type runOptions struct {
+ filePathPrefix string
+ outputFilePath string
+ log *zap.Logger
+}
+
+func run(opts runOptions) {
+ log := opts.log
+
+ fsOpts := fs.NewOptions().
+ SetFilePathPrefix(opts.filePathPrefix)
+
+ indexDirPath := fs.IndexDataDirPath(opts.filePathPrefix)
+
+ namespaces, err := dirFiles(indexDirPath)
+ if err != nil {
+ log.Fatal("could not read namespaces", zap.Error(err))
+ }
+
+ // Get all fileset files.
+ log.Info("discovered namespaces", zap.Strings("namespaces", namespaces))
+
+ out, err := os.Create(opts.outputFilePath)
+ if err != nil {
+ log.Fatal("unable to create output file",
+ zap.String("file", opts.outputFilePath),
+ zap.Error(err))
+ }
+
+ for _, namespace := range namespaces {
+ log.Info("reading segments", zap.String("namespace", namespace))
+ ns := ident.StringID(namespace)
+
+ readNamespaceSegments(out, ns, fsOpts, log)
+
+		// Separate namespaces with a newline.
+ if _, err := out.WriteString("\n"); err != nil {
+ log.Fatal("could not write endline", zap.Error(err))
+ }
+ }
+}
+
+func readNamespaceSegments(
+ out io.Writer,
+ nsID ident.ID,
+ fsOpts fs.Options,
+ log *zap.Logger,
+) {
+ infoFiles := fs.ReadIndexInfoFiles(fsOpts.FilePathPrefix(), nsID,
+ fsOpts.InfoReaderBufferSize())
+
+ for _, infoFile := range infoFiles {
+ if err := infoFile.Err.Error(); err != nil {
+ log.Error("unable to read index info file",
+ zap.Stringer("namespace", nsID),
+ zap.Error(err),
+ zap.String("filepath", infoFile.Err.Filepath()),
+ )
+ continue
+ }
+
+ segments, err := fs.ReadIndexSegments(fs.ReadIndexSegmentsOptions{
+ ReaderOptions: fs.IndexReaderOpenOptions{
+ Identifier: infoFile.ID,
+ FileSetType: persist.FileSetFlushType,
+ },
+ FilesystemOptions: fsOpts,
+ })
+ if err != nil {
+ log.Error("unable to read segments from index fileset",
+ zap.Stringer("namespace", nsID),
+ zap.Error(err),
+ zap.Time("blockStart", time.Unix(0, infoFile.Info.BlockStart)),
+ zap.Int("volumeIndex", infoFile.ID.VolumeIndex),
+ )
+ continue
+ }
+
+ for i, seg := range segments {
+ jw := json.NewWriter(out)
+ jw.BeginObject()
+
+ jw.BeginObjectField("namespace")
+ jw.WriteString(nsID.String())
+
+ jw.BeginObjectField("blockStart")
+ jw.WriteString(time.Unix(0, infoFile.Info.BlockStart).Format(time.RFC3339))
+
+ jw.BeginObjectField("volumeIndex")
+ jw.WriteInt(infoFile.ID.VolumeIndex)
+
+ jw.BeginObjectField("segmentIndex")
+ jw.WriteInt(i)
+
+ reader, err := seg.Reader()
+ if err != nil {
+ log.Fatal("unable to create segment reader", zap.Error(err))
+ }
+
+ iter, err := reader.AllDocs()
+ if err != nil {
+ log.Fatal("unable to iterate segment docs", zap.Error(err))
+ }
+
+ jw.BeginObjectField("documents")
+ jw.BeginArray()
+ for postingsID := 0; iter.Next(); postingsID++ {
+ d := iter.Current()
+ jw.BeginObject()
+
+ jw.BeginObjectField("postingsID")
+ jw.WriteInt(postingsID)
+
+ jw.BeginObjectField("id")
+ unsafe.WithString(d.ID, func(str string) {
+ jw.WriteString(str)
+ })
+
+ jw.BeginObjectField("fields")
+
+ jw.BeginArray()
+ for _, field := range d.Fields {
+ jw.BeginObject()
+
+ jw.BeginObjectField("name")
+ unsafe.WithString(field.Name, func(str string) {
+ jw.WriteString(str)
+ })
+
+ jw.BeginObjectField("value")
+				unsafe.WithString(field.Value, func(str string) {
+ jw.WriteString(str)
+ })
+
+ jw.EndObject()
+ }
+ jw.EndArray()
+
+ jw.EndObject()
+ }
+ jw.EndArray()
+
+ if err := iter.Err(); err != nil {
+ log.Fatal("doc iterator error", zap.Error(err))
+ }
+ if err := iter.Close(); err != nil {
+ log.Fatal("doc iterator close error", zap.Error(err))
+ }
+
+ fieldsIter, err := seg.FieldsIterable().Fields()
+ if err != nil {
+ log.Fatal("could not create fields iterator", zap.Error(err))
+ }
+
+ jw.BeginObjectField("fields")
+ jw.BeginArray()
+ for fieldsIter.Next() {
+ field := fieldsIter.Current()
+
+ jw.BeginObject()
+ jw.BeginObjectField("field")
+ unsafe.WithString(field, func(str string) {
+ jw.WriteString(str)
+ })
+
+ termsIter, err := seg.TermsIterable().Terms(field)
+ if err != nil {
+ log.Fatal("could not create terms iterator", zap.Error(err))
+ }
+
+ jw.BeginObjectField("terms")
+ jw.BeginArray()
+ for termsIter.Next() {
+ term, postingsList := termsIter.Current()
+
+ jw.BeginObject()
+ jw.BeginObjectField("term")
+ unsafe.WithString(term, func(str string) {
+ jw.WriteString(str)
+ })
+
+ postingsIter := postingsList.Iterator()
+
+ jw.BeginObjectField("postings")
+ jw.BeginArray()
+ for postingsIter.Next() {
+ postingsID := postingsIter.Current()
+ jw.WriteInt(int(postingsID))
+ }
+ jw.EndArray()
+ jw.EndObject()
+
+ if err := postingsIter.Err(); err != nil {
+ log.Fatal("postings iterator error", zap.Error(err))
+ }
+
+ if err := postingsIter.Close(); err != nil {
+ log.Fatal("postings iterator close error", zap.Error(err))
+ }
+ }
+ jw.EndArray()
+ jw.EndObject()
+
+				if err := termsIter.Err(); err != nil {
+					log.Fatal("terms iterator error", zap.Error(err))
+				}
+
+				if err := termsIter.Close(); err != nil {
+					log.Fatal("terms iterator close error", zap.Error(err))
+				}
+ }
+ jw.EndArray()
+
+ if err := fieldsIter.Err(); err != nil {
+ log.Fatal("field iterator error", zap.Error(err))
+ }
+
+ if err := fieldsIter.Close(); err != nil {
+ log.Fatal("field iterator close error", zap.Error(err))
+ }
+
+ jw.EndObject()
+
+ if err := jw.Flush(); err != nil {
+ log.Fatal("could not flush JSON writer", zap.Error(err))
+ }
+ if err := jw.Close(); err != nil {
+ log.Fatal("could not close JSON writer", zap.Error(err))
+ }
+ }
+ }
+}
+
+func dirFiles(dirPath string) ([]string, error) {
+ dir, err := os.Open(dirPath)
+ if err != nil {
+ return nil, fmt.Errorf("could not open dir: %v", err)
+ }
+
+ defer dir.Close()
+
+ stat, err := dir.Stat()
+ if err != nil {
+ return nil, fmt.Errorf("could not stat dir: %v", err)
+ }
+ if !stat.IsDir() {
+ return nil, fmt.Errorf("path is not a directory: %s", dirPath)
+ }
+
+ entries, err := dir.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read dir names: %v", err)
+ }
+
+ results := entries[:0]
+ for _, p := range entries {
+		// Readdirnames returns bare names, so only "." and ".." need filtering.
+		if p == "." || p == ".." {
+ continue
+ }
+ results = append(results, p)
+ }
+ return results, nil
+}
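
Each segment is written as one JSON object, so the output lends itself to stream processing. A hedged sketch of a consumer that tallies document counts per namespace (the output path is illustrative; the field names follow the writer code above):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Open("/tmp/segments.json") // assumed --output-file path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	counts := make(map[string]int)
	dec := json.NewDecoder(f) // streams concatenated JSON objects, skipping whitespace
	for {
		var seg struct {
			Namespace string            `json:"namespace"`
			Documents []json.RawMessage `json:"documents"`
		}
		if err := dec.Decode(&seg); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		counts[seg.Namespace] += len(seg.Documents)
	}
	for ns, n := range counts {
		fmt.Printf("%s: %d docs\n", ns, n)
	}
}
```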
diff --git a/src/cmd/tools/verify_data_files/main/main.go b/src/cmd/tools/verify_data_files/main/main.go
index 7c65cc0b17..9ac107f650 100644
--- a/src/cmd/tools/verify_data_files/main/main.go
+++ b/src/cmd/tools/verify_data_files/main/main.go
@@ -381,11 +381,9 @@ func fixFileSet(
}()
var (
- currTags []ident.Tag
- currTagsIter = ident.NewTagsIterator(ident.Tags{})
- removedIDs int
- removedTags int
- copies []checked.Bytes
+ removedIDs int
+ removedTags int
+ copies []checked.Bytes
)
for {
id, tags, data, checksum, err := reader.Read()
@@ -396,39 +394,9 @@ func fixFileSet(
return err
}
- // Need to save tags in case we need to write them out again
- // (iterating them in read entry means we can't reiterate them
- // without copying/duplicating).
- currTags = currTags[:0]
- for tags.Next() {
- tag := tags.Current()
- name := tag.Name.Bytes()
- value := tag.Value.Bytes()
-
- // Need to take copy as only valid during iteration.
- nameCopy := opts.bytesPool.Get(len(name))
- nameCopy.IncRef()
- nameCopy.AppendAll(name)
- valueCopy := opts.bytesPool.Get(len(value))
- valueCopy.IncRef()
- valueCopy.AppendAll(value)
- copies = append(copies, nameCopy)
- copies = append(copies, valueCopy)
-
- currTags = append(currTags, ident.Tag{
- Name: ident.BytesID(nameCopy.Bytes()),
- Value: ident.BytesID(valueCopy.Bytes()),
- })
- }
-
- // Choose to write out the current tags if do not need modifying.
- writeTags := currTags[:]
+ tagsCopy := tags.Duplicate()
- var currIdentTags ident.Tags
- currIdentTags.Reset(currTags)
- currTagsIter.Reset(currIdentTags)
-
- check, err := readEntry(id, currTagsIter, data, checksum)
+ check, err := readEntry(id, tags, data, checksum)
if err != nil {
shouldFixInvalidID := check.invalidID && opts.fixInvalidIDs
shouldFixInvalidTags := check.invalidTags && opts.fixInvalidTags
@@ -447,11 +415,14 @@ func fixFileSet(
return fmt.Errorf("encountered an error not enabled to fix: %v", err)
}
- var writeIdentTags ident.Tags
- writeIdentTags.Reset(writeTags)
+ metadata := persist.NewMetadataFromIDAndTagIterator(id, tagsCopy,
+ persist.MetadataOptions{
+ FinalizeID: true,
+ FinalizeTagIterator: true,
+ })
data.IncRef()
- err = writer.Write(id, writeIdentTags, data, checksum)
+ err = writer.Write(metadata, data, checksum)
data.DecRef()
if err != nil {
return fmt.Errorf("could not write fixed file set entry: %v", err)
diff --git a/src/cmd/tools/verify_data_files/main/main_test.go b/src/cmd/tools/verify_data_files/main/main_test.go
index fcc4a54947..b46592533a 100644
--- a/src/cmd/tools/verify_data_files/main/main_test.go
+++ b/src/cmd/tools/verify_data_files/main/main_test.go
@@ -34,7 +34,7 @@ import (
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
-
+
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
@@ -120,12 +120,16 @@ func TestFixFileSetInvalidID(t *testing.T) {
checksum := digest.Checksum(data.Bytes())
writer := testWriter.writer
- err = writer.Write(id, ident.Tags{}, data, checksum)
+ metadata := persist.NewMetadataFromIDAndTags(id, ident.Tags{},
+ persist.MetadataOptions{})
+ err = writer.Write(metadata, data, checksum)
require.NoError(t, err)
// Write valid ID.
id = ident.StringID("foo")
- err = writer.Write(id, ident.Tags{}, data, checksum)
+ metadata = persist.NewMetadataFromIDAndTags(id, ident.Tags{},
+ persist.MetadataOptions{})
+ err = writer.Write(metadata, data, checksum)
require.NoError(t, err)
// Close volume.
@@ -189,7 +193,9 @@ func TestFixFileSetInvalidTags(t *testing.T) {
data.IncRef()
checksum := digest.Checksum(data.Bytes())
- err = writer.Write(id, tags, data, checksum)
+ metadata := persist.NewMetadataFromIDAndTags(id, tags,
+ persist.MetadataOptions{})
+ err = writer.Write(metadata, data, checksum)
require.NoError(t, err)
// Write valid tags.
@@ -208,7 +214,9 @@ func TestFixFileSetInvalidTags(t *testing.T) {
data.IncRef()
checksum = digest.Checksum(data.Bytes())
- err = writer.Write(id, tags, data, checksum)
+ metadata = persist.NewMetadataFromIDAndTags(id, tags,
+ persist.MetadataOptions{})
+ err = writer.Write(metadata, data, checksum)
require.NoError(t, err)
// Close volume.
@@ -281,7 +289,9 @@ func TestFixFileSetInvalidChecksum(t *testing.T) {
data.IncRef()
checksum := digest.Checksum(data.Bytes()) + 1
- err = writer.Write(id, tags, data, checksum)
+ metadata := persist.NewMetadataFromIDAndTags(id, tags,
+ persist.MetadataOptions{})
+ err = writer.Write(metadata, data, checksum)
require.NoError(t, err)
// Write valid checksum.
@@ -300,7 +310,9 @@ func TestFixFileSetInvalidChecksum(t *testing.T) {
data.IncRef()
checksum = digest.Checksum(data.Bytes())
- err = writer.Write(id, tags, data, checksum)
+ metadata = persist.NewMetadataFromIDAndTags(id, tags,
+ persist.MetadataOptions{})
+ err = writer.Write(metadata, data, checksum)
require.NoError(t, err)
// Close volume.
diff --git a/src/collector/README.md b/src/collector/README.md
index 7cda4a33af..4ab5895fe6 100644
--- a/src/collector/README.md
+++ b/src/collector/README.md
@@ -1,17 +1,6 @@
## WARNING: This is Alpha software and not intended for use until a stable release.
-# m3collector [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+# m3collector
Metrics collection agent. Responsible for collecting metrics and forwarding them to
downstream services (e.g., for aggregation or permanent storage).
-
-
-
-This project is released under the [Apache License, Version 2.0](LICENSE).
-
-[doc-img]: https://godoc.org/github.com/m3db/m3collector?status.svg
-[doc]: https://godoc.org/github.com/m3db/m3collector
-[ci-img]: https://travis-ci.org/m3db/m3collector.svg?branch=master
-[ci]: https://travis-ci.org/m3db/m3collector
-[cov-img]: https://coveralls.io/repos/m3db/m3collector/badge.svg?branch=master&service=github
-[cov]: https://coveralls.io/github/m3db/m3collector?branch=master
diff --git a/src/collector/api/v1/handler/json/report_test.go b/src/collector/api/v1/handler/json/report_test.go
index 98680265ac..db9a16c607 100644
--- a/src/collector/api/v1/handler/json/report_test.go
+++ b/src/collector/api/v1/handler/json/report_test.go
@@ -30,9 +30,9 @@ import (
"github.com/m3db/m3/src/collector/reporter"
"github.com/m3db/m3/src/metrics/metric/id"
- "github.com/m3db/m3/src/x/serialize"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
+ "github.com/m3db/m3/src/x/serialize"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@@ -191,10 +191,12 @@ func newTestReportHandler(ctrl *gomock.Controller) testReportHandler {
reporter := reporter.NewMockReporter(ctrl)
poolOpts := pool.NewObjectPoolOptions().SetSize(1)
tagEncoderPool := serialize.NewTagEncoderPool(
- serialize.NewTagEncoderOptions(), poolOpts)
+ serialize.NewTagEncoderOptions(),
+ poolOpts)
tagEncoderPool.Init()
tagDecoderPool := serialize.NewTagDecoderPool(
- serialize.NewTagDecoderOptions(), poolOpts)
+ serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
+ poolOpts)
tagDecoderPool.Init()
instrumentOpts := instrument.NewOptions()
diff --git a/src/collector/integration/setup.go b/src/collector/integration/setup.go
index c8bd9a3c36..d2227b88de 100644
--- a/src/collector/integration/setup.go
+++ b/src/collector/integration/setup.go
@@ -24,6 +24,7 @@ import (
"errors"
"flag"
"testing"
+ "time"
aggclient "github.com/m3db/m3/src/aggregator/client"
aggserver "github.com/m3db/m3/src/collector/integration/server"
@@ -59,7 +60,11 @@ func newTestSetup(t *testing.T, opts testOptions) *testSetup {
cache := cache.NewCache(opts.CacheOptions())
matcher, err := matcher.NewMatcher(cache, opts.MatcherOptions())
require.NoError(t, err)
- client := aggclient.NewClient(opts.AggregatorClientOptions())
+ clientOpts := opts.AggregatorClientOptions().
+ SetConnectionOptions(opts.AggregatorClientOptions().ConnectionOptions().
+ SetWriteTimeout(time.Second)) // CI is slow, prefer slow write than drop data.
+ client, err := aggclient.NewClient(clientOpts)
+ require.NoError(t, err)
reporter := aggreporter.NewReporter(matcher, client, opts.AggregatorReporterOptions())
// Create server.
diff --git a/src/collector/reporter/m3aggregator/reporter.go b/src/collector/reporter/m3aggregator/reporter.go
index b29365cc36..90a98aef1c 100644
--- a/src/collector/reporter/m3aggregator/reporter.go
+++ b/src/collector/reporter/m3aggregator/reporter.go
@@ -55,7 +55,7 @@ type reporterMetrics struct {
func newReporterMetrics(instrumentOpts instrument.Options) reporterMetrics {
scope := instrumentOpts.MetricsScope()
- samplingRate := instrumentOpts.MetricsSamplingRate()
+ timerOpts := instrumentOpts.TimerOptions()
hostName := "unknown"
if name, err := os.Hostname(); err == nil {
hostName = name
@@ -64,10 +64,10 @@ func newReporterMetrics(instrumentOpts instrument.Options) reporterMetrics {
}
hostScope := scope.Tagged(map[string]string{"host": hostName})
return reporterMetrics{
- reportCounter: instrument.NewMethodMetrics(scope, "report-counter", samplingRate),
- reportBatchTimer: instrument.NewMethodMetrics(scope, "report-batch-timer", samplingRate),
- reportGauge: instrument.NewMethodMetrics(scope, "report-gauge", samplingRate),
- flush: instrument.NewMethodMetrics(scope, "flush", samplingRate),
+ reportCounter: instrument.NewMethodMetrics(scope, "report-counter", timerOpts),
+ reportBatchTimer: instrument.NewMethodMetrics(scope, "report-batch-timer", timerOpts),
+ reportGauge: instrument.NewMethodMetrics(scope, "report-gauge", timerOpts),
+ flush: instrument.NewMethodMetrics(scope, "flush", timerOpts),
reportPending: hostScope.Gauge("report-pending"),
}
}
@@ -122,9 +122,9 @@ func (r *reporter) ReportCounter(id id.ID, value int64) error {
counter := unaggregated.Counter{ID: id.Bytes(), Value: value}
matchResult := r.matcher.ForwardMatch(id, fromNanos, toNanos)
- matchExisting, _ := matchResult.ForExistingIDAt(fromNanos).ApplyOrRemoveDropPolicies()
- if !matchExisting.IsDropPolicyApplied() {
- err := r.client.WriteUntimedCounter(counter, matchExisting)
+ stagedMetadatas := matchResult.ForExistingIDAt(fromNanos)
+ if !stagedMetadatas.IsDropPolicyApplied() {
+ err := r.client.WriteUntimedCounter(counter, stagedMetadatas)
if err != nil {
multiErr = multiErr.Add(err)
}
@@ -162,9 +162,9 @@ func (r *reporter) ReportBatchTimer(id id.ID, value []float64) error {
batchTimer := unaggregated.BatchTimer{ID: id.Bytes(), Values: value}
matchResult := r.matcher.ForwardMatch(id, fromNanos, toNanos)
- matchExisting, _ := matchResult.ForExistingIDAt(fromNanos).ApplyOrRemoveDropPolicies()
- if !matchExisting.IsDropPolicyApplied() {
- err := r.client.WriteUntimedBatchTimer(batchTimer, matchExisting)
+ stagedMetadatas := matchResult.ForExistingIDAt(fromNanos)
+ if !stagedMetadatas.IsDropPolicyApplied() {
+ err := r.client.WriteUntimedBatchTimer(batchTimer, stagedMetadatas)
if err != nil {
multiErr = multiErr.Add(err)
}
@@ -201,9 +201,9 @@ func (r *reporter) ReportGauge(id id.ID, value float64) error {
gauge := unaggregated.Gauge{ID: id.Bytes(), Value: value}
matchResult := r.matcher.ForwardMatch(id, fromNanos, toNanos)
- matchExisting, _ := matchResult.ForExistingIDAt(fromNanos).ApplyOrRemoveDropPolicies()
- if !matchExisting.IsDropPolicyApplied() {
- err := r.client.WriteUntimedGauge(gauge, matchExisting)
+ stagedMetadatas := matchResult.ForExistingIDAt(fromNanos)
+ if !stagedMetadatas.IsDropPolicyApplied() {
+ err := r.client.WriteUntimedGauge(gauge, stagedMetadatas)
if err != nil {
multiErr = multiErr.Add(err)
}
diff --git a/src/collector/reporter/m3aggregator/reporter_test.go b/src/collector/reporter/m3aggregator/reporter_test.go
index 4ce4bde77a..9e8a4b2554 100644
--- a/src/collector/reporter/m3aggregator/reporter_test.go
+++ b/src/collector/reporter/m3aggregator/reporter_test.go
@@ -160,10 +160,7 @@ var (
testMatchForNewRollupIDs)
testMatchDropPolicyNotYetEffectiveResult = rules.NewMatchResult(0, math.MaxInt64,
- append(testMatchForExistingID, metadata.StagedMetadata{
- Metadata: metadata.DropMetadata,
- CutoverNanos: testNow.Add(-1 * (testNegativeSkew / 2)).UnixNano(),
- }),
+ testMatchForExistingID,
testMatchForNewRollupIDs)
)
diff --git a/src/collector/server/server.go b/src/collector/server/server.go
index 819564e51c..e28578a424 100644
--- a/src/collector/server/server.go
+++ b/src/collector/server/server.go
@@ -28,13 +28,16 @@ import (
"os/signal"
"syscall"
+ "github.com/m3db/m3/src/aggregator/server"
clusterclient "github.com/m3db/m3/src/cluster/client"
+ "github.com/m3db/m3/src/cmd/services/m3aggregator/serve"
"github.com/m3db/m3/src/cmd/services/m3collector/config"
"github.com/m3db/m3/src/collector/api/v1/httpd"
"github.com/m3db/m3/src/collector/reporter"
"github.com/m3db/m3/src/collector/reporter/m3aggregator"
xconfig "github.com/m3db/m3/src/x/config"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/serialize"
@@ -50,6 +53,9 @@ type RunOptions struct {
// InterruptCh is a programmatic interrupt channel to supply to
// interrupt and shutdown the server.
InterruptCh <-chan error
+
+ // AggregatorServerOptions are the server options for the aggregator.
+ AggregatorServerOptions []server.AdminOption
}
// Run runs the server programmatically given a filename for the configuration file.
@@ -85,14 +91,25 @@ func Run(runOpts RunOptions) {
logger.Fatal("could not create etcd client", zap.Error(err))
}
+ serveOptions := serve.NewOptions(instrumentOpts)
+ for i, transform := range runOpts.AggregatorServerOptions {
+ if opts, err := transform(serveOptions); err != nil {
+ logger.Fatal("could not apply transform",
+ zap.Int("index", i), zap.Error(err))
+ } else {
+ serveOptions = opts
+ }
+ }
+
+ rwOpts := serveOptions.RWOptions()
logger.Info("creating reporter")
- reporter, err := newReporter(cfg.Reporter, clusterClient, instrumentOpts)
+ reporter, err := newReporter(cfg.Reporter, clusterClient, instrumentOpts, rwOpts)
if err != nil {
logger.Fatal("could not create reporter", zap.Error(err))
}
tagEncoderOptions := serialize.NewTagEncoderOptions()
- tagDecoderOptions := serialize.NewTagDecoderOptions()
+ tagDecoderOptions := serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{})
tagEncoderPoolOptions := pool.NewObjectPoolOptions().
SetInstrumentOptions(instrumentOpts.
SetMetricsScope(instrumentOpts.MetricsScope().
@@ -167,6 +184,7 @@ func newReporter(
cfg config.ReporterConfiguration,
clusterClient clusterclient.Client,
instrumentOpts instrument.Options,
+ rwOpts xio.Options,
) (reporter.Reporter, error) {
scope := instrumentOpts.MetricsScope()
logger := instrumentOpts.Logger()
@@ -185,7 +203,7 @@ func newReporter(
logger.Info("creating aggregator client")
aggClient, err := cfg.Client.NewClient(clusterClient, clockOpts,
- instrumentOpts.SetMetricsScope(scope.SubScope("backend")))
+ instrumentOpts.SetMetricsScope(scope.SubScope("backend")), rwOpts)
if err != nil {
return nil, fmt.Errorf("unable to create agg tier client: %v", err)
}
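
The server now folds a slice of option transforms over the serve options, failing fast on the first error. A sketch of that fold with hypothetical local types (the real serve.Options surface is not shown in this diff):

package main

import (
	"fmt"
	"log"
)

// options is a stand-in for the aggregator serve options being transformed.
type options struct{ label string }

// adminOption mirrors the transform shape: take options, return new options or an error.
type adminOption func(options) (options, error)

func main() {
	opts := options{label: "defaults"}
	transforms := []adminOption{
		func(o options) (options, error) { o.label = "custom"; return o, nil },
	}
	// Apply each transform in order, failing fast like the server does.
	for i, transform := range transforms {
		next, err := transform(opts)
		if err != nil {
			log.Fatalf("could not apply transform %d: %v", i, err)
		}
		opts = next
	}
	fmt.Println(opts.label) // custom
}
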
diff --git a/src/ctl/public/r2/v1/swagger/swagger.json b/src/ctl/public/r2/v1/swagger/swagger.json
index 6a7523be34..90d569d0a3 100644
--- a/src/ctl/public/r2/v1/swagger/swagger.json
+++ b/src/ctl/public/r2/v1/swagger/swagger.json
@@ -6,7 +6,7 @@
"title": "R2 API",
"license": {
"name": "MIT",
- "url": "https://github.com/m3db/m3ctl/blob/master/LICENSE.md"
+ "url": "https://github.com/m3db/m3/blob/master/LICENSE.md"
}
},
"host": "localhost:9000",
diff --git a/src/ctl/service/r2/routes_test.go b/src/ctl/service/r2/routes_test.go
index d12e736e6f..eb0b5ba823 100644
--- a/src/ctl/service/r2/routes_test.go
+++ b/src/ctl/service/r2/routes_test.go
@@ -300,7 +300,7 @@ func newTestService(store store.Store) *service {
}
iOpts := instrument.NewOptions()
return &service{
- metrics: newServiceMetrics(iOpts.MetricsScope(), iOpts.MetricsSamplingRate()),
+ metrics: newServiceMetrics(iOpts.MetricsScope(), iOpts.TimerOptions()),
nowFn: clock.NewOptions().NowFn(),
store: store,
authService: auth.NewNoopAuth(),
@@ -329,7 +329,7 @@ func newTestDeleteRequest() *http.Request {
}
func newTestInstrumentMethodMetrics() instrument.MethodMetrics {
- return instrument.NewMethodMetrics(tally.NoopScope, "testRoute", 1.0)
+ return instrument.NewMethodMetrics(tally.NoopScope, "testRoute", instrument.TimerOptions{})
}
func newTestBulkReqBody() updateRuleSetRequest {
diff --git a/src/ctl/service/r2/service.go b/src/ctl/service/r2/service.go
index de719cace4..debfefbff1 100644
--- a/src/ctl/service/r2/service.go
+++ b/src/ctl/service/r2/service.go
@@ -80,24 +80,24 @@ type serviceMetrics struct {
updateRuleSet instrument.MethodMetrics
}
-func newServiceMetrics(scope tally.Scope, samplingRate float64) serviceMetrics {
+func newServiceMetrics(scope tally.Scope, opts instrument.TimerOptions) serviceMetrics {
return serviceMetrics{
- fetchNamespaces: instrument.NewMethodMetrics(scope, "fetchNamespaces", samplingRate),
- fetchNamespace: instrument.NewMethodMetrics(scope, "fetchNamespace", samplingRate),
- createNamespace: instrument.NewMethodMetrics(scope, "createNamespace", samplingRate),
- deleteNamespace: instrument.NewMethodMetrics(scope, "deleteNamespace", samplingRate),
- validateRuleSet: instrument.NewMethodMetrics(scope, "validateRuleSet", samplingRate),
- fetchMappingRule: instrument.NewMethodMetrics(scope, "fetchMappingRule", samplingRate),
- createMappingRule: instrument.NewMethodMetrics(scope, "createMappingRule", samplingRate),
- updateMappingRule: instrument.NewMethodMetrics(scope, "updateMappingRule", samplingRate),
- deleteMappingRule: instrument.NewMethodMetrics(scope, "deleteMappingRule", samplingRate),
- fetchMappingRuleHistory: instrument.NewMethodMetrics(scope, "fetchMappingRuleHistory", samplingRate),
- fetchRollupRule: instrument.NewMethodMetrics(scope, "fetchRollupRule", samplingRate),
- createRollupRule: instrument.NewMethodMetrics(scope, "createRollupRule", samplingRate),
- updateRollupRule: instrument.NewMethodMetrics(scope, "updateRollupRule", samplingRate),
- deleteRollupRule: instrument.NewMethodMetrics(scope, "deleteRollupRule", samplingRate),
- fetchRollupRuleHistory: instrument.NewMethodMetrics(scope, "fetchRollupRuleHistory", samplingRate),
- updateRuleSet: instrument.NewMethodMetrics(scope, "updateRuleSet", samplingRate),
+ fetchNamespaces: instrument.NewMethodMetrics(scope, "fetchNamespaces", opts),
+ fetchNamespace: instrument.NewMethodMetrics(scope, "fetchNamespace", opts),
+ createNamespace: instrument.NewMethodMetrics(scope, "createNamespace", opts),
+ deleteNamespace: instrument.NewMethodMetrics(scope, "deleteNamespace", opts),
+ validateRuleSet: instrument.NewMethodMetrics(scope, "validateRuleSet", opts),
+ fetchMappingRule: instrument.NewMethodMetrics(scope, "fetchMappingRule", opts),
+ createMappingRule: instrument.NewMethodMetrics(scope, "createMappingRule", opts),
+ updateMappingRule: instrument.NewMethodMetrics(scope, "updateMappingRule", opts),
+ deleteMappingRule: instrument.NewMethodMetrics(scope, "deleteMappingRule", opts),
+ fetchMappingRuleHistory: instrument.NewMethodMetrics(scope, "fetchMappingRuleHistory", opts),
+ fetchRollupRule: instrument.NewMethodMetrics(scope, "fetchRollupRule", opts),
+ createRollupRule: instrument.NewMethodMetrics(scope, "createRollupRule", opts),
+ updateRollupRule: instrument.NewMethodMetrics(scope, "updateRollupRule", opts),
+ deleteRollupRule: instrument.NewMethodMetrics(scope, "deleteRollupRule", opts),
+ fetchRollupRuleHistory: instrument.NewMethodMetrics(scope, "fetchRollupRuleHistory", opts),
+ updateRuleSet: instrument.NewMethodMetrics(scope, "updateRuleSet", opts),
}
}
@@ -154,7 +154,7 @@ func NewService(
authService: authService,
logger: iOpts.Logger(),
nowFn: clockOpts.NowFn(),
- metrics: newServiceMetrics(iOpts.MetricsScope(), iOpts.MetricsSamplingRate()),
+ metrics: newServiceMetrics(iOpts.MetricsScope(), iOpts.TimerOptions()),
}
}
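
Both the ctl service and the collector reporter migrate from a bare samplingRate float to an instrument.TimerOptions value, so timer configuration travels as one struct. A self-contained sketch of the refactor shape, with local stand-ins for the instrument types:

package main

import "fmt"

// timerOptions stands in for instrument.TimerOptions: it bundles what used
// to be a loose samplingRate float64 with room for future settings.
type timerOptions struct{ samplingRate float64 }

type methodMetrics struct {
	name string
	opts timerOptions
}

// newMethodMetrics now accepts the options struct instead of a raw rate.
func newMethodMetrics(name string, opts timerOptions) methodMetrics {
	return methodMetrics{name: name, opts: opts}
}

func main() {
	opts := timerOptions{samplingRate: 0.5}
	m := newMethodMetrics("fetchNamespaces", opts)
	fmt.Printf("%s sampled at %.1f\n", m.name, m.opts.samplingRate)
}
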
diff --git a/src/ctl/ui/yarn.lock b/src/ctl/ui/yarn.lock
index 236f29ff10..7659e24833 100644
--- a/src/ctl/ui/yarn.lock
+++ b/src/ctl/ui/yarn.lock
@@ -3342,10 +3342,10 @@ event-emitter@~0.3.5:
d "1"
es5-ext "~0.10.14"
-eventemitter3@^3.0.0:
- version "3.1.2"
- resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-3.1.2.tgz#2d3d48f9c346698fce83a85d7d664e98535df6e7"
- integrity sha512-tvtQIeLVHjDkJYnzf2dgVMxfuSGJeM/7UCG17TT4EumTfNtF+0nebF/4zWOIkCreAbtNqhGEboB6BWrwqNaw4Q==
+eventemitter3@^4.0.0:
+ version "4.0.7"
+ resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f"
+ integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==
eventlistener@0.0.1:
version "0.0.1"
@@ -3747,11 +3747,9 @@ follow-redirects@1.5.10:
debug "=3.1.0"
follow-redirects@^1.0.0:
- version "1.7.0"
- resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.7.0.tgz#489ebc198dc0e7f64167bd23b03c4c19b5784c76"
- integrity sha512-m/pZQy4Gj287eNy94nivy5wchN3Kp+Q5WgUPNy5lJSZ3sgkVKSYV/ZChMAQVIgx1SqfZ2zBZtPA2YlXIWxxJOQ==
- dependencies:
- debug "^3.2.6"
+ version "1.13.0"
+ resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.13.0.tgz#b42e8d93a2a7eea5ed88633676d6597bc8e384db"
+ integrity sha512-aq6gF1BEKje4a9i9+5jimNFIpq4Q1WiwBToeRK5NvZBd/TRsmW8BsJfOEGkr76TbOyPVD3OVDN910EcUNtRYEA==
for-in@^1.0.1, for-in@^1.0.2:
version "1.0.2"
@@ -4406,11 +4404,11 @@ http-proxy-middleware@~0.17.4:
micromatch "^2.3.11"
http-proxy@^1.16.2:
- version "1.17.0"
- resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.17.0.tgz#7ad38494658f84605e2f6db4436df410f4e5be9a"
- integrity sha512-Taqn+3nNvYRfJ3bGvKfBSRwy1v6eePlm3oc/aWVxZp57DQr5Eq3xhKJi7Z4hZpS8PC3H4qI+Yly5EmFacGuA/g==
+ version "1.18.1"
+ resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.18.1.tgz#401541f0534884bbf95260334e72f88ee3976549"
+ integrity sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==
dependencies:
- eventemitter3 "^3.0.0"
+ eventemitter3 "^4.0.0"
follow-redirects "^1.0.0"
requires-port "^1.0.0"
@@ -9688,9 +9686,9 @@ websocket-driver@>=0.5.1:
websocket-extensions ">=0.1.1"
websocket-extensions@>=0.1.1:
- version "0.1.3"
- resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.3.tgz#5d2ff22977003ec687a4b87073dfbbac146ccf29"
- integrity sha512-nqHUnMXmBzT0w570r2JpJxfiSD1IzoI+HGVdd3aZ0yNi3ngvQ4jv1dtHt5VGxfI2yj5yqImPhOK4vmIh2xMbGg==
+ version "0.1.4"
+ resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.4.tgz#7f8473bc839dfd87608adb95d7eb075211578a42"
+ integrity sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==
whatwg-encoding@^1.0.1:
version "1.0.5"
diff --git a/src/dbnode/client/client_mock.go b/src/dbnode/client/client_mock.go
index 0232b717ec..9da52ea71a 100644
--- a/src/dbnode/client/client_mock.go
+++ b/src/dbnode/client/client_mock.go
@@ -42,12 +42,13 @@ import (
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/retry"
+ "github.com/m3db/m3/src/x/sampler"
"github.com/m3db/m3/src/x/serialize"
"github.com/m3db/m3/src/x/sync"
time0 "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
- tchannel_go "github.com/uber/tchannel-go"
+ tchannel "github.com/uber/tchannel-go"
)
// MockClient is a mock of Client interface
@@ -1187,6 +1188,34 @@ func (mr *MockOptionsMockRecorder) InstrumentOptions() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstrumentOptions", reflect.TypeOf((*MockOptions)(nil).InstrumentOptions))
}
+// SetLogErrorSampleRate mocks base method
+func (m *MockOptions) SetLogErrorSampleRate(value sampler.Rate) Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetLogErrorSampleRate", value)
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// SetLogErrorSampleRate indicates an expected call of SetLogErrorSampleRate
+func (mr *MockOptionsMockRecorder) SetLogErrorSampleRate(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLogErrorSampleRate", reflect.TypeOf((*MockOptions)(nil).SetLogErrorSampleRate), value)
+}
+
+// LogErrorSampleRate mocks base method
+func (m *MockOptions) LogErrorSampleRate() sampler.Rate {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "LogErrorSampleRate")
+ ret0, _ := ret[0].(sampler.Rate)
+ return ret0
+}
+
+// LogErrorSampleRate indicates an expected call of LogErrorSampleRate
+func (mr *MockOptionsMockRecorder) LogErrorSampleRate() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogErrorSampleRate", reflect.TypeOf((*MockOptions)(nil).LogErrorSampleRate))
+}
+
// SetTopologyInitializer mocks base method
func (m *MockOptions) SetTopologyInitializer(value topology.Initializer) Options {
m.ctrl.T.Helper()
@@ -1272,7 +1301,7 @@ func (mr *MockOptionsMockRecorder) WriteConsistencyLevel() *gomock.Call {
}
// SetChannelOptions mocks base method
-func (m *MockOptions) SetChannelOptions(value *tchannel_go.ChannelOptions) Options {
+func (m *MockOptions) SetChannelOptions(value *tchannel.ChannelOptions) Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetChannelOptions", value)
ret0, _ := ret[0].(Options)
@@ -1286,10 +1315,10 @@ func (mr *MockOptionsMockRecorder) SetChannelOptions(value interface{}) *gomock.
}
// ChannelOptions mocks base method
-func (m *MockOptions) ChannelOptions() *tchannel_go.ChannelOptions {
+func (m *MockOptions) ChannelOptions() *tchannel.ChannelOptions {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ChannelOptions")
- ret0, _ := ret[0].(*tchannel_go.ChannelOptions)
+ ret0, _ := ret[0].(*tchannel.ChannelOptions)
return ret0
}
@@ -2391,6 +2420,90 @@ func (mr *MockOptionsMockRecorder) UseV2BatchAPIs() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UseV2BatchAPIs", reflect.TypeOf((*MockOptions)(nil).UseV2BatchAPIs))
}
+// SetIterationOptions mocks base method
+func (m *MockOptions) SetIterationOptions(arg0 index.IterationOptions) Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetIterationOptions", arg0)
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// SetIterationOptions indicates an expected call of SetIterationOptions
+func (mr *MockOptionsMockRecorder) SetIterationOptions(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIterationOptions", reflect.TypeOf((*MockOptions)(nil).SetIterationOptions), arg0)
+}
+
+// IterationOptions mocks base method
+func (m *MockOptions) IterationOptions() index.IterationOptions {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IterationOptions")
+ ret0, _ := ret[0].(index.IterationOptions)
+ return ret0
+}
+
+// IterationOptions indicates an expected call of IterationOptions
+func (mr *MockOptionsMockRecorder) IterationOptions() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IterationOptions", reflect.TypeOf((*MockOptions)(nil).IterationOptions))
+}
+
+// SetWriteTimestampOffset mocks base method
+func (m *MockOptions) SetWriteTimestampOffset(value time.Duration) AdminOptions {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetWriteTimestampOffset", value)
+ ret0, _ := ret[0].(AdminOptions)
+ return ret0
+}
+
+// SetWriteTimestampOffset indicates an expected call of SetWriteTimestampOffset
+func (mr *MockOptionsMockRecorder) SetWriteTimestampOffset(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteTimestampOffset", reflect.TypeOf((*MockOptions)(nil).SetWriteTimestampOffset), value)
+}
+
+// WriteTimestampOffset mocks base method
+func (m *MockOptions) WriteTimestampOffset() time.Duration {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WriteTimestampOffset")
+ ret0, _ := ret[0].(time.Duration)
+ return ret0
+}
+
+// WriteTimestampOffset indicates an expected call of WriteTimestampOffset
+func (mr *MockOptionsMockRecorder) WriteTimestampOffset() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTimestampOffset", reflect.TypeOf((*MockOptions)(nil).WriteTimestampOffset))
+}
+
+// SetNewConnectionFn mocks base method
+func (m *MockOptions) SetNewConnectionFn(value NewConnectionFn) AdminOptions {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetNewConnectionFn", value)
+ ret0, _ := ret[0].(AdminOptions)
+ return ret0
+}
+
+// SetNewConnectionFn indicates an expected call of SetNewConnectionFn
+func (mr *MockOptionsMockRecorder) SetNewConnectionFn(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNewConnectionFn", reflect.TypeOf((*MockOptions)(nil).SetNewConnectionFn), value)
+}
+
+// NewConnectionFn mocks base method
+func (m *MockOptions) NewConnectionFn() NewConnectionFn {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NewConnectionFn")
+ ret0, _ := ret[0].(NewConnectionFn)
+ return ret0
+}
+
+// NewConnectionFn indicates an expected call of NewConnectionFn
+func (mr *MockOptionsMockRecorder) NewConnectionFn() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewConnectionFn", reflect.TypeOf((*MockOptions)(nil).NewConnectionFn))
+}
+
// MockAdminOptions is a mock of AdminOptions interface
type MockAdminOptions struct {
ctrl *gomock.Controller
@@ -2554,6 +2667,34 @@ func (mr *MockAdminOptionsMockRecorder) InstrumentOptions() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstrumentOptions", reflect.TypeOf((*MockAdminOptions)(nil).InstrumentOptions))
}
+// SetLogErrorSampleRate mocks base method
+func (m *MockAdminOptions) SetLogErrorSampleRate(value sampler.Rate) Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetLogErrorSampleRate", value)
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// SetLogErrorSampleRate indicates an expected call of SetLogErrorSampleRate
+func (mr *MockAdminOptionsMockRecorder) SetLogErrorSampleRate(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLogErrorSampleRate", reflect.TypeOf((*MockAdminOptions)(nil).SetLogErrorSampleRate), value)
+}
+
+// LogErrorSampleRate mocks base method
+func (m *MockAdminOptions) LogErrorSampleRate() sampler.Rate {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "LogErrorSampleRate")
+ ret0, _ := ret[0].(sampler.Rate)
+ return ret0
+}
+
+// LogErrorSampleRate indicates an expected call of LogErrorSampleRate
+func (mr *MockAdminOptionsMockRecorder) LogErrorSampleRate() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogErrorSampleRate", reflect.TypeOf((*MockAdminOptions)(nil).LogErrorSampleRate))
+}
+
// SetTopologyInitializer mocks base method
func (m *MockAdminOptions) SetTopologyInitializer(value topology.Initializer) Options {
m.ctrl.T.Helper()
@@ -2639,7 +2780,7 @@ func (mr *MockAdminOptionsMockRecorder) WriteConsistencyLevel() *gomock.Call {
}
// SetChannelOptions mocks base method
-func (m *MockAdminOptions) SetChannelOptions(value *tchannel_go.ChannelOptions) Options {
+func (m *MockAdminOptions) SetChannelOptions(value *tchannel.ChannelOptions) Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetChannelOptions", value)
ret0, _ := ret[0].(Options)
@@ -2653,10 +2794,10 @@ func (mr *MockAdminOptionsMockRecorder) SetChannelOptions(value interface{}) *go
}
// ChannelOptions mocks base method
-func (m *MockAdminOptions) ChannelOptions() *tchannel_go.ChannelOptions {
+func (m *MockAdminOptions) ChannelOptions() *tchannel.ChannelOptions {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ChannelOptions")
- ret0, _ := ret[0].(*tchannel_go.ChannelOptions)
+ ret0, _ := ret[0].(*tchannel.ChannelOptions)
return ret0
}
@@ -3758,6 +3899,90 @@ func (mr *MockAdminOptionsMockRecorder) UseV2BatchAPIs() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UseV2BatchAPIs", reflect.TypeOf((*MockAdminOptions)(nil).UseV2BatchAPIs))
}
+// SetIterationOptions mocks base method
+func (m *MockAdminOptions) SetIterationOptions(arg0 index.IterationOptions) Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetIterationOptions", arg0)
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// SetIterationOptions indicates an expected call of SetIterationOptions
+func (mr *MockAdminOptionsMockRecorder) SetIterationOptions(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIterationOptions", reflect.TypeOf((*MockAdminOptions)(nil).SetIterationOptions), arg0)
+}
+
+// IterationOptions mocks base method
+func (m *MockAdminOptions) IterationOptions() index.IterationOptions {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IterationOptions")
+ ret0, _ := ret[0].(index.IterationOptions)
+ return ret0
+}
+
+// IterationOptions indicates an expected call of IterationOptions
+func (mr *MockAdminOptionsMockRecorder) IterationOptions() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IterationOptions", reflect.TypeOf((*MockAdminOptions)(nil).IterationOptions))
+}
+
+// SetWriteTimestampOffset mocks base method
+func (m *MockAdminOptions) SetWriteTimestampOffset(value time.Duration) AdminOptions {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetWriteTimestampOffset", value)
+ ret0, _ := ret[0].(AdminOptions)
+ return ret0
+}
+
+// SetWriteTimestampOffset indicates an expected call of SetWriteTimestampOffset
+func (mr *MockAdminOptionsMockRecorder) SetWriteTimestampOffset(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteTimestampOffset", reflect.TypeOf((*MockAdminOptions)(nil).SetWriteTimestampOffset), value)
+}
+
+// WriteTimestampOffset mocks base method
+func (m *MockAdminOptions) WriteTimestampOffset() time.Duration {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WriteTimestampOffset")
+ ret0, _ := ret[0].(time.Duration)
+ return ret0
+}
+
+// WriteTimestampOffset indicates an expected call of WriteTimestampOffset
+func (mr *MockAdminOptionsMockRecorder) WriteTimestampOffset() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTimestampOffset", reflect.TypeOf((*MockAdminOptions)(nil).WriteTimestampOffset))
+}
+
+// SetNewConnectionFn mocks base method
+func (m *MockAdminOptions) SetNewConnectionFn(value NewConnectionFn) AdminOptions {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetNewConnectionFn", value)
+ ret0, _ := ret[0].(AdminOptions)
+ return ret0
+}
+
+// SetNewConnectionFn indicates an expected call of SetNewConnectionFn
+func (mr *MockAdminOptionsMockRecorder) SetNewConnectionFn(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNewConnectionFn", reflect.TypeOf((*MockAdminOptions)(nil).SetNewConnectionFn), value)
+}
+
+// NewConnectionFn mocks base method
+func (m *MockAdminOptions) NewConnectionFn() NewConnectionFn {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NewConnectionFn")
+ ret0, _ := ret[0].(NewConnectionFn)
+ return ret0
+}
+
+// NewConnectionFn indicates an expected call of NewConnectionFn
+func (mr *MockAdminOptionsMockRecorder) NewConnectionFn() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewConnectionFn", reflect.TypeOf((*MockAdminOptions)(nil).NewConnectionFn))
+}
+
// SetOrigin mocks base method
func (m *MockAdminOptions) SetOrigin(value topology.Host) AdminOptions {
m.ctrl.T.Helper()
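
The regenerated mocks record each call and return a canned value, as gomock does for every method. A hand-rolled illustration of that record-and-return pattern for the new sample-rate accessors (local types, not the generated gomock code):

package main

import "fmt"

type rate float64 // stand-in for sampler.Rate

// mockOptions illustrates what the generated mock does for each method:
// record that the call happened, then return a canned value.
type mockOptions struct {
	calls      []string
	sampleRate rate
}

func (m *mockOptions) SetLogErrorSampleRate(v rate) *mockOptions {
	m.calls = append(m.calls, "SetLogErrorSampleRate")
	m.sampleRate = v
	return m
}

func (m *mockOptions) LogErrorSampleRate() rate {
	m.calls = append(m.calls, "LogErrorSampleRate")
	return m.sampleRate
}

func main() {
	m := &mockOptions{}
	m.SetLogErrorSampleRate(0.01)
	fmt.Println(m.LogErrorSampleRate(), m.calls)
}
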
diff --git a/src/dbnode/client/config.go b/src/dbnode/client/config.go
index 347b53ab77..cea4240e87 100644
--- a/src/dbnode/client/config.go
+++ b/src/dbnode/client/config.go
@@ -35,6 +35,7 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/retry"
+ "github.com/m3db/m3/src/x/sampler"
xsync "github.com/m3db/m3/src/x/sync"
)
@@ -76,6 +77,9 @@ type Configuration struct {
// FetchRetry is the fetch retry config.
FetchRetry *retry.Configuration `yaml:"fetchRetry"`
+ // LogErrorSampleRate is the rate at which client write/fetch errors are sampled for logging.
+ LogErrorSampleRate sampler.Rate `yaml:"logErrorSampleRate"`
+
// BackgroundHealthCheckFailLimit is the amount of times a background check
// must fail before a connection is taken out of consideration.
BackgroundHealthCheckFailLimit *int `yaml:"backgroundHealthCheckFailLimit"`
@@ -99,6 +103,17 @@ type Configuration struct {
// UseV2BatchAPIs determines whether the V2 batch APIs are used. Note that the M3DB nodes must
// have support for the V2 APIs in order for this feature to be used.
UseV2BatchAPIs *bool `yaml:"useV2BatchAPIs"`
+
+ // WriteTimestampOffset offsets all writes by the specified duration into the past.
+ WriteTimestampOffset *time.Duration `yaml:"writeTimestampOffset"`
+
+ // FetchSeriesBlocksBatchConcurrency sets the number of batches of blocks to retrieve
+ // in parallel from a remote peer. Defaults to NumCPU / 2.
+ FetchSeriesBlocksBatchConcurrency *int `yaml:"fetchSeriesBlocksBatchConcurrency"`
+
+ // FetchSeriesBlocksBatchSize sets the number of blocks to retrieve in a single batch
+ // from the remote peer. Defaults to 4096.
+ FetchSeriesBlocksBatchSize *int `yaml:"fetchSeriesBlocksBatchSize"`
}
// ProtoConfiguration is the configuration for running with ProtoDataMode enabled.
@@ -158,6 +173,10 @@ func (c *Configuration) Validate() error {
return fmt.Errorf("m3db client connectTimeout was: %d but must be >= 0", *c.ConnectTimeout)
}
+ if err := c.LogErrorSampleRate.Validate(); err != nil {
+ return fmt.Errorf("m3db client error validating log error sample rate: %v", err)
+ }
+
if c.BackgroundHealthCheckFailLimit != nil &&
(*c.BackgroundHealthCheckFailLimit < 0 || *c.BackgroundHealthCheckFailLimit > 10) {
return fmt.Errorf(
@@ -296,7 +315,8 @@ func (c Configuration) NewAdminClient(
v := NewAdminOptions().
SetTopologyInitializer(syncTopoInit).
SetAsyncTopologyInitializers(asyncTopoInits).
- SetInstrumentOptions(iopts)
+ SetInstrumentOptions(iopts).
+ SetLogErrorSampleRate(c.LogErrorSampleRate)
if c.UseV2BatchAPIs != nil {
v = v.SetUseV2BatchAPIs(*c.UseV2BatchAPIs)
@@ -352,9 +372,21 @@ func (c Configuration) NewAdminClient(
}
if c.WriteRetry != nil {
v = v.SetWriteRetrier(c.WriteRetry.NewRetrier(writeRequestScope))
+ } else {
+ // Write retry not set explicitly, but we still want metrics
+ // emitted for the write retrier under the write request scope.
+ retrierOpts := v.WriteRetrier().Options().
+ SetMetricsScope(writeRequestScope)
+ v = v.SetWriteRetrier(retry.NewRetrier(retrierOpts))
}
if c.FetchRetry != nil {
v = v.SetFetchRetrier(c.FetchRetry.NewRetrier(fetchRequestScope))
+ } else {
+ // Fetch retry not set explicitly, but we still want metrics
+ // emitted for the fetch retrier under the fetch request scope.
+ retrierOpts := v.FetchRetrier().Options().
+ SetMetricsScope(fetchRequestScope)
+ v = v.SetFetchRetrier(retry.NewRetrier(retrierOpts))
}
if syncClientOverrides.TargetHostQueueFlushSize != nil {
v = v.SetHostQueueOpsFlushSize(*syncClientOverrides.TargetHostQueueFlushSize)
@@ -387,8 +419,21 @@ func (c Configuration) NewAdminClient(
v = v.SetSchemaRegistry(schemaRegistry)
}
- // Apply programtic custom options last
+ // Cast to admin options to apply admin config options.
opts := v.(AdminOptions)
+
+ if c.WriteTimestampOffset != nil {
+ opts = opts.SetWriteTimestampOffset(*c.WriteTimestampOffset)
+ }
+
+ if c.FetchSeriesBlocksBatchConcurrency != nil {
+ opts = opts.SetFetchSeriesBlocksBatchConcurrency(*c.FetchSeriesBlocksBatchConcurrency)
+ }
+ if c.FetchSeriesBlocksBatchSize != nil {
+ opts = opts.SetFetchSeriesBlocksBatchSize(*c.FetchSeriesBlocksBatchSize)
+ }
+
+ // Apply programmatic custom options last.
for _, opt := range custom {
opts = opt(opts)
}
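
The config plumbing uses optional pointer fields so an unset YAML key is distinguishable from an explicit zero, and only set fields override the option defaults. A minimal sketch of that guard pattern, assuming simplified local types:

package main

import (
	"fmt"
	"time"
)

// adminOptions is a stand-in for the client AdminOptions being configured.
type adminOptions struct {
	writeTimestampOffset time.Duration
	batchConcurrency     int
}

// config mirrors the YAML shape: optional fields are pointers so "unset"
// is distinguishable from an explicit zero value.
type config struct {
	WriteTimestampOffset              *time.Duration
	FetchSeriesBlocksBatchConcurrency *int
}

func (c config) apply(opts adminOptions) adminOptions {
	if c.WriteTimestampOffset != nil {
		opts.writeTimestampOffset = *c.WriteTimestampOffset
	}
	if c.FetchSeriesBlocksBatchConcurrency != nil {
		opts.batchConcurrency = *c.FetchSeriesBlocksBatchConcurrency
	}
	return opts
}

func main() {
	offset := 30 * time.Second
	opts := config{WriteTimestampOffset: &offset}.apply(adminOptions{batchConcurrency: 4})
	fmt.Println(opts.writeTimestampOffset, opts.batchConcurrency) // 30s 4
}
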
diff --git a/src/dbnode/client/connection_pool.go b/src/dbnode/client/connection_pool.go
index 47d7a7d0b5..bcfc04973c 100644
--- a/src/dbnode/client/connection_pool.go
+++ b/src/dbnode/client/connection_pool.go
@@ -30,12 +30,10 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
- nchannel "github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/node/channel"
"github.com/m3db/m3/src/dbnode/topology"
xclose "github.com/m3db/m3/src/x/close"
+ "github.com/m3db/stackmurmur3/v2"
- "github.com/spaolacci/murmur3"
- "github.com/uber/tchannel-go"
"github.com/uber/tchannel-go/thrift"
"go.uber.org/zap"
)
@@ -59,7 +57,6 @@ type connPool struct {
used int64
connectRand rand.Source
healthCheckRand rand.Source
- newConn newConnFn
healthCheckNewConn healthCheckFn
healthCheck healthCheckFn
sleepConnect sleepFn
@@ -73,17 +70,17 @@ type conn struct {
client rpc.TChanNode
}
-type newConnFn func(channelName string, addr string, opts Options) (xclose.SimpleCloser, rpc.TChanNode, error)
+// NewConnectionFn is a function that creates a connection.
+type NewConnectionFn func(
+ channelName string, addr string, opts Options,
+) (xclose.SimpleCloser, rpc.TChanNode, error)
type healthCheckFn func(client rpc.TChanNode, opts Options) error
type sleepFn func(t time.Duration)
-// Allow for test override
-var globalNewConn = newConn
-
func newConnectionPool(host topology.Host, opts Options) connectionPool {
- seed := int64(murmur3.Sum32([]byte(host.Address())))
+ seed := int64(murmur3.StringSum32(host.Address()))
p := &connPool{
opts: opts,
@@ -92,7 +89,6 @@ func newConnectionPool(host topology.Host, opts Options) connectionPool {
poolLen: 0,
connectRand: rand.NewSource(seed),
healthCheckRand: rand.NewSource(seed + 1),
- newConn: globalNewConn,
healthCheckNewConn: healthCheck,
healthCheck: healthCheck,
sleepConnect: time.Sleep,
@@ -177,11 +173,12 @@ func (p *connPool) connectEvery(interval time.Duration, stutter time.Duration) {
var wg sync.WaitGroup
for i := 0; i < target-poolLen; i++ {
wg.Add(1)
+ newConnFn := p.opts.NewConnectionFn()
go func() {
defer wg.Done()
// Create connection
- channel, client, err := p.newConn(channelName, address, p.opts)
+ channel, client, err := newConnFn(channelName, address, p.opts)
if err != nil {
log.Debug("could not connect", zap.String("host", address), zap.Error(err))
return
@@ -296,17 +293,6 @@ func (p *connPool) healthCheckEvery(interval time.Duration, stutter time.Duratio
}
}
-func newConn(channelName string, address string, opts Options) (xclose.SimpleCloser, rpc.TChanNode, error) {
- channel, err := tchannel.NewChannel(channelName, opts.ChannelOptions())
- if err != nil {
- return nil, nil, err
- }
- endpoint := &thrift.ClientOptions{HostPort: address}
- thriftClient := thrift.NewClient(channel, nchannel.ChannelName, endpoint)
- client := rpc.NewTChanNodeClient(thriftClient)
- return channel, client, nil
-}
-
func healthCheck(client rpc.TChanNode, opts Options) error {
tctx, _ := thrift.NewContext(opts.HostConnectTimeout())
result, err := client.Health(tctx)
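
The pool no longer owns a newConn field; it reads the dialer from its options each time it connects, so callers and tests can inject their own NewConnectionFn (as the updated tests below do). A sketch of the injection point with local stand-in types:

package main

import (
	"errors"
	"fmt"
)

// newConnectionFn mirrors the NewConnectionFn shape: dial an address and
// return a client handle or an error. Types are local stand-ins.
type newConnectionFn func(addr string) (string, error)

type options struct{ newConn newConnectionFn }

type pool struct{ opts options }

func (p *pool) connect(addr string) {
	newConnFn := p.opts.newConn // fetched from options, not a pool field
	client, err := newConnFn(addr)
	if err != nil {
		fmt.Println("could not connect:", err)
		return
	}
	fmt.Println("connected:", client)
}

func main() {
	flaky := func(addr string) (string, error) {
		return "", errors.New("a connect error")
	}
	(&pool{opts: options{newConn: flaky}}).connect("127.0.0.1:9000")
}
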
diff --git a/src/dbnode/client/connection_pool_test.go b/src/dbnode/client/connection_pool_test.go
index e2f43a8289..0724171f6f 100644
--- a/src/dbnode/client/connection_pool_test.go
+++ b/src/dbnode/client/connection_pool_test.go
@@ -82,14 +82,19 @@ func TestConnectionPoolConnectsAndRetriesConnects(t *testing.T) {
opts := newConnectionPoolTestOptions()
opts = opts.SetMaxConnectionCount(4)
- conns := newConnectionPool(h, opts).(*connPool)
- conns.newConn = func(ch string, addr string, opts Options) (xclose.SimpleCloser, rpc.TChanNode, error) {
+
+ fn := func(
+ ch string, addr string, opts Options,
+ ) (xclose.SimpleCloser, rpc.TChanNode, error) {
attempt := int(atomic.AddInt32(&attempts, 1))
if attempt == 1 {
return nil, nil, fmt.Errorf("a connect error")
}
return channelNone, nil, nil
}
+
+ opts = opts.SetNewConnectionFn(fn)
+ conns := newConnectionPool(h, opts).(*connPool)
conns.healthCheckNewConn = func(client rpc.TChanNode, opts Options) error {
if atomic.LoadInt32(&rounds) == 1 {
// If second round then fail health check
@@ -230,8 +235,9 @@ func TestConnectionPoolHealthChecks(t *testing.T) {
failsDoneWg[i].Add(1)
}
- conns := newConnectionPool(h, opts).(*connPool)
- conns.newConn = func(ch string, addr string, opts Options) (xclose.SimpleCloser, rpc.TChanNode, error) {
+ fn := func(
+ ch string, addr string, opts Options,
+ ) (xclose.SimpleCloser, rpc.TChanNode, error) {
attempt := atomic.AddInt32(&newConnAttempt, 1)
if attempt == 1 {
return channelNone, client1, nil
@@ -240,6 +246,9 @@ func TestConnectionPoolHealthChecks(t *testing.T) {
}
return nil, nil, fmt.Errorf("spawning only 2 connections")
}
+ opts = opts.SetNewConnectionFn(fn)
+
+ conns := newConnectionPool(h, opts).(*connPool)
conns.healthCheckNewConn = func(client rpc.TChanNode, opts Options) error {
return nil
}
diff --git a/src/dbnode/client/fetch_state.go b/src/dbnode/client/fetch_state.go
index dbc5131589..c991e19db5 100644
--- a/src/dbnode/client/fetch_state.go
+++ b/src/dbnode/client/fetch_state.go
@@ -28,8 +28,10 @@ import (
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/x/xpool"
+ xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/serialize"
)
@@ -47,7 +49,8 @@ const (
)
var (
- errFetchStateStillProcessing = errors.New("[invariant violated] fetch state is still processing, unable to create response")
+ errFetchStateStillProcessing = errors.New("[invariant violated] fetch " +
+ "state is still processing, unable to create response")
)
type fetchState struct {
@@ -134,6 +137,13 @@ func (f *fetchState) completionFn(
result interface{},
resultErr error,
) {
+ if IsBadRequestError(resultErr) {
+ // Wrap with invalid params and non-retryable so it is
+ // not retried.
+ resultErr = xerrors.NewInvalidParamsError(resultErr)
+ resultErr = xerrors.NewNonRetryableError(resultErr)
+ }
+
f.Lock()
defer func() {
f.Unlock()
@@ -175,7 +185,9 @@ func (f *fetchState) markDoneWithLock(err error) {
f.Signal()
}
-func (f *fetchState) asTaggedIDsIterator(pools fetchTaggedPools) (TaggedIDsIterator, FetchResponseMetadata, error) {
+func (f *fetchState) asTaggedIDsIterator(
+ pools fetchTaggedPools,
+) (TaggedIDsIterator, FetchResponseMetadata, error) {
f.Lock()
defer f.Unlock()
@@ -197,7 +209,11 @@ func (f *fetchState) asTaggedIDsIterator(pools fetchTaggedPools) (TaggedIDsItera
return f.tagResultAccumulator.AsTaggedIDsIterator(limit, pools)
}
-func (f *fetchState) asEncodingSeriesIterators(pools fetchTaggedPools, descr namespace.SchemaDescr) (encoding.SeriesIterators, FetchResponseMetadata, error) {
+func (f *fetchState) asEncodingSeriesIterators(
+ pools fetchTaggedPools,
+ descr namespace.SchemaDescr,
+ opts index.IterationOptions,
+) (encoding.SeriesIterators, FetchResponseMetadata, error) {
f.Lock()
defer f.Unlock()
@@ -216,7 +232,7 @@ func (f *fetchState) asEncodingSeriesIterators(pools fetchTaggedPools, descr nam
}
limit := f.fetchTaggedOp.requestLimit(maxInt)
- return f.tagResultAccumulator.AsEncodingSeriesIterators(limit, pools, descr)
+ return f.tagResultAccumulator.AsEncodingSeriesIterators(limit, pools, descr, opts)
}
func (f *fetchState) asAggregatedTagsIterator(pools fetchTaggedPools) (AggregatedTagsIterator, FetchResponseMetadata, error) {
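
Bad-request responses are wrapped as invalid-params and non-retryable before the retrier sees them, so a request that can never succeed is not retried. A sketch of the wrap-and-check idea using a local error type in place of the m3 xerrors helpers:

package main

import (
	"errors"
	"fmt"
)

// nonRetryableError marks an error that retriers must not retry, playing
// the role of xerrors.NewNonRetryableError in this sketch.
type nonRetryableError struct{ err error }

func (e nonRetryableError) Error() string { return e.err.Error() }
func (e nonRetryableError) Unwrap() error { return e.err }

func isNonRetryable(err error) bool {
	var nr nonRetryableError
	return errors.As(err, &nr)
}

func main() {
	err := errors.New("bad request: unknown namespace")
	// A bad request can never succeed on retry, so mark it non-retryable.
	err = nonRetryableError{err: err}

	if isNonRetryable(err) {
		fmt.Println("not retrying:", err)
	}
}
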
diff --git a/src/dbnode/client/fetch_tagged_results_accumulator.go b/src/dbnode/client/fetch_tagged_results_accumulator.go
index 42531d6977..32f3b3aa2d 100644
--- a/src/dbnode/client/fetch_tagged_results_accumulator.go
+++ b/src/dbnode/client/fetch_tagged_results_accumulator.go
@@ -30,9 +30,11 @@ import (
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/topology"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
+ xtime "github.com/m3db/m3/src/x/time"
)
type fetchTaggedResultAccumulatorOpts struct {
@@ -61,7 +63,7 @@ type fetchTaggedResultAccumulator struct {
numHostsPending int32
numShardsPending int32
- errors xerrors.Errors
+ errors []error
fetchResponses fetchTaggedIDResults
aggResponses aggregateResults
exhaustive bool
@@ -201,9 +203,18 @@ func (accum *fetchTaggedResultAccumulator) accumulatedResult(
// all shards, so we need to fail
if accum.numHostsPending == 0 && accum.numShardsPending != 0 {
doneAccumulating := true
- return doneAccumulating, fmt.Errorf(
- "unable to satisfy consistency requirements for %d shards [ err = %s ]",
- accum.numShardsPending, accum.errors.Error())
+ // NB(r): fmt.Errorf discards the underlying error types, so re-apply
+ // the invalid-params/non-retryable classification when any shard
+ // error was a bad request.
+ err := fmt.Errorf("unable to satisfy consistency requirements: shards=%d, err=%v",
+ accum.numShardsPending, accum.errors)
+ for i := range accum.errors {
+ if IsBadRequestError(accum.errors[i]) {
+ err = xerrors.NewInvalidParamsError(err)
+ err = xerrors.NewNonRetryableError(err)
+ break
+ }
+ }
+ return doneAccumulating, err
}
doneAccumulating := false
@@ -267,6 +278,7 @@ func (accum *fetchTaggedResultAccumulator) sliceResponsesAsSeriesIter(
pools fetchTaggedPools,
elems fetchTaggedIDResults,
descr namespace.SchemaDescr,
+ opts index.IterationOptions,
) encoding.SeriesIterator {
numElems := len(elems)
iters := pools.MultiReaderIteratorArray().Get(numElems)[:numElems]
@@ -291,19 +303,21 @@ func (accum *fetchTaggedResultAccumulator) sliceResponsesAsSeriesIter(
nsID := pools.CheckedBytesWrapper().Get(elem.NameSpace)
seriesIter := pools.SeriesIterator().Get()
seriesIter.Reset(encoding.SeriesIteratorOptions{
- ID: pools.ID().BinaryID(tsID),
- Namespace: pools.ID().BinaryID(nsID),
- Tags: decoder,
- StartInclusive: accum.startTime,
- EndExclusive: accum.endTime,
- Replicas: iters,
+ ID: pools.ID().BinaryID(tsID),
+ Namespace: pools.ID().BinaryID(nsID),
+ Tags: decoder,
+ StartInclusive: xtime.ToUnixNano(accum.startTime),
+ EndExclusive: xtime.ToUnixNano(accum.endTime),
+ Replicas: iters,
+ SeriesIteratorConsolidator: opts.SeriesIteratorConsolidator,
})
return seriesIter
}
func (accum *fetchTaggedResultAccumulator) AsEncodingSeriesIterators(
- limit int, pools fetchTaggedPools, descr namespace.SchemaDescr,
+ limit int, pools fetchTaggedPools,
+ descr namespace.SchemaDescr, opts index.IterationOptions,
) (encoding.SeriesIterators, FetchResponseMetadata, error) {
results := fetchTaggedIDResultsSortedByID(accum.fetchResponses)
sort.Sort(results)
@@ -320,7 +334,7 @@ func (accum *fetchTaggedResultAccumulator) AsEncodingSeriesIterators(
count := 0
moreElems := false
accum.fetchResponses.forEachID(func(elems fetchTaggedIDResults, hasMore bool) bool {
- seriesIter := accum.sliceResponsesAsSeriesIter(pools, elems, descr)
+ seriesIter := accum.sliceResponsesAsSeriesIter(pools, elems, descr, opts)
result.SetAt(count, seriesIter)
count++
moreElems = hasMore
diff --git a/src/dbnode/client/fetch_tagged_results_accumulator_merge_test.go b/src/dbnode/client/fetch_tagged_results_accumulator_merge_test.go
index 13a7d6b592..9614dc9f56 100644
--- a/src/dbnode/client/fetch_tagged_results_accumulator_merge_test.go
+++ b/src/dbnode/client/fetch_tagged_results_accumulator_merge_test.go
@@ -29,6 +29,7 @@ import (
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/convert"
+ "github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/topology/testutil"
"github.com/m3db/m3/src/dbnode/ts"
@@ -175,7 +176,8 @@ func TestFetchTaggedResultsAccumulatorIdsMergeReportsExhaustiveCorrectly(t *test
matcher := newTestSerieses(1, 15).indexMatcher()
require.True(t, matcher.Matches(resultsIter))
- iters, meta, err := accum.AsEncodingSeriesIterators(100, th.pools, nil)
+ iters, meta, err := accum.AsEncodingSeriesIterators(100, th.pools,
+ nil, index.IterationOptions{})
require.NoError(t, err)
require.False(t, meta.Exhaustive)
newTestSerieses(1, 15).assertMatchesEncodingIters(t, iters)
@@ -229,7 +231,8 @@ func TestFetchTaggedResultsAccumulatorSeriesItersDatapoints(t *testing.T) {
matcher := newTestSerieses(1, 8).indexMatcher()
require.True(t, matcher.Matches(resultsIter))
- iters, meta, err := accum.AsEncodingSeriesIterators(10, th.pools, nil)
+ iters, meta, err := accum.AsEncodingSeriesIterators(10, th.pools,
+ nil, index.IterationOptions{})
require.NoError(t, err)
require.False(t, meta.Exhaustive)
append(sg0, sg1...).assertMatchesEncodingIters(t, iters)
@@ -283,7 +286,8 @@ func TestFetchTaggedResultsAccumulatorSeriesItersDatapointsNSplit(t *testing.T)
matcher := newTestSerieses(1, 8).indexMatcher()
require.True(t, matcher.Matches(resultsIter))
- iters, meta, err := accum.AsEncodingSeriesIterators(10, th.pools, nil)
+ iters, meta, err := accum.AsEncodingSeriesIterators(10, th.pools,
+ nil, index.IterationOptions{})
require.NoError(t, err)
require.True(t, meta.Exhaustive)
// ensure iters are valid after the lifecycle of the accumulator
diff --git a/src/dbnode/client/fetch_tagged_results_accumulator_misc_test.go b/src/dbnode/client/fetch_tagged_results_accumulator_misc_test.go
index 2eecde11b6..2eb8d6837b 100644
--- a/src/dbnode/client/fetch_tagged_results_accumulator_misc_test.go
+++ b/src/dbnode/client/fetch_tagged_results_accumulator_misc_test.go
@@ -33,6 +33,7 @@ import (
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/x/xpool"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
@@ -47,7 +48,9 @@ import (
func TestFetchTaggedResultsAccumulatorClearResetsState(t *testing.T) {
pools := newTestFetchTaggedPools()
accum := newFetchTaggedResultAccumulator()
- iter, meta, err := accum.AsEncodingSeriesIterators(100, pools, nil)
+
+ iter, meta, err := accum.AsEncodingSeriesIterators(100, pools, nil,
+ index.IterationOptions{})
require.NoError(t, err)
require.True(t, meta.Exhaustive)
require.Equal(t, 0, iter.Len())
@@ -291,7 +294,9 @@ func initTestFetchTaggedPools() *testFetchTaggedPools {
pools.checkedBytesWrapper = xpool.NewCheckedBytesWrapperPool(opts)
pools.checkedBytesWrapper.Init()
- pools.tagDecoder = serialize.NewTagDecoderPool(serialize.NewTagDecoderOptions(), opts)
+ pools.tagDecoder = serialize.NewTagDecoderPool(
+ serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
+ opts)
pools.tagDecoder.Init()
return pools
diff --git a/src/dbnode/client/options.go b/src/dbnode/client/options.go
index 6957f6f89d..5ddfe73a35 100644
--- a/src/dbnode/client/options.go
+++ b/src/dbnode/client/options.go
@@ -32,18 +32,24 @@ import (
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/dbnode/encoding/proto"
"github.com/m3db/m3/src/dbnode/environment"
+ "github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/namespace"
+ nchannel "github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/node/channel"
m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
+ "github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/topology"
+ xclose "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
xretry "github.com/m3db/m3/src/x/retry"
+ "github.com/m3db/m3/src/x/sampler"
"github.com/m3db/m3/src/x/serialize"
xsync "github.com/m3db/m3/src/x/sync"
tchannel "github.com/uber/tchannel-go"
+ "github.com/uber/tchannel-go/thrift"
)
const (
@@ -213,6 +219,7 @@ type options struct {
runtimeOptsMgr m3dbruntime.OptionsManager
clockOpts clock.Options
instrumentOpts instrument.Options
+ logErrorSampleRate sampler.Rate
topologyInitializer topology.Initializer
readConsistencyLevel topology.ReadConsistencyLevel
writeConsistencyLevel topology.ConsistencyLevel
@@ -239,6 +246,7 @@ type options struct {
writeRetrier xretry.Retrier
fetchRetrier xretry.Retrier
streamBlocksRetrier xretry.Retrier
+ newConnectionFn NewConnectionFn
readerIteratorAllocate encoding.ReaderIteratorAllocate
writeOperationPoolSize int
writeTaggedOperationPoolSize int
@@ -265,6 +273,8 @@ type options struct {
asyncWriteWorkerPool xsync.PooledWorkerPool
asyncWriteMaxConcurrency int
useV2BatchAPIs bool
+ iterationOptions index.IterationOptions
+ writeTimestampOffset time.Duration
}
// NewOptions creates a new set of client options with defaults
@@ -294,6 +304,19 @@ func NewOptionsForAsyncClusters(opts Options, topoInits []topology.Initializer,
return result
}
+func defaultNewConnectionFn(
+ channelName string, address string, opts Options,
+) (xclose.SimpleCloser, rpc.TChanNode, error) {
+ channel, err := tchannel.NewChannel(channelName, opts.ChannelOptions())
+ if err != nil {
+ return nil, nil, err
+ }
+ endpoint := &thrift.ClientOptions{HostPort: address}
+ thriftClient := thrift.NewClient(channel, nchannel.ChannelName, endpoint)
+ client := rpc.NewTChanNodeClient(thriftClient)
+ return channel, client, nil
+}
+
func newOptions() *options {
buckets := defaultIdentifierPoolBytesPoolSizes
bytesPool := pool.NewCheckedBytesPool(buckets, nil,
@@ -341,8 +364,9 @@ func newOptions() *options {
tagEncoderPoolSize: defaultTagEncoderPoolSize,
tagEncoderOpts: serialize.NewTagEncoderOptions(),
tagDecoderPoolSize: defaultTagDecoderPoolSize,
- tagDecoderOpts: serialize.NewTagDecoderOptions(),
+ tagDecoderOpts: serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
streamBlocksRetrier: defaultStreamBlocksRetrier,
+ newConnectionFn: defaultNewConnectionFn,
writeOperationPoolSize: defaultWriteOpPoolSize,
writeTaggedOperationPoolSize: defaultWriteTaggedOpPoolSize,
fetchBatchOpPoolSize: defaultFetchBatchOpPoolSize,
@@ -391,9 +415,12 @@ func validate(opts *options) error {
); err != nil {
return err
}
- return topology.ValidateConnectConsistencyLevel(
+ if err := topology.ValidateConnectConsistencyLevel(
opts.clusterConnectConsistencyLevel,
- )
+ ); err != nil {
+ return err
+ }
+ return opts.logErrorSampleRate.Validate()
}
func (o *options) Validate() error {
@@ -452,6 +479,16 @@ func (o *options) InstrumentOptions() instrument.Options {
return o.instrumentOpts
}
+func (o *options) SetLogErrorSampleRate(value sampler.Rate) Options {
+ opts := *o
+ opts.logErrorSampleRate = value
+ return &opts
+}
+
+func (o *options) LogErrorSampleRate() sampler.Rate {
+ return o.logErrorSampleRate
+}
+
func (o *options) SetTopologyInitializer(value topology.Initializer) Options {
opts := *o
opts.topologyInitializer = value
@@ -712,6 +749,16 @@ func (o *options) StreamBlocksRetrier() xretry.Retrier {
return o.streamBlocksRetrier
}
+func (o *options) SetNewConnectionFn(value NewConnectionFn) AdminOptions {
+ opts := *o
+ opts.newConnectionFn = value
+ return &opts
+}
+
+func (o *options) NewConnectionFn() NewConnectionFn {
+ return o.newConnectionFn
+}
+
func (o *options) SetWriteOpPoolSize(value int) Options {
opts := *o
opts.writeOperationPoolSize = value
@@ -961,3 +1008,23 @@ func (o *options) SetUseV2BatchAPIs(value bool) Options {
func (o *options) UseV2BatchAPIs() bool {
return o.useV2BatchAPIs
}
+
+func (o *options) SetIterationOptions(value index.IterationOptions) Options {
+ opts := *o
+ opts.iterationOptions = value
+ return &opts
+}
+
+func (o *options) IterationOptions() index.IterationOptions {
+ return o.iterationOptions
+}
+
+func (o *options) SetWriteTimestampOffset(value time.Duration) AdminOptions {
+ opts := *o
+ opts.writeTimestampOffset = value
+ return &opts
+}
+
+func (o *options) WriteTimestampOffset() time.Duration {
+ return o.writeTimestampOffset
+}
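
Every setter above follows the same copy-on-write convention: copy the receiver into a stack value, mutate the copy, and return its address, leaving shared defaults untouched. A minimal sketch of the pattern:

package main

import (
	"fmt"
	"time"
)

type options struct {
	writeTimestampOffset time.Duration
}

// SetWriteTimestampOffset copies the receiver, mutates the copy, and
// returns it; the original options value is never modified.
func (o *options) SetWriteTimestampOffset(value time.Duration) *options {
	opts := *o
	opts.writeTimestampOffset = value
	return &opts
}

func main() {
	base := &options{}
	derived := base.SetWriteTimestampOffset(time.Minute)
	fmt.Println(base.writeTimestampOffset, derived.writeTimestampOffset) // 0s 1m0s
}
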
diff --git a/src/dbnode/client/reader_slice_of_slices_iterator.go b/src/dbnode/client/reader_slice_of_slices_iterator.go
index f7973f2a57..d6e6f78289 100644
--- a/src/dbnode/client/reader_slice_of_slices_iterator.go
+++ b/src/dbnode/client/reader_slice_of_slices_iterator.go
@@ -60,7 +60,7 @@ func (it *readerSliceOfSlicesIterator) Next() bool {
if len(it.blockReaders) < currLen {
diff := currLen - len(it.blockReaders)
for i := 0; i < diff; i++ {
- seg := ts.NewSegment(nil, nil, ts.FinalizeNone)
+ seg := ts.NewSegment(nil, nil, 0, ts.FinalizeNone)
sr := xio.NewSegmentReader(seg)
br := xio.BlockReader{
SegmentReader: sr,
@@ -112,7 +112,14 @@ func (it *readerSliceOfSlicesIterator) resetReader(
} else {
tail.Reset(seg.Tail)
}
- r.ResetWindowed(ts.NewSegment(head, tail, ts.FinalizeNone), start, end)
+
+ var checksum uint32
+ if seg.Checksum != nil {
+ checksum = uint32(*seg.Checksum)
+ }
+
+ newSeg := ts.NewSegment(head, tail, checksum, ts.FinalizeNone)
+ r.ResetWindowed(newSeg, start, end)
}
func (it *readerSliceOfSlicesIterator) currentLen() int {
@@ -182,7 +189,7 @@ func (it *readerSliceOfSlicesIterator) Close() {
func (it *readerSliceOfSlicesIterator) Reset(segments []*rpc.Segments) {
it.segments = segments
- it.idx = -1
+ it.resetIndex()
it.closed = false
}
@@ -197,3 +204,11 @@ func (it *readerSliceOfSlicesIterator) Size() (int, error) {
}
return size, nil
}
+
+func (it *readerSliceOfSlicesIterator) Rewind() {
+ it.resetIndex()
+}
+
+func (it *readerSliceOfSlicesIterator) resetIndex() {
+ it.idx = -1
+}
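
Rewind reuses resetIndex so a caller can replay the same segments without a full Reset. A stand-in iterator showing the same cursor semantics:

package main

import "fmt"

// sliceIter stands in for the reader iterator: Rewind moves the cursor
// back to the start without touching the underlying data.
type sliceIter struct {
	items []string
	idx   int
}

func newSliceIter(items []string) *sliceIter { return &sliceIter{items: items, idx: -1} }

func (it *sliceIter) Next() bool {
	if it.idx+1 >= len(it.items) {
		return false
	}
	it.idx++
	return true
}

func (it *sliceIter) Current() string { return it.items[it.idx] }

func (it *sliceIter) Rewind() { it.idx = -1 }

func main() {
	it := newSliceIter([]string{"a", "b"})
	for it.Next() {
		fmt.Println(it.Current())
	}
	it.Rewind() // iterate again from the start
	for it.Next() {
		fmt.Println(it.Current())
	}
}
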
diff --git a/src/dbnode/client/received_blocks_new_map.go b/src/dbnode/client/received_blocks_new_map.go
index 588149c610..7a32344192 100644
--- a/src/dbnode/client/received_blocks_new_map.go
+++ b/src/dbnode/client/received_blocks_new_map.go
@@ -24,7 +24,7 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
func newReceivedBlocksMap(pool pool.BytesPool) *receivedBlocksMap {
diff --git a/src/dbnode/client/replicated_session.go b/src/dbnode/client/replicated_session.go
index 9309abdef9..5b2f95191e 100644
--- a/src/dbnode/client/replicated_session.go
+++ b/src/dbnode/client/replicated_session.go
@@ -51,12 +51,14 @@ type replicatedSession struct {
log *zap.Logger
metrics replicatedSessionMetrics
outCh chan error
+ writeTimestampOffset time.Duration
}
type replicatedSessionMetrics struct {
replicateExecuted tally.Counter
replicateNotExecuted tally.Counter
replicateError tally.Counter
+ replicateSuccess tally.Counter
}
func newReplicatedSessionMetrics(scope tally.Scope) replicatedSessionMetrics {
@@ -64,6 +66,7 @@ func newReplicatedSessionMetrics(scope tally.Scope) replicatedSessionMetrics {
replicateExecuted: scope.Counter("replicate.executed"),
replicateNotExecuted: scope.Counter("replicate.not-executed"),
replicateError: scope.Counter("replicate.error"),
+ replicateSuccess: scope.Counter("replicate.success"),
}
}
@@ -90,6 +93,7 @@ func newReplicatedSession(opts Options, asyncOpts []Options, options ...replicat
scope: scope,
log: opts.InstrumentOptions().Logger(),
metrics: newReplicatedSessionMetrics(scope),
+ writeTimestampOffset: opts.WriteTimestampOffset(),
}
// Apply options
@@ -166,6 +170,8 @@ func (s replicatedSession) replicate(params replicatedParams) error {
if err != nil {
s.metrics.replicateError.Inc(1)
s.log.Error("could not replicate write", zap.Error(err))
+ } else {
+ s.metrics.replicateSuccess.Inc(1)
}
if s.outCh != nil {
s.outCh <- err
@@ -189,7 +195,7 @@ func (s replicatedSession) Write(namespace, id ident.ID, t time.Time, value floa
return s.replicate(replicatedParams{
namespace: namespace,
id: id,
- t: t,
+ t: t.Add(-s.writeTimestampOffset),
value: value,
unit: unit,
annotation: annotation,
@@ -201,7 +207,7 @@ func (s replicatedSession) WriteTagged(namespace, id ident.ID, tags ident.TagIte
return s.replicate(replicatedParams{
namespace: namespace,
id: id,
- t: t,
+ t: t.Add(-s.writeTimestampOffset),
value: value,
unit: unit,
annotation: annotation,
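
A configured writeTimestampOffset shifts every replicated write into the past by subtracting the offset from the caller's timestamp. The arithmetic, as a tiny sketch:

package main

import (
	"fmt"
	"time"
)

// shiftWrite applies the write timestamp offset the way the replicated
// session does: subtract the offset so the write lands in the past.
func shiftWrite(t time.Time, offset time.Duration) time.Time {
	return t.Add(-offset)
}

func main() {
	now := time.Date(2020, 1, 1, 12, 0, 0, 0, time.UTC)
	shifted := shiftWrite(now, 30*time.Second)
	fmt.Println(now.Sub(shifted)) // 30s
}
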
diff --git a/src/dbnode/client/session.go b/src/dbnode/client/session.go
index f5ce72f26b..c85fe493cf 100644
--- a/src/dbnode/client/session.go
+++ b/src/dbnode/client/session.go
@@ -55,6 +55,7 @@ import (
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
xretry "github.com/m3db/m3/src/x/retry"
+ "github.com/m3db/m3/src/x/sampler"
"github.com/m3db/m3/src/x/serialize"
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
@@ -141,6 +142,8 @@ type session struct {
scope tally.Scope
nowFn clock.NowFn
log *zap.Logger
+ logWriteErrorSampler *sampler.Sampler
+ logFetchErrorSampler *sampler.Sampler
newHostQueueFn newHostQueueFn
writeRetrier xretry.Retrier
fetchRetrier xretry.Retrier
@@ -167,12 +170,14 @@ type shardMetricsKey struct {
type sessionMetrics struct {
sync.RWMutex
writeSuccess tally.Counter
- writeErrors tally.Counter
+ writeErrorsBadRequest tally.Counter
+ writeErrorsInternalError tally.Counter
writeLatencyHistogram tally.Histogram
writeNodesRespondingErrors []tally.Counter
writeNodesRespondingBadRequestErrors []tally.Counter
fetchSuccess tally.Counter
- fetchErrors tally.Counter
+ fetchErrorsBadRequest tally.Counter
+ fetchErrorsInternalError tally.Counter
fetchLatencyHistogram tally.Histogram
fetchNodesRespondingErrors []tally.Counter
fetchNodesRespondingBadRequestErrors []tally.Counter
@@ -183,11 +188,21 @@ type sessionMetrics struct {
func newSessionMetrics(scope tally.Scope) sessionMetrics {
return sessionMetrics{
- writeSuccess: scope.Counter("write.success"),
- writeErrors: scope.Counter("write.errors"),
- writeLatencyHistogram: histogramWithDurationBuckets(scope, "write.latency"),
- fetchSuccess: scope.Counter("fetch.success"),
- fetchErrors: scope.Counter("fetch.errors"),
+ writeSuccess: scope.Counter("write.success"),
+ writeErrorsBadRequest: scope.Tagged(map[string]string{
+ "error_type": "bad_request",
+ }).Counter("write.errors"),
+ writeErrorsInternalError: scope.Tagged(map[string]string{
+ "error_type": "internal_error",
+ }).Counter("write.errors"),
+ writeLatencyHistogram: histogramWithDurationBuckets(scope, "write.latency"),
+ fetchSuccess: scope.Counter("fetch.success"),
+ fetchErrorsBadRequest: scope.Tagged(map[string]string{
+ "error_type": "bad_request",
+ }).Counter("fetch.errors"),
+ fetchErrorsInternalError: scope.Tagged(map[string]string{
+ "error_type": "internal_error",
+ }).Counter("fetch.errors"),
fetchLatencyHistogram: histogramWithDurationBuckets(scope, "fetch.latency"),
topologyUpdatedSuccess: scope.Counter("topology.updated-success"),
topologyUpdatedError: scope.Counter("topology.updated-error"),
@@ -239,6 +254,16 @@ func newSession(opts Options) (clientSession, error) {
return nil, err
}
+ logWriteErrorSampler, err := sampler.NewSampler(opts.LogErrorSampleRate())
+ if err != nil {
+ return nil, err
+ }
+
+ logFetchErrorSampler, err := sampler.NewSampler(opts.LogErrorSampleRate())
+ if err != nil {
+ return nil, err
+ }
+
scope := opts.InstrumentOptions().MetricsScope()
s := &session{
@@ -252,6 +277,8 @@ func newSession(opts Options) (clientSession, error) {
scope: scope,
nowFn: opts.ClockOptions().NowFn(),
log: opts.InstrumentOptions().Logger(),
+ logWriteErrorSampler: logWriteErrorSampler,
+ logFetchErrorSampler: logFetchErrorSampler,
newHostQueueFn: newHostQueue,
fetchBatchSize: opts.FetchBatchSize(),
newPeerBlocksQueueFn: newPeerBlocksQueue,
@@ -425,10 +452,18 @@ func (s *session) recordWriteMetrics(consistencyResultErr error, respErrs int32,
}
if consistencyResultErr == nil {
s.metrics.writeSuccess.Inc(1)
+ } else if IsBadRequestError(consistencyResultErr) {
+ s.metrics.writeErrorsBadRequest.Inc(1)
} else {
- s.metrics.writeErrors.Inc(1)
+ s.metrics.writeErrorsInternalError.Inc(1)
}
s.metrics.writeLatencyHistogram.RecordDuration(s.nowFn().Sub(start))
+
+ if consistencyResultErr != nil && s.logWriteErrorSampler.Sample() {
+ s.log.Error("m3db client write error occurred",
+ zap.Float64("sampleRateLog", s.logWriteErrorSampler.SampleRate().Value()),
+ zap.Error(consistencyResultErr))
+ }
}
func (s *session) recordFetchMetrics(consistencyResultErr error, respErrs int32, start time.Time) {
@@ -441,10 +476,18 @@ func (s *session) recordFetchMetrics(consistencyResultErr error, respErrs int32,
}
if consistencyResultErr == nil {
s.metrics.fetchSuccess.Inc(1)
+ } else if IsBadRequestError(consistencyResultErr) {
+ s.metrics.fetchErrorsBadRequest.Inc(1)
} else {
- s.metrics.fetchErrors.Inc(1)
+ s.metrics.fetchErrorsInternalError.Inc(1)
}
s.metrics.fetchLatencyHistogram.RecordDuration(s.nowFn().Sub(start))
+
+ if consistencyResultErr != nil && s.logFetchErrorSampler.Sample() {
+ s.log.Error("m3db client fetch error occurred",
+ zap.Float64("sampleRateLog", s.logFetchErrorSampler.SampleRate().Value()),
+ zap.Error(consistencyResultErr))
+ }
}
func (s *session) nodesRespondingErrorsMetricIndex(respErrs int32) int32 {
@@ -1297,7 +1340,8 @@ func (s *session) fetchTaggedAttempt(
// must Unlock before calling `asEncodingSeriesIterators` as the latter needs to acquire
// the fetchState Lock
fetchState.Unlock()
- iters, metadata, err := fetchState.asEncodingSeriesIterators(s.pools, nsCtx.Schema)
+ iters, metadata, err := fetchState.asEncodingSeriesIterators(
+ s.pools, nsCtx.Schema, s.opts.IterationOptions())
// must Unlock() before decRef'ing, as the latter releases the fetchState back into a
// pool if ref count == 0.
@@ -1578,6 +1622,7 @@ func (s *session) fetchIDsAttempt(
// Avoid decoding more data than is required to satisfy the consistency guarantees.
numItersToInclude = numDesired
}
+
itersToInclude := results[:numItersToInclude]
resultsLock.RUnlock()
@@ -1588,12 +1633,14 @@ func (s *session) fetchIDsAttempt(
// due to a pending request in queue.
seriesID := s.pools.id.Clone(tsID)
namespaceID := s.pools.id.Clone(namespace)
+ consolidator := s.opts.IterationOptions().SeriesIteratorConsolidator
iter.Reset(encoding.SeriesIteratorOptions{
- ID: seriesID,
- Namespace: namespaceID,
- StartInclusive: startInclusive,
- EndExclusive: endExclusive,
- Replicas: itersToInclude,
+ ID: seriesID,
+ Namespace: namespaceID,
+ StartInclusive: xtime.ToUnixNano(startInclusive),
+ EndExclusive: xtime.ToUnixNano(endExclusive),
+ Replicas: itersToInclude,
+ SeriesIteratorConsolidator: consolidator,
})
iters.SetAt(idx, iter)
}
@@ -1611,6 +1658,12 @@ func (s *session) fetchIDsAttempt(
completionFn := func(result interface{}, err error) {
var snapshotSuccess int32
if err != nil {
+ if IsBadRequestError(err) {
+ // Wrap with invalid params and non-retryable so it is
+ // not retried.
+ err = xerrors.NewInvalidParamsError(err)
+ err = xerrors.NewNonRetryableError(err)
+ }
atomic.AddInt32(&errs, 1)
// NB(r): reuse the error lock here as we do not want to create
// a whole lot of locks for every single ID fetched due to size
@@ -3261,7 +3314,12 @@ func (b *baseBlocksResult) segmentForBlock(seg *rpc.Segment) ts.Segment {
tail.AppendAll(seg.Tail)
tail.DecRef()
}
- return ts.NewSegment(head, tail, ts.FinalizeHead&ts.FinalizeTail)
+ var checksum uint32
+ if seg.Checksum != nil {
+ checksum = uint32(*seg.Checksum)
+ }
+
+ return ts.NewSegment(head, tail, checksum, ts.FinalizeHead&ts.FinalizeTail)
}
func (b *baseBlocksResult) mergeReaders(start time.Time, blockSize time.Duration, readers []xio.SegmentReader) (encoding.Encoder, error) {
diff --git a/src/dbnode/client/session_fetch_bulk_blocks_test.go b/src/dbnode/client/session_fetch_bulk_blocks_test.go
index 1171f93f37..db4d1afecd 100644
--- a/src/dbnode/client/session_fetch_bulk_blocks_test.go
+++ b/src/dbnode/client/session_fetch_bulk_blocks_test.go
@@ -63,9 +63,11 @@ var (
nsRetentionOpts = retention.NewOptions().
SetBlockSize(blockSize).
SetRetentionPeriod(48 * blockSize)
- testTagDecodingPool = serialize.NewTagDecoderPool(serialize.NewTagDecoderOptions(),
+ testTagDecodingPool = serialize.NewTagDecoderPool(
+ serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
pool.NewObjectPoolOptions().SetSize(1))
- testTagEncodingPool = serialize.NewTagEncoderPool(serialize.NewTagEncoderOptions(),
+ testTagEncodingPool = serialize.NewTagEncoderPool(
+ serialize.NewTagEncoderOptions(),
pool.NewObjectPoolOptions().SetSize(1))
testIDPool = newSessionTestOptions().IdentifierPool()
fooID = ident.StringID("foo")
@@ -120,9 +122,7 @@ func newSessionTestAdminOptions() AdminOptions {
func newResultTestOptions() result.Options {
opts := result.NewOptions()
encoderPool := encoding.NewEncoderPool(nil)
- encoderPool.Init(func() encoding.Encoder {
- return &testEncoder{}
- })
+ encoderPool.Init(encoding.NewNullEncoder)
return opts.SetDatabaseBlockOptions(opts.DatabaseBlockOptions().
SetEncoderPool(encoderPool))
}
@@ -2531,59 +2531,3 @@ func assertEnqueueChannel(
close(enqueueCh.peersMetadataCh)
}
-
-type testEncoder struct {
- start time.Time
- data ts.Segment
- sealed bool
- closed bool
-}
-
-func (e *testEncoder) SetSchema(descr namespace.SchemaDescr) {}
-
-func (e *testEncoder) Encode(dp ts.Datapoint, timeUnit xtime.Unit, annotation ts.Annotation) error {
- return fmt.Errorf("not implemented")
-}
-
-func (e *testEncoder) Stream(ctx context.Context) (xio.SegmentReader, bool) {
- return xio.NewSegmentReader(e.data), true
-}
-
-func (e *testEncoder) NumEncoded() int {
- return 0
-}
-
-func (e *testEncoder) LastEncoded() (ts.Datapoint, error) {
- return ts.Datapoint{}, fmt.Errorf("not implemented")
-}
-
-func (e *testEncoder) Len() int {
- return e.data.Len()
-}
-
-func (e *testEncoder) Seal() {
- e.sealed = true
-}
-
-func (e *testEncoder) Reset(t time.Time, capacity int, descr namespace.SchemaDescr) {
- e.start = t
- e.data = ts.Segment{}
-}
-
-func (e *testEncoder) Close() {
- e.closed = true
-}
-
-func (e *testEncoder) Discard() ts.Segment {
- data := e.data
- e.closed = true
- e.data = ts.Segment{}
- return data
-}
-
-func (e *testEncoder) DiscardReset(t time.Time, capacity int, descr namespace.SchemaDescr) ts.Segment {
- curr := e.data
- e.start = t
- e.data = ts.Segment{}
- return curr
-}
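The removed `testEncoder` is superseded by `encoding.NewNullEncoder`, a stock no-op implementation of the `Encoder` interface. A sketch of the pool wiring, assuming the pool's `Get`/`Put` follow the usual m3 object-pool shape:

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/dbnode/encoding"
)

func main() {
	// Same wiring as newResultTestOptions above: the allocator hands out
	// no-op encoders instead of a bespoke test type.
	pool := encoding.NewEncoderPool(nil)
	pool.Init(encoding.NewNullEncoder)

	enc := pool.Get()
	fmt.Println(enc.Len()) // a null encoder holds no encoded data
	pool.Put(enc)
}
```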
diff --git a/src/dbnode/client/session_fetch_high_concurrency_test.go b/src/dbnode/client/session_fetch_high_concurrency_test.go
index 5e6a262d07..77a07ea747 100644
--- a/src/dbnode/client/session_fetch_high_concurrency_test.go
+++ b/src/dbnode/client/session_fetch_high_concurrency_test.go
@@ -100,8 +100,9 @@ func TestSessionFetchIDsHighConcurrency(t *testing.T) {
// Override the new connection function for connection pools
// to be able to mock the entire end to end pipeline
- prevGlobalNewConn := globalNewConn
- globalNewConn = func(_ string, addr string, _ Options) (xclose.SimpleCloser, rpc.TChanNode, error) {
+ newConnFn := func(
+ _ string, addr string, _ Options,
+ ) (xclose.SimpleCloser, rpc.TChanNode, error) {
mockClient := rpc.NewMockTChanNode(ctrl)
mockClient.EXPECT().Health(gomock.Any()).
Return(healthCheckResult, nil).
@@ -111,8 +112,6 @@ func TestSessionFetchIDsHighConcurrency(t *testing.T) {
AnyTimes()
return noopCloser{}, mockClient, nil
}
- defer func() { globalNewConn = prevGlobalNewConn }()
-
shards := make([]shard.Shard, numShards)
for i := range shards {
shards[i] = shard.NewShard(uint32(i)).SetState(shard.Available)
@@ -165,6 +164,7 @@ func TestSessionFetchIDsHighConcurrency(t *testing.T) {
opts := newSessionTestOptions().
SetFetchBatchSize(128).
+ SetNewConnectionFn(newConnFn).
SetTopologyInitializer(topology.NewStaticInitializer(
topology.NewStaticOptions().
SetReplicas(numReplicas).
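The test above stops mutating a package-level connection constructor and instead injects it through `SetNewConnectionFn`, so no `defer`-restore is needed and tests no longer share global state. A generic, self-contained sketch of that move (the names here are illustrative, not the client's real types):

```go
package main

import "fmt"

type conn interface{ Addr() string }

type fakeConn struct{ addr string }

func (f fakeConn) Addr() string { return f.addr }

type newConnFn func(addr string) (conn, error)

// client receives its dial function as an option rather than reading a
// package-level variable, so tests can swap it per instance.
type client struct{ newConn newConnFn }

func newClient(fn newConnFn) *client { return &client{newConn: fn} }

func main() {
	c := newClient(func(addr string) (conn, error) {
		return fakeConn{addr: addr}, nil // no real network in tests
	})
	cc, _ := c.newConn("127.0.0.1:9000")
	fmt.Println(cc.Addr())
}
```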
diff --git a/src/dbnode/client/session_fetch_test.go b/src/dbnode/client/session_fetch_test.go
index b9cb9fc651..f1b81dbc27 100644
--- a/src/dbnode/client/session_fetch_test.go
+++ b/src/dbnode/client/session_fetch_test.go
@@ -532,8 +532,9 @@ func fulfillFetchBatchOps(
}
for _, value := range f.values {
dp := ts.Datapoint{
- Timestamp: value.t,
- Value: value.value,
+ Timestamp: value.t,
+ TimestampNanos: xtime.ToUnixNano(value.t),
+ Value: value.value,
}
encoder.Encode(dp, value.unit, value.annotation)
}
diff --git a/src/dbnode/client/session_proto_test.go b/src/dbnode/client/session_proto_test.go
index a099a821f2..5dfc6a5ecc 100644
--- a/src/dbnode/client/session_proto_test.go
+++ b/src/dbnode/client/session_proto_test.go
@@ -133,8 +133,8 @@ func TestProtoSeriesIteratorRoundtrip(t *testing.T) {
seriesIter.Reset(encoding.SeriesIteratorOptions{
ID: ident.StringID("test_series_id"),
Namespace: testNamespace,
- StartInclusive: data[0].t,
- EndExclusive: start.Add(4 * time.Second),
+ StartInclusive: xtime.ToUnixNano(data[0].t),
+ EndExclusive: xtime.ToUnixNano(start.Add(4 * time.Second)),
Replicas: []encoding.MultiReaderIterator{multiIter},
})
diff --git a/src/dbnode/client/session_write_tagged_test.go b/src/dbnode/client/session_write_tagged_test.go
index 31198a0e89..a43e221e54 100644
--- a/src/dbnode/client/session_write_tagged_test.go
+++ b/src/dbnode/client/session_write_tagged_test.go
@@ -32,12 +32,12 @@ import (
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/topology"
xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
- "github.com/m3db/m3/src/x/serialize"
- xm3test "github.com/m3db/m3/src/x/test"
"github.com/m3db/m3/src/x/checked"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
+ "github.com/m3db/m3/src/x/serialize"
+ xm3test "github.com/m3db/m3/src/x/test"
xtest "github.com/m3db/m3/src/x/test"
xtime "github.com/m3db/m3/src/x/time"
@@ -618,3 +618,4 @@ func (e *erroredTagIter) Close() {}
func (e *erroredTagIter) Len() int { return 0 }
func (e *erroredTagIter) Remaining() int { return 0 }
func (e *erroredTagIter) Duplicate() ident.TagIterator { return e }
+func (e *erroredTagIter) Rewind() {}
diff --git a/src/dbnode/client/types.go b/src/dbnode/client/types.go
index 6ed517fc0e..f30a196671 100644
--- a/src/dbnode/client/types.go
+++ b/src/dbnode/client/types.go
@@ -37,6 +37,7 @@ import (
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
xretry "github.com/m3db/m3/src/x/retry"
+ "github.com/m3db/m3/src/x/sampler"
"github.com/m3db/m3/src/x/serialize"
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
@@ -272,6 +273,12 @@ type Options interface {
// InstrumentOptions returns the instrumentation options.
InstrumentOptions() instrument.Options
+ // SetLogErrorSampleRate sets the log error sample rate, a value in the range [0, 1.0].
+ SetLogErrorSampleRate(value sampler.Rate) Options
+
+ // LogErrorSampleRate returns the log error sample rate, a value in the range [0, 1.0].
+ LogErrorSampleRate() sampler.Rate
+
// SetTopologyInitializer sets the TopologyInitializer.
SetTopologyInitializer(value topology.Initializer) Options
@@ -547,6 +554,24 @@ type Options interface {
// UseV2BatchAPIs returns whether the V2 batch APIs should be used.
UseV2BatchAPIs() bool
+
+ // SetIterationOptions sets experimental iteration options.
+ SetIterationOptions(index.IterationOptions) Options
+
+ // IterationOptions returns experimental iteration options.
+ IterationOptions() index.IterationOptions
+
+ // SetWriteTimestampOffset sets the write timestamp offset.
+ SetWriteTimestampOffset(value time.Duration) AdminOptions
+
+ // WriteTimestampOffset returns the write timestamp offset.
+ WriteTimestampOffset() time.Duration
+
+ // SetNewConnectionFn sets a new connection generator function.
+ SetNewConnectionFn(value NewConnectionFn) AdminOptions
+
+ // NewConnectionFn returns the new connection generator function.
+ NewConnectionFn() NewConnectionFn
}
// AdminOptions is a set of administration client options.
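`LogErrorSampleRate` takes a `sampler.Rate` in [0, 1.0], and the session builds one sampler per path (write and fetch) so each is throttled independently. A standalone sketch of the sampling idea; m3's `x/sampler` package is the real implementation:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// sampler passes roughly one call in every 1/rate; a rate of 1 passes all.
type sampler struct {
	count uint64
	every uint64
}

func newSampler(rate float64) *sampler {
	if rate <= 0 || rate > 1 {
		rate = 1
	}
	return &sampler{every: uint64(1 / rate)}
}

func (s *sampler) Sample() bool {
	return (atomic.AddUint64(&s.count, 1)-1)%s.every == 0
}

func main() {
	s := newSampler(0.01) // log ~1% of errors
	logged := 0
	for i := 0; i < 1000; i++ {
		if s.Sample() {
			logged++
		}
	}
	fmt.Println("logged", logged, "of 1000 errors") // logged 10 of 1000 errors
}
```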
diff --git a/src/dbnode/client/write_state.go b/src/dbnode/client/write_state.go
index 1b91bb181c..3af5d518f6 100644
--- a/src/dbnode/client/write_state.go
+++ b/src/dbnode/client/write_state.go
@@ -26,10 +26,10 @@ import (
"github.com/m3db/m3/src/cluster/shard"
"github.com/m3db/m3/src/dbnode/topology"
- "github.com/m3db/m3/src/x/serialize"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
+ "github.com/m3db/m3/src/x/serialize"
)
// writeOp represents a generic write operation
@@ -115,6 +115,12 @@ func (w *writeState) completionFn(result interface{}, err error) {
var wErr error
if err != nil {
+ if IsBadRequestError(err) {
+ // Wrap with invalid params and non-retryable so it is
+ // not retried.
+ err = xerrors.NewInvalidParamsError(err)
+ err = xerrors.NewNonRetryableError(err)
+ }
wErr = xerrors.NewRenamedError(err, fmt.Errorf("error writing to host %s: %v", hostID, err))
} else if hostShardSet, ok := w.topoMap.LookupHostShardSet(hostID); !ok {
errStr := "missing host shard in writeState completionFn: %s"
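Both completion functions wrap bad-request failures with `xerrors.NewInvalidParamsError` and `xerrors.NewNonRetryableError` so the retrier gives up immediately on errors that would fail identically on every attempt. A self-contained sketch of the pattern; the wrapper and helper below stand in for m3's `x/errors` package:

```go
package main

import (
	"errors"
	"fmt"
)

type nonRetryableError struct{ inner error }

func (e nonRetryableError) Error() string { return e.inner.Error() }

func newNonRetryableError(err error) error { return nonRetryableError{inner: err} }

func isNonRetryable(err error) bool {
	var nr nonRetryableError
	return errors.As(err, &nr)
}

func doWrite() error {
	// A malformed request fails the same way every time; wrapping it lets
	// the retry loop below stop after the first attempt.
	return newNonRetryableError(errors.New("bad request: empty series ID"))
}

func main() {
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		if err = doWrite(); err == nil || isNonRetryable(err) {
			break
		}
	}
	fmt.Println("stopped with:", err)
}
```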
diff --git a/src/dbnode/config/2node/init_m3db_topology.sh b/src/dbnode/config/2node/init_m3db_topology.sh
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/src/dbnode/config/m3dbnode-all-config.yml b/src/dbnode/config/m3dbnode-all-config.yml
index 9c3064ab92..4b62a4d565 100644
--- a/src/dbnode/config/m3dbnode-all-config.yml
+++ b/src/dbnode/config/m3dbnode-all-config.yml
@@ -156,106 +156,6 @@ db:
throttle: 2m
checkInterval: 1m
- # Configuration for various different object pools that M3DB uses.
- pooling:
- blockAllocSize: 16
- type: simple
- seriesPool:
- size: 262144
- lowWatermark: 0.7
- highWatermark: 1.0
- blockPool:
- size: 262144
- lowWatermark: 0.7
- highWatermark: 1.0
- encoderPool:
- size: 262144
- lowWatermark: 0.7
- highWatermark: 1.0
- closersPool:
- size: 104857
- lowWatermark: 0.7
- highWatermark: 1.0
- contextPool:
- size: 262144
- lowWatermark: 0.7
- highWatermark: 1.0
- segmentReaderPool:
- size: 16384
- lowWatermark: 0.7
- highWatermark: 1.0
- iteratorPool:
- size: 2048
- lowWatermark: 0.7
- highWatermark: 1.0
- fetchBlockMetadataResultsPool:
- size: 65536
- capacity: 32
- lowWatermark: 0.7
- highWatermark: 1.0
- fetchBlocksMetadataResultsPool:
- size: 32
- capacity: 4096
- lowWatermark: 0.7
- highWatermark: 1.0
- replicaMetadataSlicePool:
- size: 131072
- capacity: 3
- lowWatermark: 0.7
- highWatermark: 1.0
- blockMetadataPool:
- size: 65536
- lowWatermark: 0.7
- highWatermark: 1.0
- blockMetadataSlicePool:
- size: 65536
- capacity: 32
- lowWatermark: 0.7
- highWatermark: 1.0
- blocksMetadataPool:
- size: 65536
- lowWatermark: 0.7
- highWatermark: 1.0
- blocksMetadataSlicePool:
- size: 32
- capacity: 4096
- lowWatermark: 0.7
- highWatermark: 1.0
- identifierPool:
- size: 262144
- lowWatermark: 0.7
- highWatermark: 1.0
- bytesPool:
- buckets:
- - capacity: 16
- size: 524288
- lowWatermark: 0.7
- highWatermark: 1.0
- - capacity: 32
- size: 262144
- lowWatermark: 0.7
- highWatermark: 1.0
- - capacity: 64
- size: 131072
- lowWatermark: 0.7
- highWatermark: 1.0
- - capacity: 128
- size: 65536
- lowWatermark: 0.7
- highWatermark: 1.0
- - capacity: 256
- size: 65536
- lowWatermark: 0.7
- highWatermark: 1.0
- - capacity: 1440
- size: 16384
- lowWatermark: 0.7
- highWatermark: 1.0
- - capacity: 4096
- size: 8192
- lowWatermark: 0.7
- highWatermark: 1.0
-
# etcd configuration.
config:
service:
diff --git a/src/dbnode/digest/digest.go b/src/dbnode/digest/digest.go
index 73734385fb..653a50103a 100644
--- a/src/dbnode/digest/digest.go
+++ b/src/dbnode/digest/digest.go
@@ -23,7 +23,6 @@ package digest
import (
"hash/adler32"
- "github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/stackadler32"
)
@@ -33,19 +32,6 @@ func NewDigest() stackadler32.Digest {
return stackadler32.NewDigest()
}
-// SegmentChecksum returns the 32-bit checksum for a segment
-// avoiding any allocations.
-func SegmentChecksum(segment ts.Segment) uint32 {
- d := stackadler32.NewDigest()
- if segment.Head != nil {
- d = d.Update(segment.Head.Bytes())
- }
- if segment.Tail != nil {
- d = d.Update(segment.Tail.Bytes())
- }
- return d.Sum32()
-}
-
// Checksum returns the checksum for a buffer.
func Checksum(buf []byte) uint32 {
return adler32.Checksum(buf)
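With `SegmentChecksum` gone, a caller that still needs a digest over a head/tail pair can stream both buffers through one rolling adler32 hash, which is what the removed helper did with stackadler32. A stdlib sketch:

```go
package main

import (
	"fmt"
	"hash/adler32"
)

// segmentChecksum hashes head then tail through a single rolling digest,
// matching the behavior of the removed digest.SegmentChecksum helper.
func segmentChecksum(head, tail []byte) uint32 {
	d := adler32.New()
	_, _ = d.Write(head) // hash.Hash32 writes never return an error
	_, _ = d.Write(tail)
	return d.Sum32()
}

func main() {
	fmt.Printf("0x%08x\n", segmentChecksum([]byte("head-bytes"), []byte("tail-bytes")))
}
```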
diff --git a/src/dbnode/digest/digest_mock.go b/src/dbnode/digest/digest_mock.go
index 623fe483f9..8892e8bb4d 100644
--- a/src/dbnode/digest/digest_mock.go
+++ b/src/dbnode/digest/digest_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/dbnode/digest (interfaces: ReaderWithDigest)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/dbnode/encoding/encoding.go b/src/dbnode/encoding/encoding.go
index ed9a2f62ec..4d65f5e921 100644
--- a/src/dbnode/encoding/encoding.go
+++ b/src/dbnode/encoding/encoding.go
@@ -52,7 +52,7 @@ func LeadingAndTrailingZeros(v uint64) (int, int) {
}
// SignExtend sign extends the highest bit of v which has numBits (<=64)
-func SignExtend(v uint64, numBits int) int64 {
- shift := uint(64 - numBits)
+func SignExtend(v uint64, numBits uint) int64 {
+ shift := 64 - numBits
return (int64(v) << shift) >> shift
}
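A worked example of the shift trick in `SignExtend`: shifting the value to the top of the 64-bit word and arithmetic-shifting back replicates bit `numBits-1` into all higher bits, turning an n-bit two's-complement pattern into an int64.

```go
package main

import "fmt"

// signExtend mirrors encoding.SignExtend with the new uint parameter.
func signExtend(v uint64, numBits uint) int64 {
	shift := 64 - numBits
	return (int64(v) << shift) >> shift
}

func main() {
	// 0b1101 read as a 4-bit two's-complement value is -3...
	fmt.Println(signExtend(0b1101, 4)) // -3
	// ...but read as a 5-bit value the high bit is 0, so it stays +13.
	fmt.Println(signExtend(0b1101, 5)) // 13
}
```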
diff --git a/src/dbnode/encoding/encoding_mock.go b/src/dbnode/encoding/encoding_mock.go
index 192bd8053f..9d779fa176 100644
--- a/src/dbnode/encoding/encoding_mock.go
+++ b/src/dbnode/encoding/encoding_mock.go
@@ -136,6 +136,21 @@ func (mr *MockEncoderMockRecorder) LastEncoded() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastEncoded", reflect.TypeOf((*MockEncoder)(nil).LastEncoded))
}
+// LastAnnotation mocks base method
+func (m *MockEncoder) LastAnnotation() (ts.Annotation, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "LastAnnotation")
+ ret0, _ := ret[0].(ts.Annotation)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// LastAnnotation indicates an expected call of LastAnnotation
+func (mr *MockEncoderMockRecorder) LastAnnotation() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAnnotation", reflect.TypeOf((*MockEncoder)(nil).LastAnnotation))
+}
+
// Len mocks base method
func (m *MockEncoder) Len() int {
m.ctrl.T.Helper()
@@ -254,7 +269,7 @@ func (mr *MockOptionsMockRecorder) DefaultTimeUnit() *gomock.Call {
}
// SetTimeEncodingSchemes mocks base method
-func (m *MockOptions) SetTimeEncodingSchemes(value TimeEncodingSchemes) Options {
+func (m *MockOptions) SetTimeEncodingSchemes(value map[time0.Unit]TimeEncodingScheme) Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetTimeEncodingSchemes", value)
ret0, _ := ret[0].(Options)
@@ -820,6 +835,237 @@ func (mr *MockMultiReaderIteratorMockRecorder) Readers() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Readers", reflect.TypeOf((*MockMultiReaderIterator)(nil).Readers))
}
+// Schema mocks base method
+func (m *MockMultiReaderIterator) Schema() namespace.SchemaDescr {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Schema")
+ ret0, _ := ret[0].(namespace.SchemaDescr)
+ return ret0
+}
+
+// Schema indicates an expected call of Schema
+func (mr *MockMultiReaderIteratorMockRecorder) Schema() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Schema", reflect.TypeOf((*MockMultiReaderIterator)(nil).Schema))
+}
+
+// MockSeriesIteratorAccumulator is a mock of SeriesIteratorAccumulator interface
+type MockSeriesIteratorAccumulator struct {
+ ctrl *gomock.Controller
+ recorder *MockSeriesIteratorAccumulatorMockRecorder
+}
+
+// MockSeriesIteratorAccumulatorMockRecorder is the mock recorder for MockSeriesIteratorAccumulator
+type MockSeriesIteratorAccumulatorMockRecorder struct {
+ mock *MockSeriesIteratorAccumulator
+}
+
+// NewMockSeriesIteratorAccumulator creates a new mock instance
+func NewMockSeriesIteratorAccumulator(ctrl *gomock.Controller) *MockSeriesIteratorAccumulator {
+ mock := &MockSeriesIteratorAccumulator{ctrl: ctrl}
+ mock.recorder = &MockSeriesIteratorAccumulatorMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockSeriesIteratorAccumulator) EXPECT() *MockSeriesIteratorAccumulatorMockRecorder {
+ return m.recorder
+}
+
+// Next mocks base method
+func (m *MockSeriesIteratorAccumulator) Next() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Next")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// Next indicates an expected call of Next
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) Next() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).Next))
+}
+
+// Current mocks base method
+func (m *MockSeriesIteratorAccumulator) Current() (ts.Datapoint, time0.Unit, ts.Annotation) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Current")
+ ret0, _ := ret[0].(ts.Datapoint)
+ ret1, _ := ret[1].(time0.Unit)
+ ret2, _ := ret[2].(ts.Annotation)
+ return ret0, ret1, ret2
+}
+
+// Current indicates an expected call of Current
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) Current() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Current", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).Current))
+}
+
+// Err mocks base method
+func (m *MockSeriesIteratorAccumulator) Err() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Err")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Err indicates an expected call of Err
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) Err() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).Err))
+}
+
+// Close mocks base method
+func (m *MockSeriesIteratorAccumulator) Close() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Close")
+}
+
+// Close indicates an expected call of Close
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).Close))
+}
+
+// ID mocks base method
+func (m *MockSeriesIteratorAccumulator) ID() ident.ID {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ID")
+ ret0, _ := ret[0].(ident.ID)
+ return ret0
+}
+
+// ID indicates an expected call of ID
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) ID() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).ID))
+}
+
+// Namespace mocks base method
+func (m *MockSeriesIteratorAccumulator) Namespace() ident.ID {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Namespace")
+ ret0, _ := ret[0].(ident.ID)
+ return ret0
+}
+
+// Namespace indicates an expected call of Namespace
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) Namespace() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Namespace", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).Namespace))
+}
+
+// Start mocks base method
+func (m *MockSeriesIteratorAccumulator) Start() time.Time {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Start")
+ ret0, _ := ret[0].(time.Time)
+ return ret0
+}
+
+// Start indicates an expected call of Start
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) Start() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).Start))
+}
+
+// End mocks base method
+func (m *MockSeriesIteratorAccumulator) End() time.Time {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "End")
+ ret0, _ := ret[0].(time.Time)
+ return ret0
+}
+
+// End indicates an expected call of End
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) End() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "End", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).End))
+}
+
+// Reset mocks base method
+func (m *MockSeriesIteratorAccumulator) Reset(opts SeriesIteratorOptions) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Reset", opts)
+}
+
+// Reset indicates an expected call of Reset
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) Reset(opts interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).Reset), opts)
+}
+
+// SetIterateEqualTimestampStrategy mocks base method
+func (m *MockSeriesIteratorAccumulator) SetIterateEqualTimestampStrategy(strategy IterateEqualTimestampStrategy) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "SetIterateEqualTimestampStrategy", strategy)
+}
+
+// SetIterateEqualTimestampStrategy indicates an expected call of SetIterateEqualTimestampStrategy
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) SetIterateEqualTimestampStrategy(strategy interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIterateEqualTimestampStrategy", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).SetIterateEqualTimestampStrategy), strategy)
+}
+
+// Stats mocks base method
+func (m *MockSeriesIteratorAccumulator) Stats() (SeriesIteratorStats, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Stats")
+ ret0, _ := ret[0].(SeriesIteratorStats)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Stats indicates an expected call of Stats
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) Stats() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).Stats))
+}
+
+// Replicas mocks base method
+func (m *MockSeriesIteratorAccumulator) Replicas() ([]MultiReaderIterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Replicas")
+ ret0, _ := ret[0].([]MultiReaderIterator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Replicas indicates an expected call of Replicas
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) Replicas() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Replicas", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).Replicas))
+}
+
+// Tags mocks base method
+func (m *MockSeriesIteratorAccumulator) Tags() ident.TagIterator {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Tags")
+ ret0, _ := ret[0].(ident.TagIterator)
+ return ret0
+}
+
+// Tags indicates an expected call of Tags
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) Tags() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tags", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).Tags))
+}
+
+// Add mocks base method
+func (m *MockSeriesIteratorAccumulator) Add(it SeriesIterator) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Add", it)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Add indicates an expected call of Add
+func (mr *MockSeriesIteratorAccumulatorMockRecorder) Add(it interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockSeriesIteratorAccumulator)(nil).Add), it)
+}
+
// MockSeriesIterator is a mock of SeriesIterator interface
type MockSeriesIterator struct {
ctrl *gomock.Controller
@@ -927,20 +1173,6 @@ func (mr *MockSeriesIteratorMockRecorder) Namespace() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Namespace", reflect.TypeOf((*MockSeriesIterator)(nil).Namespace))
}
-// Tags mocks base method
-func (m *MockSeriesIterator) Tags() ident.TagIterator {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Tags")
- ret0, _ := ret[0].(ident.TagIterator)
- return ret0
-}
-
-// Tags indicates an expected call of Tags
-func (mr *MockSeriesIteratorMockRecorder) Tags() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tags", reflect.TypeOf((*MockSeriesIterator)(nil).Tags))
-}
-
// Start mocks base method
func (m *MockSeriesIterator) Start() time.Time {
m.ctrl.T.Helper()
@@ -993,12 +1225,28 @@ func (mr *MockSeriesIteratorMockRecorder) SetIterateEqualTimestampStrategy(strat
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIterateEqualTimestampStrategy", reflect.TypeOf((*MockSeriesIterator)(nil).SetIterateEqualTimestampStrategy), strategy)
}
+// Stats mocks base method
+func (m *MockSeriesIterator) Stats() (SeriesIteratorStats, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Stats")
+ ret0, _ := ret[0].(SeriesIteratorStats)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Stats indicates an expected call of Stats
+func (mr *MockSeriesIteratorMockRecorder) Stats() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockSeriesIterator)(nil).Stats))
+}
+
// Replicas mocks base method
-func (m *MockSeriesIterator) Replicas() []MultiReaderIterator {
+func (m *MockSeriesIterator) Replicas() ([]MultiReaderIterator, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Replicas")
ret0, _ := ret[0].([]MultiReaderIterator)
- return ret0
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
// Replicas indicates an expected call of Replicas
@@ -1007,19 +1255,56 @@ func (mr *MockSeriesIteratorMockRecorder) Replicas() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Replicas", reflect.TypeOf((*MockSeriesIterator)(nil).Replicas))
}
-// Stats mocks base method
-func (m *MockSeriesIterator) Stats() (SeriesIteratorStats, error) {
+// Tags mocks base method
+func (m *MockSeriesIterator) Tags() ident.TagIterator {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Stats")
- ret0, _ := ret[0].(SeriesIteratorStats)
+ ret := m.ctrl.Call(m, "Tags")
+ ret0, _ := ret[0].(ident.TagIterator)
+ return ret0
+}
+
+// Tags indicates an expected call of Tags
+func (mr *MockSeriesIteratorMockRecorder) Tags() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tags", reflect.TypeOf((*MockSeriesIterator)(nil).Tags))
+}
+
+// MockSeriesIteratorConsolidator is a mock of SeriesIteratorConsolidator interface
+type MockSeriesIteratorConsolidator struct {
+ ctrl *gomock.Controller
+ recorder *MockSeriesIteratorConsolidatorMockRecorder
+}
+
+// MockSeriesIteratorConsolidatorMockRecorder is the mock recorder for MockSeriesIteratorConsolidator
+type MockSeriesIteratorConsolidatorMockRecorder struct {
+ mock *MockSeriesIteratorConsolidator
+}
+
+// NewMockSeriesIteratorConsolidator creates a new mock instance
+func NewMockSeriesIteratorConsolidator(ctrl *gomock.Controller) *MockSeriesIteratorConsolidator {
+ mock := &MockSeriesIteratorConsolidator{ctrl: ctrl}
+ mock.recorder = &MockSeriesIteratorConsolidatorMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockSeriesIteratorConsolidator) EXPECT() *MockSeriesIteratorConsolidatorMockRecorder {
+ return m.recorder
+}
+
+// ConsolidateReplicas mocks base method
+func (m *MockSeriesIteratorConsolidator) ConsolidateReplicas(replicas []MultiReaderIterator) ([]MultiReaderIterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ConsolidateReplicas", replicas)
+ ret0, _ := ret[0].([]MultiReaderIterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// Stats indicates an expected call of Stats
-func (mr *MockSeriesIteratorMockRecorder) Stats() *gomock.Call {
+// ConsolidateReplicas indicates an expected call of ConsolidateReplicas
+func (mr *MockSeriesIteratorConsolidatorMockRecorder) ConsolidateReplicas(replicas interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockSeriesIterator)(nil).Stats))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConsolidateReplicas", reflect.TypeOf((*MockSeriesIteratorConsolidator)(nil).ConsolidateReplicas), replicas)
}
// MockSeriesIterators is a mock of SeriesIterators interface
@@ -1292,7 +1577,7 @@ func (mr *MockIStreamMockRecorder) ReadByte() *gomock.Call {
}
// ReadBits mocks base method
-func (m *MockIStream) ReadBits(numBits int) (uint64, error) {
+func (m *MockIStream) ReadBits(numBits uint) (uint64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReadBits", numBits)
ret0, _ := ret[0].(uint64)
@@ -1307,7 +1592,7 @@ func (mr *MockIStreamMockRecorder) ReadBits(numBits interface{}) *gomock.Call {
}
// PeekBits mocks base method
-func (m *MockIStream) PeekBits(numBits int) (uint64, error) {
+func (m *MockIStream) PeekBits(numBits uint) (uint64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PeekBits", numBits)
ret0, _ := ret[0].(uint64)
@@ -1322,10 +1607,10 @@ func (mr *MockIStreamMockRecorder) PeekBits(numBits interface{}) *gomock.Call {
}
// RemainingBitsInCurrentByte mocks base method
-func (m *MockIStream) RemainingBitsInCurrentByte() int {
+func (m *MockIStream) RemainingBitsInCurrentByte() uint {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RemainingBitsInCurrentByte")
- ret0, _ := ret[0].(int)
+ ret0, _ := ret[0].(uint)
return ret0
}
@@ -1487,19 +1772,19 @@ func (mr *MockOStreamMockRecorder) Discard() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discard", reflect.TypeOf((*MockOStream)(nil).Discard))
}
-// Rawbytes mocks base method
-func (m *MockOStream) Rawbytes() ([]byte, int) {
+// RawBytes mocks base method
+func (m *MockOStream) RawBytes() ([]byte, int) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Rawbytes")
+ ret := m.ctrl.Call(m, "RawBytes")
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(int)
return ret0, ret1
}
-// Rawbytes indicates an expected call of Rawbytes
-func (mr *MockOStreamMockRecorder) Rawbytes() *gomock.Call {
+// RawBytes indicates an expected call of RawBytes
+func (mr *MockOStreamMockRecorder) RawBytes() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Rawbytes", reflect.TypeOf((*MockOStream)(nil).Rawbytes))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RawBytes", reflect.TypeOf((*MockOStream)(nil).RawBytes))
}
// CheckedBytes mocks base method
diff --git a/src/dbnode/encoding/istream.go b/src/dbnode/encoding/istream.go
index cc6686c409..f4eeba5426 100644
--- a/src/dbnode/encoding/istream.go
+++ b/src/dbnode/encoding/istream.go
@@ -31,15 +31,19 @@ type istream struct {
r *bufio.Reader // encoded stream
err error // error encountered
current byte // current byte we are working off of
- remaining int // bits remaining in current to be read
+ buffer []byte // buffer for reading in multiple bytes
+ remaining uint // bits remaining in current to be read
}
// NewIStream creates a new Istream
func NewIStream(reader io.Reader, bufioSize int) IStream {
- return &istream{r: bufio.NewReaderSize(reader, bufioSize)}
+ return &istream{
+ r: bufio.NewReaderSize(reader, bufioSize),
+ // Buffer large enough to hold the 8 bytes of a uint64 per read.
+ buffer: make([]byte, 8),
+ }
}
-// ReadBit reads the next Bit
func (is *istream) ReadBit() (Bit, error) {
if is.err != nil {
return 0, is.err
@@ -52,7 +56,6 @@ func (is *istream) ReadBit() (Bit, error) {
return Bit(is.consumeBuffer(1)), nil
}
-// Read reads len(b) bytes.
func (is *istream) Read(b []byte) (int, error) {
if is.remaining == 0 {
// Optimized path for when the iterator is already aligned on a byte boundary. Avoids
@@ -75,7 +78,6 @@ func (is *istream) Read(b []byte) (int, error) {
return i, nil
}
-// ReadByte reads the next Byte
func (is *istream) ReadByte() (byte, error) {
if is.err != nil {
return 0, is.err
@@ -92,22 +94,26 @@ func (is *istream) ReadByte() (byte, error) {
return res, nil
}
-// ReadBits reads the next Bits
-func (is *istream) ReadBits(numBits int) (uint64, error) {
+func (is *istream) ReadBits(numBits uint) (uint64, error) {
if is.err != nil {
return 0, is.err
}
-
var res uint64
- for numBits >= 8 {
- byteRead, err := is.ReadByte()
+ numBytes := numBits / 8
+ if numBytes > 0 {
+ // Use Read rather than individual ReadByte calls since it has an
+ // optimized path for when the iterator is aligned on a byte boundary.
+ bytes := is.buffer[0:numBytes]
+ _, err := is.Read(bytes)
if err != nil {
return 0, err
}
- res = (res << 8) | uint64(byteRead)
- numBits -= 8
+ for _, b := range bytes {
+ res = (res << 8) | uint64(b)
+ }
}
+ numBits = numBits % 8
for numBits > 0 {
// This is equivalent to calling is.ReadBit() in a loop but some manual inlining
// has been performed to optimize this loop as its heavily used in the hot path.
@@ -121,8 +127,8 @@ func (is *istream) ReadBits(numBits int) (uint64, error) {
if is.remaining < numToRead {
numToRead = is.remaining
}
- bits := is.current >> uint(8-numToRead)
- is.current <<= uint(numToRead)
+ bits := is.current >> (8 - numToRead)
+ is.current <<= numToRead
is.remaining -= numToRead
res = (res << uint64(numToRead)) | uint64(bits)
numBits -= numToRead
@@ -130,11 +136,7 @@ func (is *istream) ReadBits(numBits int) (uint64, error) {
return res, nil
}
-// PeekBits looks at the next Bits, but doesn't move the pos
-func (is *istream) PeekBits(numBits int) (uint64, error) {
- if is.err != nil {
- return 0, is.err
- }
+func (is *istream) PeekBits(numBits uint) (uint64, error) {
// check the last byte first
if numBits <= is.remaining {
return uint64(readBitsInByte(is.current, numBits)), nil
@@ -152,25 +154,23 @@ func (is *istream) PeekBits(numBits int) (uint64, error) {
numBitsRead += 8
}
remainder := readBitsInByte(bytesRead[numBytesToRead-1], numBits-numBitsRead)
- res = (res << uint(numBits-numBitsRead)) | uint64(remainder)
+ res = (res << (numBits - numBitsRead)) | uint64(remainder)
return res, nil
}
-// RemainingBitsInCurrentByte returns the number of bits remaining to be read in
-// the current byte.
-func (is *istream) RemainingBitsInCurrentByte() int {
+func (is *istream) RemainingBitsInCurrentByte() uint {
return is.remaining
}
// readBitsInByte reads numBits in byte b.
-func readBitsInByte(b byte, numBits int) byte {
- return b >> uint(8-numBits)
+func readBitsInByte(b byte, numBits uint) byte {
+ return b >> (8 - numBits)
}
// consumeBuffer consumes numBits in is.current.
-func (is *istream) consumeBuffer(numBits int) byte {
+func (is *istream) consumeBuffer(numBits uint) byte {
res := readBitsInByte(is.current, numBits)
- is.current <<= uint(numBits)
+ is.current <<= numBits
is.remaining -= numBits
return res
}
@@ -181,7 +181,6 @@ func (is *istream) readByteFromStream() error {
return is.err
}
-// Reset resets the Istream
func (is *istream) Reset(r io.Reader) {
is.r.Reset(r)
is.err = nil
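The reworked `ReadBits` splits a request into whole bytes, served by `Read` with its fast path for byte-aligned streams, plus up to seven leftover bits consumed from `current`. A caller-side sketch using the same constructor the tests below use (signature per the post-change `uint` API):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/m3db/m3/src/dbnode/encoding"
)

func main() {
	data := []byte{0xa9, 0xfe, 0xfe, 0xdf}
	is := encoding.NewIStream(bytes.NewReader(data), 16)

	// 12 bits = one whole byte via the aligned Read path + 4 leftover bits.
	v, err := is.ReadBits(12)
	if err != nil {
		panic(err)
	}
	fmt.Printf("0x%03x\n", v) // 0xa9f
}
```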
diff --git a/src/dbnode/encoding/istream_test.go b/src/dbnode/encoding/istream_test.go
index 7658c419f1..4841590b01 100644
--- a/src/dbnode/encoding/istream_test.go
+++ b/src/dbnode/encoding/istream_test.go
@@ -35,7 +35,7 @@ func TestReadBits(t *testing.T) {
o := NewIStream(bytes.NewReader(byteStream), 16)
is := o.(*istream)
- numBits := []int{1, 3, 4, 8, 7, 2, 64, 64}
+ numBits := []uint{1, 3, 4, 8, 7, 2, 64, 64}
var res []uint64
for _, v := range numBits {
read, err := is.ReadBits(v)
@@ -44,11 +44,9 @@ func TestReadBits(t *testing.T) {
}
expected := []uint64{0x1, 0x4, 0xa, 0xfe, 0x7e, 0x3, 0x1234567890abcdef, 0x1}
require.Equal(t, expected, res)
- require.NoError(t, is.err)
_, err := is.ReadBits(8)
require.Error(t, err)
- require.Error(t, is.err)
}
func TestPeekBitsSuccess(t *testing.T) {
@@ -56,7 +54,7 @@ func TestPeekBitsSuccess(t *testing.T) {
o := NewIStream(bytes.NewReader(byteStream), 16)
is := o.(*istream)
inputs := []struct {
- numBits int
+ numBits uint
expected uint64
}{
{0, 0},
@@ -73,9 +71,8 @@ func TestPeekBitsSuccess(t *testing.T) {
require.NoError(t, err)
require.Equal(t, input.expected, res)
}
- require.NoError(t, is.err)
require.Equal(t, byte(0), is.current)
- require.Equal(t, 0, is.remaining)
+ require.Equal(t, 0, int(is.remaining))
}
func TestPeekBitsError(t *testing.T) {
@@ -98,7 +95,7 @@ func TestReadAfterPeekBits(t *testing.T) {
require.Error(t, err)
inputs := []struct {
- numBits int
+ numBits uint
expected uint64
}{
{2, 0x2},
@@ -117,9 +114,7 @@ func TestResetIStream(t *testing.T) {
o := NewIStream(bytes.NewReader(nil), 16)
is := o.(*istream)
is.ReadBits(1)
- require.Error(t, is.err)
is.Reset(bytes.NewReader(nil))
- require.NoError(t, is.err)
require.Equal(t, byte(0), is.current)
- require.Equal(t, 0, is.remaining)
+ require.Equal(t, 0, int(is.remaining))
}
diff --git a/src/dbnode/encoding/iterator_test.go b/src/dbnode/encoding/iterator_test.go
index 4bb4c63b0f..6df695374d 100644
--- a/src/dbnode/encoding/iterator_test.go
+++ b/src/dbnode/encoding/iterator_test.go
@@ -71,7 +71,7 @@ func (it *testIterator) Current() (ts.Datapoint, xtime.Unit, ts.Annotation) {
idx = 0
}
v := it.values[idx]
- dp := ts.Datapoint{Timestamp: v.t, Value: v.value}
+ dp := ts.Datapoint{Timestamp: v.t, TimestampNanos: xtime.ToUnixNano(v.t), Value: v.value}
return dp, v.unit, ts.Annotation(v.annotation)
}
@@ -128,7 +128,7 @@ func (it *testMultiIterator) Current() (ts.Datapoint, xtime.Unit, ts.Annotation)
idx = 0
}
v := it.values[idx]
- dp := ts.Datapoint{Timestamp: v.t, Value: v.value}
+ dp := ts.Datapoint{Timestamp: v.t, TimestampNanos: xtime.ToUnixNano(v.t), Value: v.value}
return dp, v.unit, ts.Annotation(v.annotation)
}
@@ -154,6 +154,10 @@ func (it *testMultiIterator) ResetSliceOfSlices(readers xio.ReaderSliceOfSlicesI
}
}
+func (it *testMultiIterator) Schema() namespace.SchemaDescr {
+ return nil
+}
+
func (it *testMultiIterator) Readers() xio.ReaderSliceOfSlicesIterator {
return nil
}
@@ -194,6 +198,10 @@ func (it *testReaderSliceOfSlicesIterator) Size() (int, error) {
return 0, nil
}
+func (it *testReaderSliceOfSlicesIterator) Rewind() {
+ it.idx = -1
+}
+
func (it *testReaderSliceOfSlicesIterator) arrayIdx() int {
idx := it.idx
if idx == -1 {
diff --git a/src/dbnode/encoding/iterators.go b/src/dbnode/encoding/iterators.go
index 3ebd082142..7377794e2b 100644
--- a/src/dbnode/encoding/iterators.go
+++ b/src/dbnode/encoding/iterators.go
@@ -21,19 +21,16 @@
package encoding
import (
+ "math"
"sort"
- "time"
"github.com/m3db/m3/src/dbnode/ts"
xtime "github.com/m3db/m3/src/x/time"
)
var (
- // time is stored as an int64 plus an int32 nanosecond value, but if you
- // use max int64 for the seconds component only then integer overflow
- // will occur when performing comparisons like time.Before() and they
- // will not work correctly.
- timeMax = time.Unix(1<<63-62135596801, 999999999)
+ // UnixNano is an int64, so the max time is the max of that type.
+ timeMaxNanos = xtime.UnixNano(math.MaxInt64)
)
// iterators is a collection of iterators, and allows for reading in order values
@@ -41,9 +38,9 @@ var (
type iterators struct {
values []Iterator
earliest []Iterator
- earliestAt time.Time
- filterStart time.Time
- filterEnd time.Time
+ earliestAt xtime.UnixNano
+ filterStart xtime.UnixNano
+ filterEnd xtime.UnixNano
filtering bool
equalTimesStrategy IterateEqualTimestampStrategy
@@ -106,7 +103,7 @@ func (i *iterators) current() (ts.Datapoint, xtime.Unit, ts.Annotation) {
return i.earliest[numIters-1].Current()
}
-func (i *iterators) at() time.Time {
+func (i *iterators) at() xtime.UnixNano {
return i.earliestAt
}
@@ -121,13 +118,13 @@ func (i *iterators) push(iter Iterator) bool {
func (i *iterators) tryAddEarliest(iter Iterator) {
dp, _, _ := iter.Current()
- if dp.Timestamp.Equal(i.earliestAt) {
+ if dp.TimestampNanos == i.earliestAt {
// Push equal earliest
i.earliest = append(i.earliest, iter)
- } else if dp.Timestamp.Before(i.earliestAt) {
+ } else if dp.TimestampNanos < i.earliestAt {
// Reset earliest and push new iter
i.earliest = append(i.earliest[:0], iter)
- i.earliestAt = dp.Timestamp
+ i.earliestAt = dp.TimestampNanos
}
}
@@ -135,12 +132,12 @@ func (i *iterators) moveIteratorToFilterNext(iter Iterator) bool {
next := true
for next {
dp, _, _ := iter.Current()
- if dp.Timestamp.Before(i.filterStart) {
+ if dp.TimestampNanos < i.filterStart {
// Filter out any before start
next = iter.Next()
continue
}
- if !dp.Timestamp.Before(i.filterEnd) {
+ if dp.TimestampNanos >= i.filterEnd {
// Filter out completely if after end
next = false
break
@@ -201,15 +198,15 @@ func (i *iterators) moveToValidNext() (bool, error) {
}
// Force first to be new earliest, evaluate rest
- i.earliestAt = timeMax
+ i.earliestAt = timeMaxNanos
for _, iter := range i.values {
i.tryAddEarliest(iter)
}
// Apply filter to new earliest if necessary
if i.filtering {
- inFilter := i.earliestAt.Before(i.filterEnd) &&
- !i.earliestAt.Before(i.filterStart)
+ inFilter := i.earliestAt < i.filterEnd &&
+ i.earliestAt >= i.filterStart
if !inFilter {
return i.moveToValidNext()
}
@@ -218,8 +215,8 @@ func (i *iterators) moveToValidNext() (bool, error) {
return i.validateNext(true, prevAt)
}
-func (i *iterators) validateNext(next bool, prevAt time.Time) (bool, error) {
- if i.earliestAt.Before(prevAt) {
+func (i *iterators) validateNext(next bool, prevAt xtime.UnixNano) (bool, error) {
+ if i.earliestAt < prevAt {
// Out of order datapoint
i.reset()
return false, errOutOfOrderIterator
@@ -237,10 +234,10 @@ func (i *iterators) reset() {
i.earliest[idx] = nil
}
i.earliest = i.earliest[:0]
- i.earliestAt = timeMax
+ i.earliestAt = timeMaxNanos
}
-func (i *iterators) setFilter(start, end time.Time) {
+func (i *iterators) setFilter(start, end xtime.UnixNano) {
i.filtering = true
i.filterStart = start
i.filterEnd = end
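Moving the iterator bookkeeping from `time.Time` to `xtime.UnixNano` makes the filter checks plain int64 comparisons and gives a safe `math.MaxInt64` sentinel, avoiding the overflow pitfall the removed `timeMax` comment described. A self-contained sketch; the local `unixNano` type stands in for m3's `xtime.UnixNano`:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

type unixNano int64 // stand-in for m3's xtime.UnixNano

func toUnixNano(t time.Time) unixNano { return unixNano(t.UnixNano()) }

func main() {
	start := toUnixNano(time.Unix(100, 0))
	end := toUnixNano(time.Unix(200, 0))
	dp := toUnixNano(time.Unix(150, 0))

	inFilter := dp >= start && dp < end // plain integer comparisons
	fmt.Println(inFilter)               // true

	earliestAt := unixNano(math.MaxInt64) // safe sentinel, no overflow
	fmt.Println(dp < earliestAt)          // true
}
```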
diff --git a/src/dbnode/encoding/m3tsz/encoder.go b/src/dbnode/encoding/m3tsz/encoder.go
index b41598e5d0..78bf251915 100644
--- a/src/dbnode/encoding/m3tsz/encoder.go
+++ b/src/dbnode/encoding/m3tsz/encoder.go
@@ -263,7 +263,7 @@ func (enc *encoder) Reset(start time.Time, capacity int, schema namespace.Schema
func (enc *encoder) reset(start time.Time, bytes checked.Bytes) {
enc.os.Reset(bytes)
- timeUnit := initialTimeUnit(start, enc.opts.DefaultTimeUnit())
+ timeUnit := initialTimeUnit(xtime.ToUnixNano(start), enc.opts.DefaultTimeUnit())
enc.tsEncoderState = NewTimestampEncoder(start, timeUnit, enc.opts)
enc.floatEnc = FloatEncoderAndIterator{}
@@ -302,7 +302,10 @@ func (enc *encoder) LastEncoded() (ts.Datapoint, error) {
return ts.Datapoint{}, errNoEncodedDatapoints
}
- result := ts.Datapoint{Timestamp: enc.tsEncoderState.PrevTime}
+ result := ts.Datapoint{
+ Timestamp: enc.tsEncoderState.PrevTime,
+ TimestampNanos: xtime.ToUnixNano(enc.tsEncoderState.PrevTime),
+ }
if enc.isFloat {
result.Value = math.Float64frombits(enc.floatEnc.PrevFloatBits)
} else {
@@ -311,10 +314,19 @@ func (enc *encoder) LastEncoded() (ts.Datapoint, error) {
return result, nil
}
+// LastAnnotation returns the last encoded annotation.
+func (enc *encoder) LastAnnotation() (ts.Annotation, error) {
+ if enc.numEncoded == 0 {
+ return nil, errNoEncodedDatapoints
+ }
+
+ return enc.tsEncoderState.PrevAnnotation, nil
+}
+
// Len returns the length of the final data stream that would be generated
// by a call to Stream().
func (enc *encoder) Len() int {
- raw, pos := enc.os.Rawbytes()
+ raw, pos := enc.os.RawBytes()
if len(raw) == 0 {
return 0
}
@@ -376,7 +388,7 @@ func (enc *encoder) segmentZeroCopy(ctx context.Context) ts.Segment {
// We need a multibyte tail to capture an immutable snapshot
// of the encoder data.
- rawBuffer, pos := enc.os.Rawbytes()
+ rawBuffer, pos := enc.os.RawBytes()
lastByte := rawBuffer[length-1]
// Take ref up to last byte.
@@ -402,7 +414,7 @@ func (enc *encoder) segmentZeroCopy(ctx context.Context) ts.Segment {
// NB(r): Finalize the head bytes whether this is by ref or copy. If by
// ref we have no ref to it anymore and if by copy then the owner should
// be finalizing the bytes when the segment is finalized.
- return ts.NewSegment(head, tail, ts.FinalizeHead)
+ return ts.NewSegment(head, tail, 0, ts.FinalizeHead)
}
func (enc *encoder) segmentTakeOwnership() ts.Segment {
@@ -412,7 +424,7 @@ func (enc *encoder) segmentTakeOwnership() ts.Segment {
}
// We need a multibyte tail since the tail isn't set correctly midstream.
- rawBuffer, pos := enc.os.Rawbytes()
+ rawBuffer, pos := enc.os.RawBytes()
lastByte := rawBuffer[length-1]
// Take ref from the ostream.
@@ -430,5 +442,5 @@ func (enc *encoder) segmentTakeOwnership() ts.Segment {
// NB(r): Finalize the head bytes whether this is by ref or copy. If by
// ref we have no ref to it anymore and if by copy then the owner should
// be finalizing the bytes when the segment is finalized.
- return ts.NewSegment(head, tail, ts.FinalizeHead)
+ return ts.NewSegment(head, tail, 0, ts.FinalizeHead)
}
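`ts.NewSegment` now takes the checksum as an explicit argument; the encoder passes 0 since it never computed one, while the RPC path earlier fills it from the wire. A sketch of the updated call; the `checked.NewBytes(..., nil)` plus `IncRef` pattern is an assumption modeled on m3's test conventions, not code from this change:

```go
package main

import (
	"fmt"
	"hash/adler32"

	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/x/checked"
)

func main() {
	headBytes := []byte{0x1, 0x2}
	tailBytes := []byte{0x3}

	head := checked.NewBytes(headBytes, nil)
	head.IncRef()
	tail := checked.NewBytes(tailBytes, nil)
	tail.IncRef()

	// Checksum over head+tail, as the removed digest.SegmentChecksum did.
	checksum := adler32.Checksum(append(append([]byte{}, headBytes...), tailBytes...))

	seg := ts.NewSegment(head, tail, checksum, ts.FinalizeHead)
	fmt.Println(seg.Len()) // 3
}
```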
diff --git a/src/dbnode/encoding/m3tsz/encoder_test.go b/src/dbnode/encoding/m3tsz/encoder_test.go
index 6ea10bad81..c9496cf893 100644
--- a/src/dbnode/encoding/m3tsz/encoder_test.go
+++ b/src/dbnode/encoding/m3tsz/encoder_test.go
@@ -72,7 +72,7 @@ func TestWriteDeltaOfDeltaTimeUnitUnchanged(t *testing.T) {
stream := encoding.NewOStream(nil, false, nil)
tsEncoder := NewTimestampEncoder(testStartTime, input.timeUnit, encoding.NewOptions())
tsEncoder.writeDeltaOfDeltaTimeUnitUnchanged(stream, 0, input.delta, input.timeUnit)
- b, p := stream.Rawbytes()
+ b, p := stream.RawBytes()
require.Equal(t, input.expectedBytes, b)
require.Equal(t, input.expectedPos, p)
}
@@ -92,7 +92,7 @@ func TestWriteDeltaOfDeltaTimeUnitChanged(t *testing.T) {
stream := encoding.NewOStream(nil, false, nil)
tsEncoder := NewTimestampEncoder(testStartTime, xtime.Nanosecond, nil)
tsEncoder.writeDeltaOfDeltaTimeUnitChanged(stream, 0, input.delta)
- b, p := stream.Rawbytes()
+ b, p := stream.RawBytes()
require.Equal(t, input.expectedBytes, b)
require.Equal(t, input.expectedPos, p)
}
@@ -114,7 +114,7 @@ func TestWriteValue(t *testing.T) {
encoder.Reset(testStartTime, 0, nil)
eit := FloatEncoderAndIterator{PrevXOR: input.previousXOR}
eit.writeXOR(encoder.os, input.currentXOR)
- b, p := encoder.os.Rawbytes()
+ b, p := encoder.os.RawBytes()
require.Equal(t, input.expectedBytes, b)
require.Equal(t, input.expectedPos, p)
}
@@ -146,7 +146,7 @@ func TestWriteAnnotation(t *testing.T) {
stream := encoding.NewOStream(nil, false, nil)
tsEncoder := NewTimestampEncoder(time.Time{}, xtime.Nanosecond, encoding.NewOptions())
tsEncoder.writeAnnotation(stream, input.annotation)
- b, p := stream.Rawbytes()
+ b, p := stream.RawBytes()
require.Equal(t, input.expectedBytes, b)
require.Equal(t, input.expectedPos, p)
}
@@ -195,7 +195,7 @@ func TestWriteTimeUnit(t *testing.T) {
tsEncoder := NewTimestampEncoder(time.Time{}, xtime.Nanosecond, encoding.NewOptions())
tsEncoder.TimeUnit = xtime.None
assert.Equal(t, input.expectedResult, tsEncoder.maybeWriteTimeUnitChange(stream, input.timeUnit))
- b, p := stream.Rawbytes()
+ b, p := stream.RawBytes()
assert.Equal(t, input.expectedBytes, b)
assert.Equal(t, input.expectedPos, p)
}
@@ -211,13 +211,13 @@ func TestEncodeNoAnnotation(t *testing.T) {
startTime := time.Unix(1427162462, 0)
inputs := []ts.Datapoint{
- {startTime, 12},
- {startTime.Add(time.Second * 60), 12},
- {startTime.Add(time.Second * 120), 24},
- {startTime.Add(-time.Second * 76), 24},
- {startTime.Add(-time.Second * 16), 24},
- {startTime.Add(time.Second * 2092), 15},
- {startTime.Add(time.Second * 4200), 12},
+ {Timestamp: startTime, Value: 12},
+ {Timestamp: startTime.Add(time.Second * 60), Value: 12},
+ {Timestamp: startTime.Add(time.Second * 120), Value: 24},
+ {Timestamp: startTime.Add(-time.Second * 76), Value: 24},
+ {Timestamp: startTime.Add(-time.Second * 16), Value: 24},
+ {Timestamp: startTime.Add(time.Second * 2092), Value: 15},
+ {Timestamp: startTime.Add(time.Second * 4200), Value: 12},
}
for _, input := range inputs {
encoder.Encode(input, xtime.Second, nil)
@@ -236,7 +236,7 @@ func TestEncodeNoAnnotation(t *testing.T) {
0x40, 0x6, 0x58, 0x76, 0x8c,
}
- b, p := encoder.os.Rawbytes()
+ b, p := encoder.os.RawBytes()
require.Equal(t, expectedBuffer, b)
require.Equal(t, 6, p)
}
@@ -254,13 +254,13 @@ func TestEncodeWithAnnotation(t *testing.T) {
dp ts.Datapoint
ant ts.Annotation
}{
- {ts.Datapoint{startTime, 12}, []byte{0xa}},
- {ts.Datapoint{startTime.Add(time.Second * 60), 12}, []byte{0xa}},
- {ts.Datapoint{startTime.Add(time.Second * 120), 24}, nil},
- {ts.Datapoint{startTime.Add(-time.Second * 76), 24}, nil},
- {ts.Datapoint{startTime.Add(-time.Second * 16), 24}, []byte{0x1, 0x2}},
- {ts.Datapoint{startTime.Add(time.Second * 2092), 15}, nil},
- {ts.Datapoint{startTime.Add(time.Second * 4200), 12}, nil},
+ {ts.Datapoint{Timestamp: startTime, Value: 12}, []byte{0xa}},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 60), Value: 12}, []byte{0xa}},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 120), Value: 24}, nil},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 76), Value: 24}, nil},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 16), Value: 24}, []byte{0x1, 0x2}},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 2092), Value: 15}, nil},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 4200), Value: 12}, nil},
}
for _, input := range inputs {
@@ -273,7 +273,7 @@ func TestEncodeWithAnnotation(t *testing.T) {
0x8, 0x4, 0xb, 0x84, 0x1, 0xe0, 0x0, 0x1, 0x0, 0x19, 0x61, 0xda, 0x30,
}
- b, p := encoder.os.Rawbytes()
+ b, p := encoder.os.RawBytes()
require.Equal(t, expectedBuffer, b)
require.Equal(t, 4, p)
@@ -298,15 +298,15 @@ func TestEncodeWithTimeUnit(t *testing.T) {
dp ts.Datapoint
tu xtime.Unit
}{
- {ts.Datapoint{startTime, 12}, xtime.Second},
- {ts.Datapoint{startTime.Add(time.Second * 60), 12}, xtime.Second},
- {ts.Datapoint{startTime.Add(time.Second * 120), 24}, xtime.Second},
- {ts.Datapoint{startTime.Add(-time.Second * 76), 24}, xtime.Second},
- {ts.Datapoint{startTime.Add(-time.Second * 16), 24}, xtime.Second},
- {ts.Datapoint{startTime.Add(-time.Nanosecond * 15500000000), 15}, xtime.Nanosecond},
- {ts.Datapoint{startTime.Add(-time.Millisecond * 1400), 12}, xtime.Millisecond},
- {ts.Datapoint{startTime.Add(-time.Second * 10), 12}, xtime.Second},
- {ts.Datapoint{startTime.Add(time.Second * 10), 12}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime, Value: 12}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 60), Value: 12}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 120), Value: 24}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 76), Value: 24}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 16), Value: 24}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Nanosecond * 15500000000), Value: 15}, xtime.Nanosecond},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Millisecond * 1400), Value: 12}, xtime.Millisecond},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 10), Value: 12}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 10), Value: 12}, xtime.Second},
}
for _, input := range inputs {
@@ -337,13 +337,13 @@ func TestEncodeWithAnnotationAndTimeUnit(t *testing.T) {
ant ts.Annotation
tu xtime.Unit
}{
- {ts.Datapoint{startTime, 12}, []byte{0xa}, xtime.Second},
- {ts.Datapoint{startTime.Add(time.Second * 60), 12}, nil, xtime.Second},
- {ts.Datapoint{startTime.Add(time.Second * 120), 24}, nil, xtime.Second},
- {ts.Datapoint{startTime.Add(-time.Second * 76), 24}, []byte{0x1, 0x2}, xtime.Second},
- {ts.Datapoint{startTime.Add(-time.Second * 16), 24}, nil, xtime.Millisecond},
- {ts.Datapoint{startTime.Add(-time.Millisecond * 15500), 15}, []byte{0x3, 0x4, 0x5}, xtime.Millisecond},
- {ts.Datapoint{startTime.Add(-time.Millisecond * 14000), 12}, nil, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime, Value: 12}, []byte{0xa}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 60), Value: 12}, nil, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 120), Value: 24}, nil, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 76), Value: 24}, []byte{0x1, 0x2}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 16), Value: 24}, nil, xtime.Millisecond},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Millisecond * 15500), Value: 15}, []byte{0x3, 0x4, 0x5}, xtime.Millisecond},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Millisecond * 14000), Value: 12}, nil, xtime.Second},
}
for _, input := range inputs {
@@ -374,7 +374,7 @@ func TestInitTimeUnit(t *testing.T) {
{time.Unix(1, 1000), xtime.Unit(9), xtime.None},
}
for _, input := range inputs {
- require.Equal(t, input.expected, initialTimeUnit(input.start, input.tu))
+ require.Equal(t, input.expected, initialTimeUnit(xtime.ToUnixNano(input.start), input.tu))
}
}
@@ -389,7 +389,7 @@ func TestEncoderResets(t *testing.T) {
_, ok := enc.Stream(ctx)
require.False(t, ok)
- enc.Encode(ts.Datapoint{testStartTime, 12}, xtime.Second, nil)
+ enc.Encode(ts.Datapoint{Timestamp: testStartTime, Value: 12}, xtime.Second, nil)
require.True(t, enc.os.Len() > 0)
now := time.Now()
@@ -397,17 +397,17 @@ func TestEncoderResets(t *testing.T) {
require.Equal(t, 0, enc.os.Len())
_, ok = enc.Stream(ctx)
require.False(t, ok)
- b, _ := enc.os.Rawbytes()
+ b, _ := enc.os.RawBytes()
require.Equal(t, []byte{}, b)
- enc.Encode(ts.Datapoint{now, 13}, xtime.Second, nil)
+ enc.Encode(ts.Datapoint{Timestamp: now, Value: 13}, xtime.Second, nil)
require.True(t, enc.os.Len() > 0)
enc.DiscardReset(now, 0, nil)
require.Equal(t, 0, enc.os.Len())
_, ok = enc.Stream(ctx)
require.False(t, ok)
- b, _ = enc.os.Rawbytes()
+ b, _ = enc.os.RawBytes()
require.Equal(t, []byte{}, b)
}
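Note on the test churn above: these hunks are mechanical fallout from ts.Datapoint gaining a second, nanosecond-resolution timestamp field later in this diff. Positional struct literals stop compiling whenever a field is inserted, while keyed literals keep working. A minimal standalone sketch (this Datapoint is a stand-in, not the real ts.Datapoint):

```go
package main

import (
	"fmt"
	"time"
)

// Datapoint mimics the post-change shape: a new TimestampNanos field sits
// between the old fields, which is why keyed literals are now required.
type Datapoint struct {
	Timestamp      time.Time
	TimestampNanos int64 // new field: nanoseconds since the epoch
	Value          float64
}

func main() {
	now := time.Now()
	// Keyed literals keep compiling when a field is inserted; positional
	// literals like Datapoint{now, 12} break instead.
	dp := Datapoint{Timestamp: now, Value: 12}
	fmt.Println(dp.Value, dp.Timestamp.Unix())
}
```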
diff --git a/src/dbnode/encoding/m3tsz/float_encoder_iterator.go b/src/dbnode/encoding/m3tsz/float_encoder_iterator.go
index fa185095d9..b27f959ac2 100644
--- a/src/dbnode/encoding/m3tsz/float_encoder_iterator.go
+++ b/src/dbnode/encoding/m3tsz/float_encoder_iterator.go
@@ -134,7 +134,7 @@ func (eit *FloatEncoderAndIterator) readNextFloat(stream encoding.IStream) error
cb = (cb << 1) | nextCB
if cb == opcodeContainedValueXOR {
previousLeading, previousTrailing := encoding.LeadingAndTrailingZeros(eit.PrevXOR)
- numMeaningfulBits := 64 - previousLeading - previousTrailing
+ numMeaningfulBits := uint(64 - previousLeading - previousTrailing)
meaningfulBits, err := stream.ReadBits(numMeaningfulBits)
if err != nil {
return err
@@ -153,7 +153,7 @@ func (eit *FloatEncoderAndIterator) readNextFloat(stream encoding.IStream) error
numLeadingZeros := (numLeadingZeroesAndNumMeaningfulBits & bits12To6Mask) >> 6
numMeaningfulBits := (numLeadingZeroesAndNumMeaningfulBits & bits6To0Mask) + 1
- meaningfulBits, err := stream.ReadBits(int(numMeaningfulBits))
+ meaningfulBits, err := stream.ReadBits(uint(numMeaningfulBits))
if err != nil {
return err
}
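The int-to-uint change above recurs throughout this diff: a bit count can never be negative, so ReadBits/PeekBits switch to uint, which also removes int conversions at shift-heavy call sites. A toy bit reader (not the real encoding.IStream) showing the shape of such an API:

```go
package main

import "fmt"

// bitReader is a stand-in for an IStream-like reader; it reads numBits
// most-significant-bit first from a byte slice.
type bitReader struct {
	data []byte
	pos  uint // absolute bit position into data
}

func (r *bitReader) readBits(numBits uint) uint64 {
	var v uint64
	for i := uint(0); i < numBits; i++ {
		byteIdx, bitIdx := r.pos/8, r.pos%8
		bit := (r.data[byteIdx] >> (7 - bitIdx)) & 1
		v = v<<1 | uint64(bit)
		r.pos++
	}
	return v
}

func main() {
	r := &bitReader{data: []byte{0b10110000}}
	fmt.Printf("%03b\n", r.readBits(3)) // 101
}
```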
diff --git a/src/dbnode/encoding/m3tsz/iterator.go b/src/dbnode/encoding/m3tsz/iterator.go
index c206202bad..b2f7e69fa2 100644
--- a/src/dbnode/encoding/m3tsz/iterator.go
+++ b/src/dbnode/encoding/m3tsz/iterator.go
@@ -165,10 +165,10 @@ func (it *readerIterator) readIntValDiff() {
sign = 1.0
}
- it.intVal += sign * float64(it.readBits(int(it.sig)))
+ it.intVal += sign * float64(it.readBits(uint(it.sig)))
}
-func (it *readerIterator) readBits(numBits int) uint64 {
+func (it *readerIterator) readBits(numBits uint) uint64 {
if !it.hasNext() {
return 0
}
@@ -183,14 +183,16 @@ func (it *readerIterator) readBits(numBits int) uint64 {
func (it *readerIterator) Current() (ts.Datapoint, xtime.Unit, ts.Annotation) {
if !it.intOptimized || it.isFloat {
return ts.Datapoint{
- Timestamp: it.tsIterator.PrevTime,
- Value: math.Float64frombits(it.floatIter.PrevFloatBits),
+ Timestamp: it.tsIterator.PrevTime.ToTime(),
+ TimestampNanos: it.tsIterator.PrevTime,
+ Value: math.Float64frombits(it.floatIter.PrevFloatBits),
}, it.tsIterator.TimeUnit, it.tsIterator.PrevAnt
}
return ts.Datapoint{
- Timestamp: it.tsIterator.PrevTime,
- Value: convertFromIntFloat(it.intVal, it.mult),
+ Timestamp: it.tsIterator.PrevTime.ToTime(),
+ TimestampNanos: it.tsIterator.PrevTime,
+ Value: convertFromIntFloat(it.intVal, it.mult),
}, it.tsIterator.TimeUnit, it.tsIterator.PrevAnt
}
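Current() now fills both Timestamp and TimestampNanos from the iterator's integer nanosecond state, so callers can use whichever form is cheaper without reconverting. A standalone sketch of the conversion, assuming a UnixNano shaped like x/time's (an int64 of nanoseconds since the epoch):

```go
package main

import (
	"fmt"
	"time"
)

// UnixNano mirrors x/time's UnixNano: nanoseconds since the epoch as an
// int64, cheaper to compare and add than time.Time.
type UnixNano int64

// ToTime converts back to time.Time for callers that still want it.
func (u UnixNano) ToTime() time.Time { return time.Unix(0, int64(u)) }

func main() {
	prev := UnixNano(1427162462 * int64(time.Second))
	// Current() fills both representations so callers can pick either
	// form without converting on every access.
	fmt.Println(prev.ToTime().UTC(), int64(prev))
}
```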
diff --git a/src/dbnode/encoding/m3tsz/iterator_test.go b/src/dbnode/encoding/m3tsz/iterator_test.go
index e6b3cd15a2..d5550e59dd 100644
--- a/src/dbnode/encoding/m3tsz/iterator_test.go
+++ b/src/dbnode/encoding/m3tsz/iterator_test.go
@@ -176,13 +176,13 @@ func TestReaderIteratorNextNoAnnotation(t *testing.T) {
}
startTime := time.Unix(1427162462, 0)
inputs := []ts.Datapoint{
- {startTime, 12},
- {startTime.Add(time.Second * 60), 12},
- {startTime.Add(time.Second * 120), 24},
- {startTime.Add(-time.Second * 76), 24},
- {startTime.Add(-time.Second * 16), 24},
- {startTime.Add(time.Second * 2092), 15},
- {startTime.Add(time.Second * 4200), 12},
+ {Timestamp: startTime, Value: 12},
+ {Timestamp: startTime.Add(time.Second * 60), Value: 12},
+ {Timestamp: startTime.Add(time.Second * 120), Value: 24},
+ {Timestamp: startTime.Add(-time.Second * 76), Value: 24},
+ {Timestamp: startTime.Add(-time.Second * 16), Value: 24},
+ {Timestamp: startTime.Add(time.Second * 2092), Value: 15},
+ {Timestamp: startTime.Add(time.Second * 4200), Value: 12},
}
it := getTestReaderIterator(rawBytes)
for i := 0; i < len(inputs); i++ {
@@ -222,13 +222,13 @@ func TestReaderIteratorNextWithAnnotation(t *testing.T) {
dp ts.Datapoint
ant ts.Annotation
}{
- {ts.Datapoint{startTime, 12}, []byte{0xa}},
- {ts.Datapoint{startTime.Add(time.Second * 60), 12}, nil},
- {ts.Datapoint{startTime.Add(time.Second * 120), 24}, nil},
- {ts.Datapoint{startTime.Add(-time.Second * 76), 24}, nil},
- {ts.Datapoint{startTime.Add(-time.Second * 16), 24}, []byte{0x1, 0x2}},
- {ts.Datapoint{startTime.Add(time.Second * 2092), 15}, nil},
- {ts.Datapoint{startTime.Add(time.Second * 4200), 12}, nil},
+ {ts.Datapoint{Timestamp: startTime, Value: 12}, []byte{0xa}},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 60), Value: 12}, nil},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 120), Value: 24}, nil},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 76), Value: 24}, nil},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 16), Value: 24}, []byte{0x1, 0x2}},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 2092), Value: 15}, nil},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 4200), Value: 12}, nil},
}
it := getTestReaderIterator(rawBytes)
for i := 0; i < len(inputs); i++ {
@@ -271,15 +271,15 @@ func TestReaderIteratorNextWithTimeUnit(t *testing.T) {
dp ts.Datapoint
tu xtime.Unit
}{
- {ts.Datapoint{startTime, 12}, xtime.Second},
- {ts.Datapoint{startTime.Add(time.Second * 60), 12}, xtime.Second},
- {ts.Datapoint{startTime.Add(time.Second * 120), 24}, xtime.Second},
- {ts.Datapoint{startTime.Add(-time.Second * 76), 24}, xtime.Second},
- {ts.Datapoint{startTime.Add(-time.Second * 16), 24}, xtime.Second},
- {ts.Datapoint{startTime.Add(-time.Nanosecond * 15500000000), 15}, xtime.Nanosecond},
- {ts.Datapoint{startTime.Add(-time.Millisecond * 1400), 12}, xtime.Millisecond},
- {ts.Datapoint{startTime.Add(-time.Second * 10), 12}, xtime.Second},
- {ts.Datapoint{startTime.Add(time.Second * 10), 12}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime, Value: 12}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 60), Value: 12}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 120), Value: 24}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 76), Value: 24}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 16), Value: 24}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Nanosecond * 15500000000), Value: 15}, xtime.Nanosecond},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Millisecond * 1400), Value: 12}, xtime.Millisecond},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 10), Value: 12}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 10), Value: 12}, xtime.Second},
}
it := getTestReaderIterator(rawBytes)
for i := 0; i < len(inputs); i++ {
@@ -316,13 +316,13 @@ func TestReaderIteratorNextWithAnnotationAndTimeUnit(t *testing.T) {
ant ts.Annotation
tu xtime.Unit
}{
- {ts.Datapoint{startTime, 12}, []byte{0xa}, xtime.Second},
- {ts.Datapoint{startTime.Add(time.Second * 60), 12}, nil, xtime.Second},
- {ts.Datapoint{startTime.Add(time.Second * 120), 24}, nil, xtime.Second},
- {ts.Datapoint{startTime.Add(-time.Second * 76), 24}, []byte{0x1, 0x2}, xtime.Second},
- {ts.Datapoint{startTime.Add(-time.Second * 16), 24}, nil, xtime.Millisecond},
- {ts.Datapoint{startTime.Add(-time.Millisecond * 15500), 15}, []byte{0x3, 0x4, 0x5}, xtime.Millisecond},
- {ts.Datapoint{startTime.Add(-time.Millisecond * 14000), 12}, nil, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime, Value: 12}, []byte{0xa}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 60), Value: 12}, nil, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(time.Second * 120), Value: 24}, nil, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 76), Value: 24}, []byte{0x1, 0x2}, xtime.Second},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Second * 16), Value: 24}, nil, xtime.Millisecond},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Millisecond * 15500), Value: 15}, []byte{0x3, 0x4, 0x5}, xtime.Millisecond},
+ {ts.Datapoint{Timestamp: startTime.Add(-time.Millisecond * 14000), Value: 12}, nil, xtime.Second},
}
it := getTestReaderIterator(rawBytes)
for i := 0; i < len(inputs); i++ {
diff --git a/src/dbnode/encoding/m3tsz/roundtrip_test.go b/src/dbnode/encoding/m3tsz/roundtrip_test.go
index e6471026da..1e8356bb47 100644
--- a/src/dbnode/encoding/m3tsz/roundtrip_test.go
+++ b/src/dbnode/encoding/m3tsz/roundtrip_test.go
@@ -198,7 +198,7 @@ func generateDataPoints(numPoints int, timeUnit time.Duration, numDig, numDec in
currentTime := time.Unix(startTime, 0)
endTime := testStartTime.Add(2 * time.Hour)
currentValue := 1.0
- res := []ts.Datapoint{{currentTime, currentValue}}
+ res := []ts.Datapoint{{currentTime, xtime.ToUnixNano(currentTime), currentValue}}
for i := 1; i < numPoints; i++ {
currentTime = currentTime.Add(time.Second * time.Duration(rand.Intn(1200)))
currentValue = testgen.GenerateFloatVal(r, numDig, numDec)
@@ -216,7 +216,7 @@ func generateMixedDatapoints(numPoints int, timeUnit time.Duration) []ts.Datapoi
currentTime := time.Unix(startTime, 0)
endTime := testStartTime.Add(2 * time.Hour)
currentValue := testgen.GenerateFloatVal(r, 3, 16)
- res := []ts.Datapoint{{currentTime, currentValue}}
+ res := []ts.Datapoint{{currentTime, xtime.ToUnixNano(currentTime), currentValue}}
for i := 1; i < numPoints; i++ {
currentTime = currentTime.Add(time.Second * time.Duration(r.Intn(7200)))
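These fixtures call xtime.ToUnixNano at the boundary where a time.Time enters the encoding layer; from then on, bounds and comparisons are plain integer operations. A sketch of that conversion under the same assumption about UnixNano's representation:

```go
package main

import (
	"fmt"
	"time"
)

type UnixNano int64

// ToUnixNano mirrors xtime.ToUnixNano: convert once at the boundary,
// after which range checks are integer comparisons.
func ToUnixNano(t time.Time) UnixNano { return UnixNano(t.UnixNano()) }

func main() {
	start := time.Unix(1427162462, 0)
	end := start.Add(2 * time.Hour)
	fmt.Println(ToUnixNano(start) < ToUnixNano(end)) // true
}
```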
diff --git a/src/dbnode/encoding/m3tsz/timestamp_encoder.go b/src/dbnode/encoding/m3tsz/timestamp_encoder.go
index 7e8e22fb09..196e6b399e 100644
--- a/src/dbnode/encoding/m3tsz/timestamp_encoder.go
+++ b/src/dbnode/encoding/m3tsz/timestamp_encoder.go
@@ -54,7 +54,7 @@ func NewTimestampEncoder(
start time.Time, timeUnit xtime.Unit, opts encoding.Options) TimestampEncoder {
return TimestampEncoder{
PrevTime: start,
- TimeUnit: initialTimeUnit(start, timeUnit),
+ TimeUnit: initialTimeUnit(xtime.ToUnixNano(start), timeUnit),
Options: opts,
}
}
@@ -180,7 +180,7 @@ func (enc *TimestampEncoder) writeDeltaOfDeltaTimeUnitUnchanged(
}
deltaOfDelta := xtime.ToNormalizedDuration(curDelta-prevDelta, u)
- tes, exists := enc.Options.TimeEncodingSchemes()[timeUnit]
+ tes, exists := enc.Options.TimeEncodingSchemes().SchemeForUnit(timeUnit)
if !exists {
return fmt.Errorf("time encoding scheme for time unit %v doesn't exist", timeUnit)
}
@@ -205,16 +205,14 @@ func (enc *TimestampEncoder) writeDeltaOfDeltaTimeUnitUnchanged(
return nil
}
-func initialTimeUnit(start time.Time, tu xtime.Unit) xtime.Unit {
+func initialTimeUnit(start xtime.UnixNano, tu xtime.Unit) xtime.Unit {
tv, err := tu.Value()
if err != nil {
return xtime.None
}
// If we want to use tu as the time unit for start, start must
// be a multiple of tu.
- startInNano := xtime.ToNormalizedTime(start, time.Nanosecond)
- tvInNano := xtime.ToNormalizedDuration(tv, time.Nanosecond)
- if startInNano%tvInNano == 0 {
+ if start%xtime.UnixNano(tv) == 0 {
return tu
}
return xtime.None
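The rewritten initialTimeUnit collapses two normalization calls into a single modulo on the nanosecond value: a start time may adopt unit tu only if it lies exactly on a tu boundary. A worked standalone sketch of the same check (the types here are stand-ins, not the m3 ones):

```go
package main

import (
	"fmt"
	"time"
)

type UnixNano int64

// initialTimeUnit returns tu only when start falls exactly on a tu
// boundary, mirroring the simplified check above; 0 plays the role of
// xtime.None in this sketch.
func initialTimeUnit(start UnixNano, tu time.Duration) time.Duration {
	if start%UnixNano(tu) == 0 {
		return tu
	}
	return 0
}

func main() {
	onSecond := UnixNano(5 * int64(time.Second))
	offSecond := onSecond + UnixNano(1500*int64(time.Millisecond))
	fmt.Println(initialTimeUnit(onSecond, time.Second))  // 1s
	fmt.Println(initialTimeUnit(offSecond, time.Second)) // 0s
}
```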
diff --git a/src/dbnode/encoding/m3tsz/timestamp_iterator.go b/src/dbnode/encoding/m3tsz/timestamp_iterator.go
index 1eecfa8b78..ca3402607c 100644
--- a/src/dbnode/encoding/m3tsz/timestamp_iterator.go
+++ b/src/dbnode/encoding/m3tsz/timestamp_iterator.go
@@ -33,7 +33,7 @@ import (
// TimestampIterator encapsulates all the state required for iterating over
// delta-of-delta compressed timestamps.
type TimestampIterator struct {
- PrevTime time.Time
+ PrevTime xtime.UnixNano
PrevTimeDelta time.Duration
PrevAnt ts.Annotation
@@ -48,13 +48,21 @@ type TimestampIterator struct {
// schemes. Setting SkipMarkers to true disables the look ahead behavior
// for situations where looking ahead is not safe.
SkipMarkers bool
+
+ numValueBits uint
+ numBits uint
+ markerEncodingScheme encoding.MarkerEncodingScheme
}
// NewTimestampIterator creates a new TimestampIterator.
func NewTimestampIterator(opts encoding.Options, skipMarkers bool) TimestampIterator {
+ mes := opts.MarkerEncodingScheme()
return TimestampIterator{
- Opts: opts,
- SkipMarkers: skipMarkers,
+ Opts: opts,
+ SkipMarkers: skipMarkers,
+ numValueBits: uint(mes.NumValueBits()),
+ numBits: uint(mes.NumOpcodeBits() + mes.NumValueBits()),
+ markerEncodingScheme: mes,
}
}
@@ -66,7 +74,7 @@ func (it *TimestampIterator) ReadTimestamp(stream encoding.IStream) (bool, bool,
first = false
err error
)
- if it.PrevTime.IsZero() {
+ if it.PrevTime == 0 {
first = true
err = it.readFirstTimestamp(stream)
} else {
@@ -110,11 +118,10 @@ func (it *TimestampIterator) readFirstTimestamp(stream encoding.IStream) error {
return err
}
- nt := int64(ntBits)
// NB(xichen): first time stamp is always normalized to nanoseconds.
- st := xtime.FromNormalizedTime(nt, time.Nanosecond)
+ nt := xtime.UnixNano(ntBits)
if it.TimeUnit == xtime.None {
- it.TimeUnit = initialTimeUnit(st, it.Opts.DefaultTimeUnit())
+ it.TimeUnit = initialTimeUnit(nt, it.Opts.DefaultTimeUnit())
}
err = it.readNextTimestamp(stream)
@@ -122,7 +129,7 @@ func (it *TimestampIterator) readFirstTimestamp(stream encoding.IStream) error {
return err
}
- it.PrevTime = st.Add(it.PrevTimeDelta)
+ it.PrevTime = nt + xtime.UnixNano(it.PrevTimeDelta)
return nil
}
@@ -133,37 +140,35 @@ func (it *TimestampIterator) readNextTimestamp(stream encoding.IStream) error {
}
it.PrevTimeDelta += dod
- it.PrevTime = it.PrevTime.Add(it.PrevTimeDelta)
+ it.PrevTime = it.PrevTime + xtime.UnixNano(it.PrevTimeDelta)
return nil
}
func (it *TimestampIterator) tryReadMarker(stream encoding.IStream) (time.Duration, bool, error) {
- mes := it.Opts.MarkerEncodingScheme()
- numBits := mes.NumOpcodeBits() + mes.NumValueBits()
- opcodeAndValue, success := it.tryPeekBits(stream, numBits)
+ opcodeAndValue, success := it.tryPeekBits(stream, it.numBits)
if !success {
return 0, false, nil
}
- opcode := opcodeAndValue >> uint(mes.NumValueBits())
- if opcode != mes.Opcode() {
+ opcode := opcodeAndValue >> it.numValueBits
+ if opcode != it.markerEncodingScheme.Opcode() {
return 0, false, nil
}
var (
- valueMask = (1 << uint(mes.NumValueBits())) - 1
+ valueMask = (1 << it.numValueBits) - 1
markerValue = int64(opcodeAndValue & uint64(valueMask))
)
switch encoding.Marker(markerValue) {
- case mes.EndOfStream():
- _, err := stream.ReadBits(numBits)
+ case it.markerEncodingScheme.EndOfStream():
+ _, err := stream.ReadBits(it.numBits)
if err != nil {
return 0, false, err
}
it.Done = true
return 0, true, nil
- case mes.Annotation():
- _, err := stream.ReadBits(numBits)
+ case it.markerEncodingScheme.Annotation():
+ _, err := stream.ReadBits(it.numBits)
if err != nil {
return 0, false, err
}
@@ -176,8 +181,8 @@ func (it *TimestampIterator) tryReadMarker(stream encoding.IStream) (time.Durati
return 0, false, err
}
return markerOrDOD, true, nil
- case mes.TimeUnit():
- _, err := stream.ReadBits(numBits)
+ case it.markerEncodingScheme.TimeUnit():
+ _, err := stream.ReadBits(it.numBits)
if err != nil {
return 0, false, err
}
@@ -210,7 +215,7 @@ func (it *TimestampIterator) readMarkerOrDeltaOfDelta(stream encoding.IStream) (
}
}
- tes, exists := it.Opts.TimeEncodingSchemes()[it.TimeUnit]
+ tes, exists := it.Opts.TimeEncodingSchemes().SchemeForUnit(it.TimeUnit)
if !exists {
return 0, fmt.Errorf("time encoding scheme for time unit %v doesn't exist", it.TimeUnit)
}
@@ -249,12 +254,12 @@ func (it *TimestampIterator) readDeltaOfDelta(
cb = (cb << 1) | nextCB
if cb == buckets[i].Opcode() {
- dodBits, err := stream.ReadBits(buckets[i].NumValueBits())
+ dodBits, err := stream.ReadBits(uint(buckets[i].NumValueBits()))
if err != nil {
return 0, err
}
- dod := encoding.SignExtend(dodBits, buckets[i].NumValueBits())
+ dod := encoding.SignExtend(dodBits, uint(buckets[i].NumValueBits()))
timeUnit, err := it.TimeUnit.Value()
if err != nil {
return 0, nil
@@ -264,12 +269,11 @@ func (it *TimestampIterator) readDeltaOfDelta(
}
}
- numValueBits := tes.DefaultBucket().NumValueBits()
+ numValueBits := uint(tes.DefaultBucket().NumValueBits())
dodBits, err := stream.ReadBits(numValueBits)
if err != nil {
return 0, err
}
-
dod := encoding.SignExtend(dodBits, numValueBits)
timeUnit, err := it.TimeUnit.Value()
if err != nil {
@@ -312,7 +316,7 @@ func (it *TimestampIterator) readVarint(stream encoding.IStream) (int, error) {
return int(res), err
}
-func (it *TimestampIterator) tryPeekBits(stream encoding.IStream, numBits int) (uint64, bool) {
+func (it *TimestampIterator) tryPeekBits(stream encoding.IStream, numBits uint) (uint64, bool) {
res, err := stream.PeekBits(numBits)
if err != nil {
return 0, false
diff --git a/src/dbnode/encoding/multi_reader_iterator.go b/src/dbnode/encoding/multi_reader_iterator.go
index 5db68d7b42..c3e986f641 100644
--- a/src/dbnode/encoding/multi_reader_iterator.go
+++ b/src/dbnode/encoding/multi_reader_iterator.go
@@ -145,7 +145,7 @@ func (it *multiReaderIterator) moveIteratorsToNext() {
}
curr := it.iters.at()
- if !curr.Equal(prev) {
+ if curr != prev {
return
}
@@ -181,6 +181,10 @@ func (it *multiReaderIterator) ResetSliceOfSlices(slicesIter xio.ReaderSliceOfSl
it.moveToNext()
}
+func (it *multiReaderIterator) Schema() namespace.SchemaDescr {
+ return it.schemaDesc
+}
+
func (it *multiReaderIterator) Close() {
if it.isClosed() {
return
@@ -242,3 +246,7 @@ func (it *singleSlicesOfSlicesIterator) Size() (int, error) {
}
return size, nil
}
+
+func (it *singleSlicesOfSlicesIterator) Rewind() {
+ it.firstNext = true
+}
diff --git a/src/dbnode/encoding/multi_reader_iterator_test.go b/src/dbnode/encoding/multi_reader_iterator_test.go
index 3dda1440db..a82ab1549f 100644
--- a/src/dbnode/encoding/multi_reader_iterator_test.go
+++ b/src/dbnode/encoding/multi_reader_iterator_test.go
@@ -26,12 +26,12 @@ import (
"testing"
"time"
+ "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/x/xio"
xtime "github.com/m3db/m3/src/x/time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/m3db/m3/src/dbnode/namespace"
)
type testMultiReader struct {
diff --git a/src/dbnode/encoding/null.go b/src/dbnode/encoding/null.go
index 7d353310b1..39b734399a 100644
--- a/src/dbnode/encoding/null.go
+++ b/src/dbnode/encoding/null.go
@@ -51,6 +51,9 @@ func (e *nullEncoder) NumEncoded() int { return 0 }
func (e *nullEncoder) LastEncoded() (ts.Datapoint, error) {
return ts.Datapoint{}, fmt.Errorf("not implemented")
}
+func (e *nullEncoder) LastAnnotation() (ts.Annotation, error) {
+ return nil, fmt.Errorf("not implemented")
+}
func (e *nullEncoder) Len() int { return 0 }
func (e *nullEncoder) Seal() { e.sealed = true }
func (e *nullEncoder) Reset(t time.Time, capacity int, descr namespace.SchemaDescr) {}
diff --git a/src/dbnode/encoding/options.go b/src/dbnode/encoding/options.go
index e334689062..1016be3d53 100644
--- a/src/dbnode/encoding/options.go
+++ b/src/dbnode/encoding/options.go
@@ -30,7 +30,7 @@ import (
const (
defaultDefaultTimeUnit = xtime.Second
defaultByteFieldDictLRUSize = 4
- defaultIStreamReaderSizeM3TSZ = 16
+ defaultIStreamReaderSizeM3TSZ = 8 * 2
defaultIStreamReaderSizeProto = 128
)
@@ -56,7 +56,7 @@ type options struct {
func newOptions() Options {
return &options{
defaultTimeUnit: defaultDefaultTimeUnit,
- timeEncodingSchemes: defaultTimeEncodingSchemes,
+ timeEncodingSchemes: newTimeEncodingSchemes(defaultTimeEncodingSchemes),
markerEncodingScheme: defaultMarkerEncodingScheme,
byteFieldDictLRUSize: defaultByteFieldDictLRUSize,
iStreamReaderSizeM3TSZ: defaultIStreamReaderSizeM3TSZ,
@@ -79,9 +79,9 @@ func (o *options) DefaultTimeUnit() xtime.Unit {
return o.defaultTimeUnit
}
-func (o *options) SetTimeEncodingSchemes(value TimeEncodingSchemes) Options {
+func (o *options) SetTimeEncodingSchemes(value map[xtime.Unit]TimeEncodingScheme) Options {
opts := *o
- opts.timeEncodingSchemes = value
+ opts.timeEncodingSchemes = newTimeEncodingSchemes(value)
return &opts
}
diff --git a/src/dbnode/encoding/ostream.go b/src/dbnode/encoding/ostream.go
index 49eb710c78..3baeb4f2de 100644
--- a/src/dbnode/encoding/ostream.go
+++ b/src/dbnode/encoding/ostream.go
@@ -29,7 +29,7 @@ const (
initAllocSize = 1024
)
-// Ostream encapsulates a writable stream.
+// ostream encapsulates a writable stream.
type ostream struct {
// We want to use a checked.Bytes when transferring ownership of the buffer
// of the ostream. Unfortunately, the accounting overhead of going through
@@ -66,12 +66,10 @@ func NewOStream(
return stream
}
-// Len returns the length of the Ostream
func (os *ostream) Len() int {
return len(os.rawBuffer)
}
-// Empty returns whether the Ostream is empty
func (os *ostream) Empty() bool {
return os.Len() == 0 && os.pos == 0
}
@@ -132,7 +130,6 @@ func (os *ostream) fillUnused(v byte) {
os.rawBuffer[os.lastIndex()] |= v >> uint(os.pos)
}
-// WriteBit writes the last bit of v.
func (os *ostream) WriteBit(v Bit) {
v <<= 7
if !os.hasUnusedBits() {
@@ -143,7 +140,6 @@ func (os *ostream) WriteBit(v Bit) {
os.pos++
}
-// WriteByte writes the last byte of v.
func (os *ostream) WriteByte(v byte) {
if !os.hasUnusedBits() {
os.grow(v, 8)
@@ -153,7 +149,6 @@ func (os *ostream) WriteByte(v byte) {
os.grow(v<<uint(8-os.pos), os.pos)
}
diff --git a/src/dbnode/encoding/proto/encoder.go b/src/dbnode/encoding/proto/encoder.go
@@ … @@ func (enc *Encoder) encodeBytesValue(i int, val []byte) error {
if … > 0 && hash == lastState.hash {
- streamBytes, _ := enc.stream.Rawbytes()
+ streamBytes, _ := enc.stream.RawBytes()
match, err := enc.bytesMatchEncodedDictionaryValue(
streamBytes, lastState, val)
if err != nil {
@@ -609,7 +622,7 @@ func (enc *Encoder) encodeBytesValue(i int, val []byte) error {
// Bytes changed control bit.
enc.stream.WriteBit(opCodeChange)
- streamBytes, _ := enc.stream.Rawbytes()
+ streamBytes, _ := enc.stream.RawBytes()
for j, state := range customField.bytesFieldDict {
if hash != state.hash {
continue
@@ -662,7 +675,7 @@ func (enc *Encoder) encodeBytesValue(i int, val []byte) error {
enc.padToNextByte()
// Track the byte position we're going to start at so we can store it in the LRU after.
- streamBytes, _ = enc.stream.Rawbytes()
+ streamBytes, _ = enc.stream.RawBytes()
bytePos := len(streamBytes)
// Write the actual bytes.
@@ -800,7 +813,7 @@ func (enc *Encoder) bytesMatchEncodedDictionaryValue(
// reaches the beginning of the next byte. This allows us to begin encoding data
// with the guarantee that we're aligned at a physical byte boundary.
func (enc *Encoder) padToNextByte() {
- _, bitPos := enc.stream.Rawbytes()
+ _, bitPos := enc.stream.RawBytes()
for bitPos%8 != 0 {
enc.stream.WriteBit(0)
bitPos++
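padToNextByte, whose RawBytes call is renamed above, emits zero bits until the write position is byte-aligned so byte-oriented data can be written directly afterwards. A minimal sketch of that alignment loop over a bare bit position:

```go
package main

import "fmt"

// padToNextByte mirrors the alignment loop in the proto encoder: write
// zero bits until the bit position is a multiple of 8. writeBit is a
// stand-in for the OStream's WriteBit.
func padToNextByte(bitPos int, writeBit func(bit byte)) int {
	for bitPos%8 != 0 {
		writeBit(0)
		bitPos++
	}
	return bitPos
}

func main() {
	pos := padToNextByte(13, func(byte) {})
	fmt.Println(pos) // 16: three zero bits were written
}
```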
diff --git a/src/dbnode/encoding/proto/int_encoder_iterator.go b/src/dbnode/encoding/proto/int_encoder_iterator.go
index 1daee6bd93..53dc4074eb 100644
--- a/src/dbnode/encoding/proto/int_encoder_iterator.go
+++ b/src/dbnode/encoding/proto/int_encoder_iterator.go
@@ -224,7 +224,7 @@ func (eit *intEncoderAndIterator) readIntValDiff(stream encoding.IStream) error
itErrPrefix, err)
}
- numSig := int(eit.intSigBitsTracker.NumSig)
+ numSig := uint(eit.intSigBitsTracker.NumSig)
diffSigBits, err := stream.ReadBits(numSig)
if err != nil {
return fmt.Errorf(
diff --git a/src/dbnode/encoding/proto/iterator.go b/src/dbnode/encoding/proto/iterator.go
index a5d4fd9457..54dbb68eda 100644
--- a/src/dbnode/encoding/proto/iterator.go
+++ b/src/dbnode/encoding/proto/iterator.go
@@ -224,7 +224,8 @@ func (it *iterator) Next() bool {
func (it *iterator) Current() (ts.Datapoint, xtime.Unit, ts.Annotation) {
var (
dp = ts.Datapoint{
- Timestamp: it.tsIterator.PrevTime,
+ Timestamp: it.tsIterator.PrevTime.ToTime(),
+ TimestampNanos: it.tsIterator.PrevTime,
}
unit = it.tsIterator.TimeUnit
)
@@ -335,7 +336,7 @@ func (it *iterator) readCustomFieldsSchema() error {
}
for i := 1; i <= int(numCustomFields); i++ {
- fieldTypeBits, err := it.stream.ReadBits(numBitsToEncodeCustomType)
+ fieldTypeBits, err := it.stream.ReadBits(uint(numBitsToEncodeCustomType))
if err != nil {
return err
}
@@ -545,7 +546,7 @@ func (it *iterator) readBytesValue(i int, customField customFieldState) error {
if valueInDictControlBit == opCodeInterpretSubsequentBitsAsLRUIndex {
dictIdxBits, err := it.stream.ReadBits(
- numBitsRequiredForNumUpToN(it.byteFieldDictLRUSize))
+ uint(numBitsRequiredForNumUpToN(it.byteFieldDictLRUSize)))
if err != nil {
return fmt.Errorf(
"%s error trying to read bytes dict idx: %v",
@@ -860,7 +861,7 @@ func (it *iterator) nextToBeEvicted(fieldIdx int) []byte {
return dict[0]
}
-func (it *iterator) readBits(numBits int) (uint64, error) {
+func (it *iterator) readBits(numBits uint) (uint64, error) {
res, err := it.stream.ReadBits(numBits)
if err != nil {
return 0, err
diff --git a/src/dbnode/encoding/scheme.go b/src/dbnode/encoding/scheme.go
index 16e2eef61e..efcbcee138 100644
--- a/src/dbnode/encoding/scheme.go
+++ b/src/dbnode/encoding/scheme.go
@@ -125,6 +125,21 @@ type timeEncodingScheme struct {
defaultBucket TimeBucket
}
+// newTimeEncodingSchemes converts the unit-to-scheme mapping
+// to the underlying TimeEncodingSchemes used for lookups.
+func newTimeEncodingSchemes(schemes map[xtime.Unit]TimeEncodingScheme) TimeEncodingSchemes {
+ encodingSchemes := make(TimeEncodingSchemes, xtime.UnitCount())
+ for k, v := range schemes {
+ if !k.IsValid() {
+ continue
+ }
+
+ encodingSchemes[k] = v
+ }
+
+ return encodingSchemes
+}
+
// newTimeEncodingScheme creates a new time encoding scheme.
// NB(xichen): numValueBitsForBuckets should be ordered by value in ascending order (smallest value first).
func newTimeEncodingScheme(numValueBitsForBuckets []int, numValueBitsForDefault int) TimeEncodingScheme {
@@ -153,7 +168,22 @@ func (tes *timeEncodingScheme) Buckets() []TimeBucket { return tes.buckets }
func (tes *timeEncodingScheme) DefaultBucket() TimeBucket { return tes.defaultBucket }
// TimeEncodingSchemes defines the time encoding schemes for different time units.
-type TimeEncodingSchemes map[xtime.Unit]TimeEncodingScheme
+type TimeEncodingSchemes []TimeEncodingScheme
+
+// SchemeForUnit returns the corresponding TimeEncodingScheme for the provided unit.
+// Returns false if the unit does not match a scheme or is invalid.
+func (s TimeEncodingSchemes) SchemeForUnit(u xtime.Unit) (TimeEncodingScheme, bool) {
+ if !u.IsValid() || int(u) >= len(s) {
+ return nil, false
+ }
+
+ scheme := s[u]
+ if scheme == nil {
+ return nil, false
+ }
+
+ return s[u], true
+}
// Marker represents the markers.
type Marker byte
@@ -219,7 +249,7 @@ func newMarkerEncodingScheme(
tmp := NewOStream(checked.NewBytes(nil, nil), false, nil)
tmp.WriteBits(uint64(i)>>uint(8-pos), pos)
WriteSpecialMarker(tmp, scheme, endOfStream)
- rawBytes, _ := tmp.Rawbytes()
+ rawBytes, _ := tmp.RawBytes()
tail := checked.NewBytes(rawBytes, nil)
scheme.tails[i][j] = tail
}
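The core change in scheme.go: TimeEncodingSchemes becomes a dense slice indexed by xtime.Unit instead of a map, so the per-datapoint SchemeForUnit lookup is an array index rather than a map hash. A standalone sketch of the same structure (Unit and Scheme here are stand-ins):

```go
package main

import "fmt"

// Unit and Scheme stand in for xtime.Unit and TimeEncodingScheme.
type Unit uint8

const (
	None Unit = iota
	Second
	Millisecond
	unitCount
)

type Scheme struct{ name string }

// Schemes is a dense slice indexed by Unit: SchemeForUnit is an array
// index plus a nil check, with no map hashing on the decode hot path.
type Schemes []*Scheme

func (s Schemes) SchemeForUnit(u Unit) (*Scheme, bool) {
	if int(u) >= len(s) || s[u] == nil {
		return nil, false
	}
	return s[u], true
}

func main() {
	s := make(Schemes, unitCount)
	s[Second] = &Scheme{name: "second"}

	if sc, ok := s.SchemeForUnit(Second); ok {
		fmt.Println(sc.name) // second
	}
	_, ok := s.SchemeForUnit(Millisecond)
	fmt.Println(ok) // false: no scheme registered for this unit
}
```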
diff --git a/src/dbnode/encoding/series_iterator.go b/src/dbnode/encoding/series_iterator.go
index 02f387b654..b27603eea4 100644
--- a/src/dbnode/encoding/series_iterator.go
+++ b/src/dbnode/encoding/series_iterator.go
@@ -32,8 +32,8 @@ type seriesIterator struct {
id ident.ID
nsID ident.ID
tags ident.TagIterator
- start time.Time
- end time.Time
+ start xtime.UnixNano
+ end xtime.UnixNano
iters iterators
multiReaderIters []MultiReaderIterator
err error
@@ -66,11 +66,11 @@ func (it *seriesIterator) Tags() ident.TagIterator {
}
func (it *seriesIterator) Start() time.Time {
- return it.start
+ return it.start.ToTime()
}
func (it *seriesIterator) End() time.Time {
- return it.end
+ return it.end.ToTime()
}
func (it *seriesIterator) Next() bool {
@@ -120,8 +120,8 @@ func (it *seriesIterator) Close() {
}
}
-func (it *seriesIterator) Replicas() []MultiReaderIterator {
- return it.multiReaderIters
+func (it *seriesIterator) Replicas() ([]MultiReaderIterator, error) {
+ return it.multiReaderIters, nil
}
func (it *seriesIterator) Reset(opts SeriesIteratorOptions) {
@@ -139,12 +139,21 @@ func (it *seriesIterator) Reset(opts SeriesIteratorOptions) {
it.iters.reset()
it.start = opts.StartInclusive
it.end = opts.EndExclusive
- if !it.start.IsZero() && !it.end.IsZero() {
+ if it.start != 0 && it.end != 0 {
it.iters.setFilter(it.start, it.end)
}
it.SetIterateEqualTimestampStrategy(opts.IterateEqualTimestampStrategy)
+ replicas := opts.Replicas
+ var err error
+ if consolidator := opts.SeriesIteratorConsolidator; consolidator != nil {
+ replicas, err = consolidator.ConsolidateReplicas(replicas)
+ if err != nil {
+ it.err = err
+ return
+ }
+ }
- for _, replica := range opts.Replicas {
+ for _, replica := range replicas {
if !replica.Next() || !it.iters.push(replica) {
if replica.Err() != nil {
it.err = replica.Err()
@@ -190,7 +199,7 @@ func (it *seriesIterator) moveToNext() {
}
curr := it.iters.at()
- if !curr.Equal(prev) {
+ if curr != prev {
return
}
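Reset now routes the replica set through an optional SeriesIteratorConsolidator before iteration starts. A sketch of the hook shape, using []string in place of []MultiReaderIterator and a hypothetical dedupe consolidator:

```go
package main

import "fmt"

// Consolidator mirrors the shape of the optional hook: when set, it may
// replace the replica set before iteration starts.
type Consolidator interface {
	ConsolidateReplicas(replicas []string) ([]string, error)
}

// dedupe is a hypothetical consolidator that drops duplicate replicas.
type dedupe struct{}

func (dedupe) ConsolidateReplicas(replicas []string) ([]string, error) {
	seen := make(map[string]bool, len(replicas))
	out := make([]string, 0, len(replicas))
	for _, r := range replicas {
		if !seen[r] {
			seen[r] = true
			out = append(out, r)
		}
	}
	return out, nil
}

// reset mirrors the control flow in seriesIterator.Reset: the hook is
// optional, so a nil consolidator means replicas pass through as-is.
func reset(replicas []string, c Consolidator) ([]string, error) {
	if c != nil {
		return c.ConsolidateReplicas(replicas)
	}
	return replicas, nil
}

func main() {
	out, _ := reset([]string{"a", "a", "b"}, dedupe{})
	fmt.Println(out) // [a b]
}
```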
diff --git a/src/dbnode/encoding/series_iterator_accumulator.go b/src/dbnode/encoding/series_iterator_accumulator.go
new file mode 100644
index 0000000000..ac5a9cdcb5
--- /dev/null
+++ b/src/dbnode/encoding/series_iterator_accumulator.go
@@ -0,0 +1,260 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package encoding
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/x/ident"
+ xtime "github.com/m3db/m3/src/x/time"
+)
+
+var _ SeriesIteratorAccumulator = (*seriesIteratorAccumulator)(nil)
+
+type seriesIteratorAccumulator struct {
+ id ident.ID
+ nsID ident.ID
+ start time.Time
+ end time.Time
+ iters iterators
+ tagIterator ident.TagIterator
+ seriesIterators []SeriesIterator
+ err error
+ firstNext bool
+ closed bool
+}
+
+// SeriesAccumulatorOptions are options for a SeriesIteratorAccumulator.
+type SeriesAccumulatorOptions struct {
+ // RetainTags determines if tags should be preserved after the accumulator is
+ // exhausted. If set to true, the accumulator retains a copy of the tags.
+ RetainTags bool
+}
+
+// NewSeriesIteratorAccumulator creates a new series iterator.
+func NewSeriesIteratorAccumulator(
+ iter SeriesIterator,
+ opts SeriesAccumulatorOptions,
+) (SeriesIteratorAccumulator, error) {
+ it := &seriesIteratorAccumulator{
+ // NB: clone id and nsID so that they will be accessible after underlying
+ // iterators are closed.
+ id: ident.StringID(iter.ID().String()),
+ nsID: ident.StringID(iter.Namespace().String()),
+ seriesIterators: make([]SeriesIterator, 0, 2),
+ firstNext: true,
+ }
+
+ if opts.RetainTags {
+ it.tagIterator = iter.Tags().Duplicate()
+ }
+
+ err := it.Add(iter)
+ if err != nil {
+ return nil, err
+ }
+
+ return it, nil
+}
+
+func (it *seriesIteratorAccumulator) Add(iter SeriesIterator) error {
+ if it.err != nil {
+ return it.err
+ }
+
+ if newNs := iter.Namespace(); !newNs.Equal(it.nsID) {
+ return fmt.Errorf("cannot add iterator with namespace %s to accumulator %s",
+ newNs.String(), it.nsID.String())
+ }
+
+ if !iter.Next() || !it.iters.push(iter) {
+ iter.Close()
+ return iter.Err()
+ }
+
+ iterStart := iter.Start()
+ if start := it.start; start.IsZero() || iterStart.Before(start) {
+ it.start = iterStart
+ }
+
+ iterEnd := iter.End()
+ if end := it.end; end.IsZero() || iterEnd.After(end) {
+ it.end = iterEnd
+ }
+
+ it.seriesIterators = append(it.seriesIterators, iter)
+ return nil
+}
+
+func (it *seriesIteratorAccumulator) ID() ident.ID {
+ return it.id
+}
+
+func (it *seriesIteratorAccumulator) Namespace() ident.ID {
+ return it.nsID
+}
+
+func (it *seriesIteratorAccumulator) Tags() ident.TagIterator {
+ if iter := it.tagIterator; iter != nil {
+ return iter
+ }
+ if len(it.seriesIterators) == 0 {
+ return ident.EmptyTagIterator
+ }
+ // NB: the tags for each iterator must be the same, so it's valid to return
+ // from whichever iterator is available.
+ return it.seriesIterators[0].Tags()
+}
+
+func (it *seriesIteratorAccumulator) Start() time.Time {
+ return it.start
+}
+
+func (it *seriesIteratorAccumulator) End() time.Time {
+ return it.end
+}
+
+func (it *seriesIteratorAccumulator) Next() bool {
+ if !it.firstNext {
+ if !it.hasNext() {
+ return false
+ }
+
+ it.moveToNext()
+ }
+
+ it.firstNext = false
+ return it.hasNext()
+}
+
+func (it *seriesIteratorAccumulator) Current() (ts.Datapoint, xtime.Unit, ts.Annotation) {
+ return it.iters.current()
+}
+
+func (it *seriesIteratorAccumulator) Err() error {
+ if it.err != nil {
+ return it.err
+ }
+
+ for _, iter := range it.seriesIterators {
+ if err := iter.Err(); err != nil {
+ it.err = err
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (it *seriesIteratorAccumulator) Close() {
+ if it.isClosed() {
+ return
+ }
+ it.closed = true
+ if it.id != nil {
+ it.id.Finalize()
+ it.id = nil
+ }
+ if it.nsID != nil {
+ it.nsID.Finalize()
+ it.nsID = nil
+ }
+ if it.tagIterator != nil {
+ it.tagIterator.Close()
+ it.tagIterator = nil
+ }
+ it.iters.reset()
+}
+
+func (it *seriesIteratorAccumulator) Replicas() ([]MultiReaderIterator, error) {
+ if l := len(it.seriesIterators); l != 1 {
+ return nil, fmt.Errorf("cannot get replicas for accumulated series "+
+ "iterators: need 1 iterator, have %d", l)
+ }
+ return it.seriesIterators[0].Replicas()
+}
+
+func (it *seriesIteratorAccumulator) Reset(SeriesIteratorOptions) {
+ if it.err == nil {
+ it.err = errors.New("cannot reset a series accumulator")
+ }
+ return
+}
+
+func (it *seriesIteratorAccumulator) SetIterateEqualTimestampStrategy(
+ strategy IterateEqualTimestampStrategy,
+) {
+ it.iters.equalTimesStrategy = strategy
+ for _, iter := range it.seriesIterators {
+ iter.SetIterateEqualTimestampStrategy(strategy)
+ }
+}
+
+func (it *seriesIteratorAccumulator) hasError() bool {
+ return it.err != nil
+}
+
+func (it *seriesIteratorAccumulator) isClosed() bool {
+ return it.closed
+}
+
+func (it *seriesIteratorAccumulator) hasMore() bool {
+ return it.iters.len() > 0
+}
+
+func (it *seriesIteratorAccumulator) hasNext() bool {
+ return !it.hasError() && !it.isClosed() && it.hasMore()
+}
+
+func (it *seriesIteratorAccumulator) moveToNext() {
+ for {
+ prev := it.iters.at()
+ next, err := it.iters.moveToValidNext()
+ if err != nil {
+ it.err = err
+ return
+ }
+ if !next {
+ return
+ }
+
+ curr := it.iters.at()
+ if curr != prev {
+ return
+ }
+
+ // Dedupe by continuing
+ }
+}
+
+func (it *seriesIteratorAccumulator) Stats() (SeriesIteratorStats, error) {
+ approx := 0
+ for _, iter := range it.seriesIterators {
+ stats, err := iter.Stats()
+ if err != nil {
+ return SeriesIteratorStats{}, err
+ }
+ approx += stats.ApproximateSizeInBytes
+ }
+ return SeriesIteratorStats{ApproximateSizeInBytes: approx}, nil
+}
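A usage sketch for the new accumulator, using only the API defined in the file above. It assumes base and next are open SeriesIterators for the same series and namespace (Add rejects mismatched namespaces), and is a fragment meant to compile inside the m3 repo rather than a standalone program:

```go
package example

import (
	"github.com/m3db/m3/src/dbnode/encoding"
)

// mergeSeries accumulates two iterators for the same series into one
// merged, deduplicated stream.
func mergeSeries(base, next encoding.SeriesIterator) error {
	acc, err := encoding.NewSeriesIteratorAccumulator(base, encoding.SeriesAccumulatorOptions{
		RetainTags: true, // keep a copy of the tags usable after Close
	})
	if err != nil {
		return err
	}
	defer acc.Close()

	if err := acc.Add(next); err != nil {
		return err
	}

	for acc.Next() {
		dp, unit, annotation := acc.Current()
		_, _, _ = dp, unit, annotation // consume the merged datapoints
	}
	return acc.Err()
}
```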
diff --git a/src/dbnode/encoding/series_iterator_accumulator_test.go b/src/dbnode/encoding/series_iterator_accumulator_test.go
new file mode 100644
index 0000000000..e00a5520d6
--- /dev/null
+++ b/src/dbnode/encoding/series_iterator_accumulator_test.go
@@ -0,0 +1,294 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package encoding
+
+import (
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/x/ident"
+ xtest "github.com/m3db/m3/src/x/test"
+ xtime "github.com/m3db/m3/src/x/time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type testAccumulatorSeries struct {
+ id string
+ nsID string
+ retainTag bool
+ start time.Time
+ end time.Time
+ input []accumulatorInput
+ expected []testValue
+ expectedErr *testSeriesErr
+}
+
+type accumulatorInput struct {
+ values []testValue
+ id string
+ err error
+}
+
+func TestSeriesIteratorAccumulator(t *testing.T) {
+ testSeriesIteratorAccumulator(t, false)
+}
+
+func TestSeriesIteratorAccumulatorRetainTag(t *testing.T) {
+ testSeriesIteratorAccumulator(t, true)
+}
+
+func testSeriesIteratorAccumulator(t *testing.T, retain bool) {
+ start := time.Now().Truncate(time.Minute)
+ end := start.Add(time.Minute)
+
+ values := []accumulatorInput{
+ {
+ values: []testValue{
+ {1.0, start.Add(1 * time.Second), xtime.Second, []byte{1, 2, 3}},
+ {2.0, start.Add(2 * time.Second), xtime.Second, nil},
+ {3.0, start.Add(6 * time.Second), xtime.Second, nil},
+ },
+ id: "foo1",
+ },
+ {
+ values: []testValue{
+ {1.0, start.Add(1 * time.Second), xtime.Second, []byte{1, 2, 3}},
+ {2.0, start.Add(2 * time.Second), xtime.Second, nil},
+ {3.0, start.Add(3 * time.Second), xtime.Second, nil},
+ },
+ id: "foo2",
+ },
+ {
+ values: []testValue{
+ {3.0, start.Add(1 * time.Millisecond), xtime.Second, nil},
+ {4.0, start.Add(4 * time.Second), xtime.Second, nil},
+ {5.0, start.Add(5 * time.Second), xtime.Second, nil},
+ },
+ id: "foo3",
+ },
+ }
+
+ ex := []testValue{
+ {3.0, start.Add(1 * time.Millisecond), xtime.Second, nil},
+ {1.0, start.Add(1 * time.Second), xtime.Second, []byte{1, 2, 3}},
+ {2.0, start.Add(2 * time.Second), xtime.Second, nil},
+ {3.0, start.Add(3 * time.Second), xtime.Second, nil},
+ {4.0, start.Add(4 * time.Second), xtime.Second, nil},
+ {5.0, start.Add(5 * time.Second), xtime.Second, nil},
+ {3.0, start.Add(6 * time.Second), xtime.Second, nil},
+ }
+
+ test := testAccumulatorSeries{
+ id: "foo1",
+ nsID: "bar",
+ start: start,
+ end: end,
+ retainTag: retain,
+ input: values,
+ expected: ex,
+ }
+
+ assertTestSeriesAccumulatorIterator(t, test)
+}
+
+func TestSingleSeriesIteratorAccumulator(t *testing.T) {
+ start := time.Now().Truncate(time.Minute)
+ end := start.Add(time.Minute)
+
+ values := []accumulatorInput{
+ {
+ values: []testValue{
+ {1.0, start.Add(1 * time.Second), xtime.Second, []byte{1, 2, 3}},
+ {2.0, start.Add(2 * time.Second), xtime.Second, nil},
+ {3.0, start.Add(6 * time.Second), xtime.Second, nil},
+ },
+ id: "foobar",
+ },
+ }
+
+ ex := []testValue{
+ {1.0, start.Add(1 * time.Second), xtime.Second, []byte{1, 2, 3}},
+ {2.0, start.Add(2 * time.Second), xtime.Second, nil},
+ {3.0, start.Add(6 * time.Second), xtime.Second, nil},
+ }
+
+ test := testAccumulatorSeries{
+ id: "foobar",
+ nsID: "bar",
+ start: start,
+ end: end,
+ input: values,
+ expected: ex,
+ }
+
+ assertTestSeriesAccumulatorIterator(t, test)
+}
+
+type newTestSeriesAccumulatorIteratorResult struct {
+ iter *seriesIteratorAccumulator
+ seriesIters []SeriesIterator
+}
+
+func newTestSeriesAccumulatorIterator(
+ t *testing.T,
+ series testAccumulatorSeries,
+) newTestSeriesAccumulatorIteratorResult {
+ iters := make([]SeriesIterator, 0, len(series.input))
+ var acc SeriesIteratorAccumulator
+ for _, r := range series.input {
+ multiIter := newTestMultiIterator(
+ r.values,
+ r.err,
+ )
+
+ iter := NewSeriesIterator(SeriesIteratorOptions{
+ ID: ident.StringID(r.id),
+ Namespace: ident.StringID(series.nsID),
+ Tags: ident.NewTagsIterator(ident.NewTags(
+ ident.StringTag("foo", "bar"), ident.StringTag("qux", "quz"),
+ )),
+ StartInclusive: xtime.ToUnixNano(series.start),
+ EndExclusive: xtime.ToUnixNano(series.end),
+ Replicas: []MultiReaderIterator{multiIter},
+ }, nil)
+
+ iters = append(iters, iter)
+ if acc == nil {
+ a, err := NewSeriesIteratorAccumulator(iter, SeriesAccumulatorOptions{
+ RetainTags: series.retainTag,
+ })
+ require.NoError(t, err)
+ acc = a
+ } else {
+ err := acc.Add(iter)
+ require.NoError(t, err)
+ }
+ }
+
+ accumulator, ok := acc.(*seriesIteratorAccumulator)
+ require.True(t, ok)
+ return newTestSeriesAccumulatorIteratorResult{
+ iter: accumulator,
+ seriesIters: iters,
+ }
+}
+
+func assertTestSeriesAccumulatorIterator(
+ t *testing.T,
+ series testAccumulatorSeries,
+) {
+ newSeriesIter := newTestSeriesAccumulatorIterator(t, series)
+ iter := newSeriesIter.iter
+
+ checkTags := func() {
+ tags := iter.Tags()
+ if tags == nil {
+ return
+ }
+ require.True(t, tags.Next())
+ assert.True(t, tags.Current().Equal(ident.StringTag("foo", "bar")))
+ require.True(t, tags.Next())
+ assert.True(t, tags.Current().Equal(ident.StringTag("qux", "quz")))
+ assert.False(t, tags.Next())
+ assert.NoError(t, tags.Err())
+ tags.Rewind()
+ }
+
+ checkTags()
+ assert.Equal(t, series.id, iter.ID().String())
+ assert.Equal(t, series.nsID, iter.Namespace().String())
+ assert.Equal(t, series.start, iter.Start())
+ assert.Equal(t, series.end, iter.End())
+ for i := 0; i < len(series.expected); i++ {
+ next := iter.Next()
+ if series.expectedErr != nil && i == series.expectedErr.atIdx {
+ assert.Equal(t, false, next)
+ break
+ }
+ require.Equal(t, true, next)
+ dp, unit, annotation := iter.Current()
+ expected := series.expected[i]
+ assert.Equal(t, expected.value, dp.Value)
+ assert.Equal(t, expected.t, dp.Timestamp)
+ assert.Equal(t, expected.unit, unit)
+ assert.Equal(t, expected.annotation, []byte(annotation))
+ checkTags()
+ }
+ // Ensure that further calls to Next return false.
+ for i := 0; i < 2; i++ {
+ assert.Equal(t, false, iter.Next())
+ }
+ if series.expectedErr == nil {
+ assert.NoError(t, iter.Err())
+ } else {
+ assert.Equal(t, series.expectedErr.err, iter.Err())
+ }
+
+ var tagIter ident.TagIterator
+ if series.retainTag {
+ checkTags()
+ tagIter = iter.Tags()
+ } else {
+ assert.Nil(t, iter.Tags())
+ }
+
+ assert.Equal(t, series.id, iter.id.String())
+ assert.Equal(t, series.nsID, iter.nsID.String())
+ iter.Close()
+ if series.retainTag {
+ // Check that the tag iterator was closed.
+ assert.False(t, tagIter.Next())
+ }
+}
+
+func TestAccumulatorMocked(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ start := time.Now()
+ base := NewMockSeriesIterator(ctrl)
+ base.EXPECT().ID().Return(ident.StringID("base")).AnyTimes()
+ base.EXPECT().Namespace().Return(ident.StringID("ns")).AnyTimes()
+ base.EXPECT().Next().Return(true)
+ dp := ts.Datapoint{TimestampNanos: xtime.ToUnixNano(start), Value: 88}
+ base.EXPECT().Current().Return(dp, xtime.Second, nil).AnyTimes()
+ base.EXPECT().Next().Return(false)
+ base.EXPECT().Start().Return(start)
+ base.EXPECT().End().Return(start.Add(time.Hour))
+ base.EXPECT().Err().Return(nil).AnyTimes()
+ base.EXPECT().Close()
+
+ it, err := NewSeriesIteratorAccumulator(base, SeriesAccumulatorOptions{})
+ require.NoError(t, err)
+
+ i := 0
+ for it.Next() {
+ ac, _, _ := it.Current()
+ assert.Equal(t, dp, ac)
+ i++
+ }
+
+ assert.Equal(t, 1, i)
+ it.Close()
+}
diff --git a/src/dbnode/encoding/series_iterator_split_into_blocks_test.go b/src/dbnode/encoding/series_iterator_split_into_blocks_test.go
index 35c96e56f6..5ebc2aadd8 100644
--- a/src/dbnode/encoding/series_iterator_split_into_blocks_test.go
+++ b/src/dbnode/encoding/series_iterator_split_into_blocks_test.go
@@ -34,9 +34,9 @@ import (
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
+ "github.com/m3db/m3/src/dbnode/namespace"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/m3db/m3/src/dbnode/namespace"
)
type Series struct {
@@ -86,8 +86,8 @@ func TestDeconstructAndReconstruct(t *testing.T) {
orig := encoding.NewSeriesIterator(encoding.SeriesIteratorOptions{
ID: ident.StringID("foo"),
Namespace: ident.StringID("namespace"),
- StartInclusive: start,
- EndExclusive: end,
+ StartInclusive: xtime.ToUnixNano(start),
+ EndExclusive: xtime.ToUnixNano(end),
Replicas: []encoding.MultiReaderIterator{multiReader},
}, nil)
@@ -96,8 +96,10 @@ func TestDeconstructAndReconstruct(t *testing.T) {
ID: orig.ID(),
}
+ replicas, err := orig.Replicas()
+ require.NoError(t, err)
// Collect all the replica per-block readers
- for _, replica := range orig.Replicas() {
+ for _, replica := range replicas {
perBlockSliceReaders := replica.Readers()
next := true
for next {
@@ -160,8 +162,8 @@ func TestDeconstructAndReconstruct(t *testing.T) {
ID: orig.ID(),
Namespace: orig.Namespace(),
Tags: orig.Tags(),
- StartInclusive: filterValuesStart,
- EndExclusive: filterValuesEnd,
+ StartInclusive: xtime.ToUnixNano(filterValuesStart),
+ EndExclusive: xtime.ToUnixNano(filterValuesEnd),
Replicas: block.Replicas,
}, nil)
diff --git a/src/dbnode/encoding/series_iterator_test.go b/src/dbnode/encoding/series_iterator_test.go
index 6c7472a4ae..ea60f8b30a 100644
--- a/src/dbnode/encoding/series_iterator_test.go
+++ b/src/dbnode/encoding/series_iterator_test.go
@@ -39,6 +39,7 @@ import (
type testSeries struct {
id string
nsID string
+ retainTag bool
start time.Time
end time.Time
input []inputReplica
@@ -84,13 +85,21 @@ func TestMultiReaderMergesReplicas(t *testing.T) {
},
}
+ expected := []testValue{
+ {1.0, start.Add(1 * time.Second), xtime.Second, []byte{1, 2, 3}},
+ {2.0, start.Add(2 * time.Second), xtime.Second, nil},
+ {3.0, start.Add(3 * time.Second), xtime.Second, nil},
+ {4.0, start.Add(4 * time.Second), xtime.Second, nil},
+ {5.0, start.Add(5 * time.Second), xtime.Second, nil},
+ }
+
test := testSeries{
id: "foo",
nsID: "bar",
start: start,
end: end,
input: values,
- expected: append(values[0].values, values[2].values[1:]...),
+ expected: expected,
}
assertTestSeriesIterator(t, test)
@@ -226,6 +235,43 @@ func TestSeriesIteratorSetIterateEqualTimestampStrategy(t *testing.T) {
DefaultIterateEqualTimestampStrategy)
}
+type testSeriesConsolidator struct {
+ iters []MultiReaderIterator
+}
+
+func (c *testSeriesConsolidator) ConsolidateReplicas(
+ _ []MultiReaderIterator,
+) ([]MultiReaderIterator, error) {
+ return c.iters, nil
+}
+
+func TestSeriesIteratorSetSeriesIteratorConsolidator(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ test := testSeries{
+ id: "foo",
+ nsID: "bar",
+ }
+
+ iter := newTestSeriesIterator(t, test).iter
+ newIter := NewMockMultiReaderIterator(ctrl)
+ newIter.EXPECT().Next().Return(true)
+ newIter.EXPECT().Current().Return(ts.Datapoint{}, xtime.Second, nil).Times(2)
+
+ iter.iters.setFilter(0, 1)
+ consolidator := &testSeriesConsolidator{iters: []MultiReaderIterator{newIter}}
+ oldIter := NewMockMultiReaderIterator(ctrl)
+ oldIters := []MultiReaderIterator{oldIter}
+ iter.multiReaderIters = oldIters
+ assert.Equal(t, oldIter, iter.multiReaderIters[0])
+ iter.Reset(SeriesIteratorOptions{
+ Replicas: oldIters,
+ SeriesIteratorConsolidator: consolidator,
+ })
+ assert.Equal(t, newIter, iter.multiReaderIters[0])
+}
+
type newTestSeriesIteratorResult struct {
iter *seriesIterator
multiReaderIterators []MultiReaderIterator
@@ -248,8 +294,8 @@ func newTestSeriesIterator(
ID: ident.StringID(series.id),
Namespace: ident.StringID(series.nsID),
Tags: ident.EmptyTagIterator,
- StartInclusive: series.start,
- EndExclusive: series.end,
+ StartInclusive: xtime.ToUnixNano(series.start),
+ EndExclusive: xtime.ToUnixNano(series.end),
Replicas: iters,
}, nil)
diff --git a/src/dbnode/encoding/types.go b/src/dbnode/encoding/types.go
index c66b301534..0b88c32b8e 100644
--- a/src/dbnode/encoding/types.go
+++ b/src/dbnode/encoding/types.go
@@ -29,7 +29,7 @@ import (
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
"github.com/m3db/m3/src/x/checked"
- "github.com/m3db/m3/src/x/context"
+ xcontext "github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/serialize"
@@ -55,7 +55,7 @@ type Encoder interface {
// passed to this method is closed, so to avoid not returning the
// encoder's buffer back to the pool when it is completed be sure to call
// close on the context eventually.
- Stream(ctx context.Context) (xio.SegmentReader, bool)
+ Stream(ctx xcontext.Context) (xio.SegmentReader, bool)
// NumEncoded returns the number of encoded datapoints.
NumEncoded() int
@@ -65,6 +65,11 @@ type Encoder interface {
// an error is returned.
LastEncoded() (ts.Datapoint, error)
+ // LastAnnotation returns the annotation of the last encoded datapoint,
+ // useful for de-duplicating encoded values. If there are no previously
+ // encoded values an error is returned.
+ LastAnnotation() (ts.Annotation, error)
+
// Len returns the length of the encoded stream as returned by a call to Stream().
Len() int
@@ -95,7 +100,7 @@ type Options interface {
DefaultTimeUnit() xtime.Unit
// SetTimeEncodingSchemes sets the time encoding schemes for different time units.
- SetTimeEncodingSchemes(value TimeEncodingSchemes) Options
+ SetTimeEncodingSchemes(value map[xtime.Unit]TimeEncodingScheme) Options
// TimeEncodingSchemes returns the time encoding schemes for different time units.
TimeEncodingSchemes() TimeEncodingSchemes
@@ -146,27 +151,31 @@ type Options interface {
// ByteFieldDictionaryLRUSize returns the ByteFieldDictionaryLRUSize.
ByteFieldDictionaryLRUSize() int
- // SetIStreamReaderSizeM3TSZ sets the istream bufio reader size for m3tsz encoding iteration.
+ // SetIStreamReaderSizeM3TSZ sets the istream bufio reader size
+ // for m3tsz encoding iteration.
SetIStreamReaderSizeM3TSZ(value int) Options
- // IStreamReaderSizeM3TSZ returns the istream bufio reader size for m3tsz encoding iteration.
+ // IStreamReaderSizeM3TSZ returns the istream bufio reader size
+ // for m3tsz encoding iteration.
IStreamReaderSizeM3TSZ() int
- // SetIStreamReaderSizeProto sets the istream bufio reader size for proto encoding iteration.
+ // SetIStreamReaderSizeProto sets the istream bufio reader size
+ // for proto encoding iteration.
SetIStreamReaderSizeProto(value int) Options
- // SetIStreamReaderSizeProto returns the istream bufio reader size for proto encoding iteration.
+ // SetIStreamReaderSizeProto returns the istream bufio reader size
+ // for proto encoding iteration.
IStreamReaderSizeProto() int
}
// Iterator is the generic interface for iterating over encoded data.
type Iterator interface {
- // Next moves to the next item
+ // Next moves to the next item.
Next() bool
- // Current returns the value as well as the annotation associated with the current datapoint.
- // Users should not hold on to the returned Annotation object as it may get invalidated when
- // the iterator calls Next().
+ // Current returns the value as well as the annotation associated with the
+ // current datapoint. Users should not hold on to the returned Annotation
+ // object as it may get invalidated when the iterator calls Next().
Current() (ts.Datapoint, xtime.Unit, ts.Annotation)
// Err returns the error encountered
@@ -180,7 +189,8 @@ type Iterator interface {
type ReaderIterator interface {
Iterator
- // Reset resets the iterator to read from a new reader with a new schema (for schema aware iterators).
+ // Reset resets the iterator to read from a new reader with
+ // a new schema (for schema aware iterators).
Reset(reader io.Reader, schema namespace.SchemaDescr)
}
@@ -205,6 +215,18 @@ type MultiReaderIterator interface {
// Readers exposes the underlying ReaderSliceOfSlicesIterator
// for this MultiReaderIterator.
Readers() xio.ReaderSliceOfSlicesIterator
+
+ // Schema exposes the underlying SchemaDescr for this MultiReaderIterator.
+ Schema() namespace.SchemaDescr
+}
+
+// SeriesIteratorAccumulator is an accumulator for SeriesIterators that
+// gathers incoming iterators and builds a unified SeriesIterator.
+type SeriesIteratorAccumulator interface {
+ SeriesIterator
+
+ // Add adds a series iterator.
+ Add(it SeriesIterator) error
}
// SeriesIterator is an iterator that iterates over a set of iterators from
@@ -220,9 +242,6 @@ type SeriesIterator interface {
// Namespace gets the namespace of the series.
Namespace() ident.ID
- // Tags returns an iterator over the tags associated with the ID.
- Tags() ident.TagIterator
-
// Start returns the start time filter specified for the iterator.
Start() time.Time
@@ -243,12 +262,15 @@ type SeriesIterator interface {
// from the iterator immediately.
SetIterateEqualTimestampStrategy(strategy IterateEqualTimestampStrategy)
+ // Stats provides information for this SeriesIterator.
+ Stats() (SeriesIteratorStats, error)
+
// Replicas exposes the underlying MultiReaderIterator slice
// for this SeriesIterator.
- Replicas() []MultiReaderIterator
+ Replicas() ([]MultiReaderIterator, error)
- // Stats provides information for this SeriesIterator.
- Stats() (SeriesIteratorStats, error)
+ // Tags returns an iterator over the tags associated with the ID.
+ Tags() ident.TagIterator
}
// SeriesIteratorStats contains information about a SeriesIterator.
@@ -258,40 +280,48 @@ type SeriesIteratorStats struct {
ApproximateSizeInBytes int
}
+// SeriesIteratorConsolidator optionally defines methods to consolidate series iterators.
+type SeriesIteratorConsolidator interface {
+ // ConsolidateReplicas consolidates MultiReaderIterator slices.
+ ConsolidateReplicas(replicas []MultiReaderIterator) ([]MultiReaderIterator, error)
+}
+
// SeriesIteratorOptions is a set of options for using a series iterator.
type SeriesIteratorOptions struct {
ID ident.ID
Namespace ident.ID
Tags ident.TagIterator
Replicas []MultiReaderIterator
- StartInclusive time.Time
- EndExclusive time.Time
+ StartInclusive xtime.UnixNano
+ EndExclusive xtime.UnixNano
IterateEqualTimestampStrategy IterateEqualTimestampStrategy
+ SeriesIteratorConsolidator SeriesIteratorConsolidator
}
-// SeriesIterators is a collection of SeriesIterator that can close all iterators
+// SeriesIterators is a collection of SeriesIterator that can
+// close all iterators.
type SeriesIterators interface {
- // Iters returns the array of series iterators
+ // Iters returns the array of series iterators.
Iters() []SeriesIterator
- // Len returns the length of the iters
+ // Len returns the count of iterators in the collection.
Len() int
- // Close closes all iterators contained
+ // Close closes all iterators contained within the collection.
Close()
}
-// MutableSeriesIterators is a mutable SeriesIterators
+// MutableSeriesIterators is a mutable SeriesIterators.
type MutableSeriesIterators interface {
SeriesIterators
- // Reset the iters collection to a size for reuse
+ // Reset the iters collection to a size for reuse.
Reset(size int)
- // Cap returns the capacity of the iters
+ // Cap returns the capacity of the iters.
Cap() int
- // SetAt an index a SeriesIterator
+ // SetAt sets a SeriesIterator to the given index.
SetAt(idx int, iter SeriesIterator)
}
@@ -301,7 +331,7 @@ type Decoder interface {
Decode(reader io.Reader) ReaderIterator
}
-// NewDecoderFn creates a new decoder
+// NewDecoderFn creates a new decoder.
type NewDecoderFn func() Decoder
// EncoderAllocate allocates an encoder for a pool.
@@ -312,119 +342,168 @@ type ReaderIteratorAllocate func(reader io.Reader, descr namespace.SchemaDescr)
// IStream encapsulates a readable stream.
type IStream interface {
+ // Read reads len(b) bytes.
Read([]byte) (int, error)
+
+ // ReadBit reads the next Bit.
ReadBit() (Bit, error)
+
+ // ReadByte reads the next Byte.
ReadByte() (byte, error)
- ReadBits(numBits int) (uint64, error)
- PeekBits(numBits int) (uint64, error)
- RemainingBitsInCurrentByte() int
+
+ // ReadBits reads the next Bits.
+ ReadBits(numBits uint) (uint64, error)
+
+ // PeekBits looks at the next Bits, but doesn't move the pos.
+ PeekBits(numBits uint) (uint64, error)
+
+ // RemainingBitsInCurrentByte returns the number of bits remaining to
+ // be read in the current byte.
+ RemainingBitsInCurrentByte() uint
+
+ // Reset resets the IStream.
Reset(r io.Reader)
}
// OStream encapsulates a writable stream.
type OStream interface {
+ // Len returns the length of the OStream.
Len() int
+ // Empty returns whether the OStream is empty.
Empty() bool
+
+ // WriteBit writes the last bit of v.
WriteBit(v Bit)
+
+ // WriteBits writes the lowest numBits of v to the stream, starting
+ // from the most significant bit to the least significant bit.
WriteBits(v uint64, numBits int)
+
+ // WriteByte writes the last byte of v.
WriteByte(v byte)
+
+ // WriteBytes writes a byte slice.
WriteBytes(bytes []byte)
+
+ // Write writes a byte slice. This method exists in addition to WriteBytes()
+ // to satisfy the io.Writer interface.
Write(bytes []byte) (int, error)
+
+ // Reset resets the OStream.
Reset(buffer checked.Bytes)
+
+ // Discard takes the ref to the checked bytes from the OStream.
Discard() checked.Bytes
- Rawbytes() ([]byte, int) // TODO: rename this RawBytes
+
+ // RawBytes returns the OStream's raw bytes. Note that this does not transfer
+ // ownership of the data and bypasses the checked.Bytes accounting so
+ // callers should:
+ // 1. Only use the returned slice as a "read-only" snapshot of the
+ // data in a context where the caller has at least a read lock
+ // on the OStream itself.
+ // 2. Use this function with care.
+ RawBytes() ([]byte, int)
+
+ // CheckedBytes returns the written stream as checked bytes.
CheckedBytes() (checked.Bytes, int)
}
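// Illustrative sketch of the WriteBits contract documented above: the
// lowest numBits of v are written most-significant-bit first, so writing
// 0x5 (binary 101) with numBits=3 is equivalent to
// WriteBit(1); WriteBit(0); WriteBit(1).
func writeThreeBits(os OStream) {
	os.WriteBits(0x5, 3)
}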
-// EncoderPool provides a pool for encoders
+// EncoderPool provides a pool for encoders.
type EncoderPool interface {
// Init initializes the pool.
Init(alloc EncoderAllocate)
- // Get provides an encoder from the pool
+ // Get provides an encoder from the pool.
Get() Encoder
- // Put returns an encoder to the pool
+ // Put returns an encoder to the pool.
Put(e Encoder)
}
-// ReaderIteratorPool provides a pool for ReaderIterators
+// ReaderIteratorPool provides a pool for ReaderIterators.
type ReaderIteratorPool interface {
// Init initializes the pool.
Init(alloc ReaderIteratorAllocate)
- // Get provides a ReaderIterator from the pool
+ // Get provides a ReaderIterator from the pool.
Get() ReaderIterator
- // Put returns a ReaderIterator to the pool
+ // Put returns a ReaderIterator to the pool.
Put(iter ReaderIterator)
}
-// MultiReaderIteratorPool provides a pool for MultiReaderIterators
+// MultiReaderIteratorPool provides a pool for MultiReaderIterators.
type MultiReaderIteratorPool interface {
// Init initializes the pool.
Init(alloc ReaderIteratorAllocate)
- // Get provides a MultiReaderIterator from the pool
+ // Get provides a MultiReaderIterator from the pool.
Get() MultiReaderIterator
- // Put returns a MultiReaderIterator to the pool
+ // Put returns a MultiReaderIterator to the pool.
Put(iter MultiReaderIterator)
}
-// SeriesIteratorPool provides a pool for SeriesIterator
+// SeriesIteratorPool provides a pool for SeriesIterator.
type SeriesIteratorPool interface {
- // Init initializes the pool
+ // Init initializes the pool.
Init()
- // Get provides a SeriesIterator from the pool
+ // Get provides a SeriesIterator from the pool.
Get() SeriesIterator
- // Put returns a SeriesIterator to the pool
+ // Put returns a SeriesIterator to the pool.
Put(iter SeriesIterator)
}
-// MutableSeriesIteratorsPool provides a pool for MutableSeriesIterators
+// MutableSeriesIteratorsPool provides a pool for MutableSeriesIterators.
type MutableSeriesIteratorsPool interface {
- // Init initializes the pool
+ // Init initializes the pool.
Init()
- // Get provides a MutableSeriesIterators from the pool
+ // Get provides a MutableSeriesIterators from the pool.
Get(size int) MutableSeriesIterators
- // Put returns a MutableSeriesIterators to the pool
+ // Put returns a MutableSeriesIterators to the pool.
Put(iters MutableSeriesIterators)
}
-// MultiReaderIteratorArrayPool provides a pool for MultiReaderIterator arrays
+// MultiReaderIteratorArrayPool provides a pool for MultiReaderIterator arrays.
type MultiReaderIteratorArrayPool interface {
- // Init initializes the pool
+ // Init initializes the pool.
Init()
- // Get provides a Iterator array from the pool
+ // Get provides a MultiReaderIterator array from the pool.
Get(size int) []MultiReaderIterator
- // Put returns a Iterator array to the pool
+ // Put returns a MultiReaderIterator array to the pool.
Put(iters []MultiReaderIterator)
}
-// IteratorPools exposes a small subset of iterator pools that are sufficient for clients
-// to rebuild SeriesIterator
+// IteratorPools exposes a small subset of iterator pools that are sufficient
+// for clients to rebuild SeriesIterator.
type IteratorPools interface {
- // MultiReaderIteratorArray exposes the session's MultiReaderIteratorArrayPool
+ // MultiReaderIteratorArray exposes the session MultiReaderIteratorArrayPool.
MultiReaderIteratorArray() MultiReaderIteratorArrayPool
- // MultiReaderIterator exposes the session's MultiReaderIteratorPool
+
+ // MultiReaderIterator exposes the session MultiReaderIteratorPool.
MultiReaderIterator() MultiReaderIteratorPool
- // MutableSeriesIterators exposes the session's MutableSeriesIteratorsPool
+
+ // MutableSeriesIterators exposes the session MutableSeriesIteratorsPool.
MutableSeriesIterators() MutableSeriesIteratorsPool
- // SeriesIterator exposes the session's SeriesIteratorPool
+
+ // SeriesIterator exposes the session SeriesIteratorPool.
SeriesIterator() SeriesIteratorPool
- // CheckedBytesWrapper exposes the session's CheckedBytesWrapperPool
+
+ // CheckedBytesWrapper exposes the session CheckedBytesWrapperPool.
CheckedBytesWrapper() xpool.CheckedBytesWrapperPool
- // ID exposes the session's identity pool
+
+ // ID exposes the session identity pool.
ID() ident.Pool
- // TagEncoder exposes the session's tag encoder pool
+
+ // TagEncoder exposes the session tag encoder pool.
TagEncoder() serialize.TagEncoderPool
- // TagDecoder exposes the session's tag decoder pool
+
+ // TagDecoder exposes the session tag decoder pool.
TagDecoder() serialize.TagDecoderPool
}
diff --git a/src/dbnode/environment/config.go b/src/dbnode/environment/config.go
index e4076e7aea..02a3dd5c2c 100644
--- a/src/dbnode/environment/config.go
+++ b/src/dbnode/environment/config.go
@@ -194,10 +194,11 @@ func (c ConfigureResults) SyncCluster() (ConfigureResult, error) {
// ConfigurationParameters are options used to create new ConfigureResults
type ConfigurationParameters struct {
- InstrumentOpts instrument.Options
- HashingSeed uint32
- HostID string
- NewDirectoryMode os.FileMode
+ InstrumentOpts instrument.Options
+ HashingSeed uint32
+ HostID string
+ NewDirectoryMode os.FileMode
+ ForceColdWritesEnabled bool
}
// UnmarshalYAML normalizes the config into a list of services.
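// Hedged sketch of a caller using the new ForceColdWritesEnabled field to
// opt every namespace into cold writes; the other fields are assumed
// boilerplate from the surrounding setup path.
func exampleConfigureParams(iopts instrument.Options, hostID string) ConfigurationParameters {
	return ConfigurationParameters{
		InstrumentOpts:         iopts,
		HostID:                 hostID,
		ForceColdWritesEnabled: true, // applied to both dynamic and static namespaces
	}
}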
@@ -295,7 +296,8 @@ func (c Configuration) configureDynamic(cfgParams ConfigurationParameters) (Conf
dynamicOpts := namespace.NewDynamicOptions().
SetInstrumentOptions(cfgParams.InstrumentOpts).
SetConfigServiceClient(configSvcClient).
- SetNamespaceRegistryKey(kvconfig.NamespacesKey)
+ SetNamespaceRegistryKey(kvconfig.NamespacesKey).
+ SetForceColdWritesEnabled(cfgParams.ForceColdWritesEnabled)
nsInit := namespace.NewDynamicInitializer(dynamicOpts)
serviceID := services.NewServiceID().
@@ -349,6 +351,10 @@ func (c Configuration) configureStatic(cfgParams ConfigurationParameters) (Confi
}
nsList = append(nsList, md)
}
+ // NB(bodu): Force cold writes to be enabled for all ns if specified.
+ if cfgParams.ForceColdWritesEnabled {
+ nsList = namespace.ForceColdWritesEnabledForMetadatas(nsList)
+ }
nsInitStatic := namespace.NewStaticInitializer(nsList)
diff --git a/src/dbnode/generated-source-files.mk b/src/dbnode/generated-source-files.mk
index fc1c7d29be..400a7c68c4 100644
--- a/src/dbnode/generated-source-files.mk
+++ b/src/dbnode/generated-source-files.mk
@@ -302,7 +302,7 @@ genny-list-all: \
genny-list-storage-id:
cd $(m3x_package_path) && make genny-pooled-elem-list-gen \
pkg=storage \
- value_type=ident.ID \
+ value_type=doc.Document \
rename_type_prefix=id \
rename_type_middle=ID \
target_package=github.com/m3db/m3/src/dbnode/storage
diff --git a/src/dbnode/generated/mocks/generate.go b/src/dbnode/generated/mocks/generate.go
index 7c8d6ac8da..0dc98bc96b 100644
--- a/src/dbnode/generated/mocks/generate.go
+++ b/src/dbnode/generated/mocks/generate.go
@@ -42,7 +42,7 @@
//go:generate sh -c "mockgen -package=namespace -destination=$GOPATH/src/$PACKAGE/src/dbnode/namespace/namespace_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/namespace/types.go"
//go:generate sh -c "mockgen -package=kvadmin -destination=$GOPATH/src/$PACKAGE/src/dbnode/namespace/kvadmin/kvadmin_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/namespace/kvadmin/types.go"
//go:generate sh -c "mockgen -package=runtime -destination=$GOPATH/src/$PACKAGE/src/dbnode/runtime/runtime_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/runtime/types.go"
-//go:generate sh -c "mockgen -package=ts -destination=$GOPATH/src/$PACKAGE/src/dbnode/ts/write_batch_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/ts/types.go"
+//go:generate sh -c "mockgen -package=writes -destination=$GOPATH/src/$PACKAGE/src/dbnode/ts/writes/write_batch_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/ts/writes/types.go"
//go:generate sh -c "mockgen -package=index -destination=$GOPATH/src/$PACKAGE/src/dbnode/storage/index/index_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/storage/index/types.go"
package mocks
diff --git a/src/dbnode/generated/proto/index/index.pb.go b/src/dbnode/generated/proto/index/index.pb.go
index ee9a3e3342..b81ca162ec 100644
--- a/src/dbnode/generated/proto/index/index.pb.go
+++ b/src/dbnode/generated/proto/index/index.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/m3db/m3/src/dbnode/generated/proto/index/index.proto
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -28,7 +28,7 @@
github.com/m3db/m3/src/dbnode/generated/proto/index/index.proto
It has these top-level messages:
- IndexInfo
+ IndexVolumeInfo
SegmentInfo
SegmentFileInfo
IndexDigests
@@ -40,6 +40,7 @@ package index
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
+import google_protobuf "github.com/gogo/protobuf/types"
import io "io"
@@ -54,70 +55,78 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
-type IndexInfo struct {
- MajorVersion int64 `protobuf:"varint,1,opt,name=majorVersion,proto3" json:"majorVersion,omitempty"`
- BlockStart int64 `protobuf:"varint,2,opt,name=blockStart,proto3" json:"blockStart,omitempty"`
- BlockSize int64 `protobuf:"varint,3,opt,name=blockSize,proto3" json:"blockSize,omitempty"`
- FileType int64 `protobuf:"varint,4,opt,name=fileType,proto3" json:"fileType,omitempty"`
- Shards []uint32 `protobuf:"varint,5,rep,packed,name=shards" json:"shards,omitempty"`
- SnapshotTime int64 `protobuf:"varint,6,opt,name=snapshotTime,proto3" json:"snapshotTime,omitempty"`
- Segments []*SegmentInfo `protobuf:"bytes,7,rep,name=segments" json:"segments,omitempty"`
+type IndexVolumeInfo struct {
+ MajorVersion int64 `protobuf:"varint,1,opt,name=majorVersion,proto3" json:"majorVersion,omitempty"`
+ BlockStart int64 `protobuf:"varint,2,opt,name=blockStart,proto3" json:"blockStart,omitempty"`
+ BlockSize int64 `protobuf:"varint,3,opt,name=blockSize,proto3" json:"blockSize,omitempty"`
+ FileType int64 `protobuf:"varint,4,opt,name=fileType,proto3" json:"fileType,omitempty"`
+ Shards []uint32 `protobuf:"varint,5,rep,packed,name=shards" json:"shards,omitempty"`
+ SnapshotTime int64 `protobuf:"varint,6,opt,name=snapshotTime,proto3" json:"snapshotTime,omitempty"`
+ Segments []*SegmentInfo `protobuf:"bytes,7,rep,name=segments" json:"segments,omitempty"`
+ IndexVolumeType *google_protobuf.StringValue `protobuf:"bytes,8,opt,name=indexVolumeType" json:"indexVolumeType,omitempty"`
}
-func (m *IndexInfo) Reset() { *m = IndexInfo{} }
-func (m *IndexInfo) String() string { return proto.CompactTextString(m) }
-func (*IndexInfo) ProtoMessage() {}
-func (*IndexInfo) Descriptor() ([]byte, []int) { return fileDescriptorIndex, []int{0} }
+func (m *IndexVolumeInfo) Reset() { *m = IndexVolumeInfo{} }
+func (m *IndexVolumeInfo) String() string { return proto.CompactTextString(m) }
+func (*IndexVolumeInfo) ProtoMessage() {}
+func (*IndexVolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptorIndex, []int{0} }
-func (m *IndexInfo) GetMajorVersion() int64 {
+func (m *IndexVolumeInfo) GetMajorVersion() int64 {
if m != nil {
return m.MajorVersion
}
return 0
}
-func (m *IndexInfo) GetBlockStart() int64 {
+func (m *IndexVolumeInfo) GetBlockStart() int64 {
if m != nil {
return m.BlockStart
}
return 0
}
-func (m *IndexInfo) GetBlockSize() int64 {
+func (m *IndexVolumeInfo) GetBlockSize() int64 {
if m != nil {
return m.BlockSize
}
return 0
}
-func (m *IndexInfo) GetFileType() int64 {
+func (m *IndexVolumeInfo) GetFileType() int64 {
if m != nil {
return m.FileType
}
return 0
}
-func (m *IndexInfo) GetShards() []uint32 {
+func (m *IndexVolumeInfo) GetShards() []uint32 {
if m != nil {
return m.Shards
}
return nil
}
-func (m *IndexInfo) GetSnapshotTime() int64 {
+func (m *IndexVolumeInfo) GetSnapshotTime() int64 {
if m != nil {
return m.SnapshotTime
}
return 0
}
-func (m *IndexInfo) GetSegments() []*SegmentInfo {
+func (m *IndexVolumeInfo) GetSegments() []*SegmentInfo {
if m != nil {
return m.Segments
}
return nil
}
+func (m *IndexVolumeInfo) GetIndexVolumeType() *google_protobuf.StringValue {
+ if m != nil {
+ return m.IndexVolumeType
+ }
+ return nil
+}
+
type SegmentInfo struct {
SegmentType string `protobuf:"bytes,1,opt,name=segmentType,proto3" json:"segmentType,omitempty"`
MajorVersion int64 `protobuf:"varint,2,opt,name=majorVersion,proto3" json:"majorVersion,omitempty"`
@@ -255,14 +264,14 @@ func (m *SegmentFileDigest) GetDigest() uint32 {
}
func init() {
- proto.RegisterType((*IndexInfo)(nil), "index.IndexInfo")
+ proto.RegisterType((*IndexVolumeInfo)(nil), "index.IndexVolumeInfo")
proto.RegisterType((*SegmentInfo)(nil), "index.SegmentInfo")
proto.RegisterType((*SegmentFileInfo)(nil), "index.SegmentFileInfo")
proto.RegisterType((*IndexDigests)(nil), "index.IndexDigests")
proto.RegisterType((*SegmentDigest)(nil), "index.SegmentDigest")
proto.RegisterType((*SegmentFileDigest)(nil), "index.SegmentFileDigest")
}
-func (m *IndexInfo) Marshal() (dAtA []byte, err error) {
+func (m *IndexVolumeInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
@@ -272,7 +281,7 @@ func (m *IndexInfo) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *IndexInfo) MarshalTo(dAtA []byte) (int, error) {
+func (m *IndexVolumeInfo) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
@@ -331,6 +340,16 @@ func (m *IndexInfo) MarshalTo(dAtA []byte) (int, error) {
i += n
}
}
+ if m.IndexVolumeType != nil {
+ dAtA[i] = 0x42
+ i++
+ i = encodeVarintIndex(dAtA, i, uint64(m.IndexVolumeType.Size()))
+ n3, err := m.IndexVolumeType.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
return i, nil
}
@@ -519,7 +538,7 @@ func encodeVarintIndex(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return offset + 1
}
-func (m *IndexInfo) Size() (n int) {
+func (m *IndexVolumeInfo) Size() (n int) {
var l int
_ = l
if m.MajorVersion != 0 {
@@ -550,6 +569,10 @@ func (m *IndexInfo) Size() (n int) {
n += 1 + l + sovIndex(uint64(l))
}
}
+ if m.IndexVolumeType != nil {
+ l = m.IndexVolumeType.Size()
+ n += 1 + l + sovIndex(uint64(l))
+ }
return n
}
@@ -646,7 +669,7 @@ func sovIndex(x uint64) (n int) {
func sozIndex(x uint64) (n int) {
return sovIndex(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
-func (m *IndexInfo) Unmarshal(dAtA []byte) error {
+func (m *IndexVolumeInfo) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -669,10 +692,10 @@ func (m *IndexInfo) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IndexInfo: wiretype end group for non-group")
+ return fmt.Errorf("proto: IndexVolumeInfo: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IndexInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: IndexVolumeInfo: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
@@ -863,6 +886,39 @@ func (m *IndexInfo) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IndexVolumeType", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIndex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthIndex
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.IndexVolumeType == nil {
+ m.IndexVolumeType = &google_protobuf.StringValue{}
+ }
+ if err := m.IndexVolumeType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipIndex(dAtA[iNdEx:])
@@ -1560,32 +1616,36 @@ func init() {
}
var fileDescriptorIndex = []byte{
- // 427 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x8e, 0xd3, 0x30,
- 0x18, 0x25, 0x09, 0x2d, 0xd3, 0x2f, 0x2d, 0x03, 0x16, 0x1a, 0x59, 0x08, 0x45, 0x51, 0x56, 0x59,
- 0xa0, 0x44, 0x9a, 0x2e, 0x41, 0x42, 0x42, 0x08, 0x69, 0xb6, 0x99, 0x81, 0xbd, 0x53, 0xbb, 0xa9,
- 0x21, 0xb1, 0xab, 0xd8, 0x48, 0xc0, 0x29, 0xb8, 0x12, 0x3b, 0x96, 0x1c, 0x01, 0x95, 0x33, 0xb0,
- 0x47, 0xfe, 0xa1, 0x4d, 0x5a, 0x16, 0xdd, 0x44, 0x79, 0x3f, 0x8e, 0xbf, 0xf7, 0x1c, 0xc3, 0xab,
- 0x86, 0xeb, 0xcd, 0xa7, 0xba, 0x58, 0xc9, 0xae, 0xec, 0x96, 0xb4, 0x2e, 0xbb, 0x65, 0xa9, 0xfa,
- 0x55, 0x49, 0x6b, 0x21, 0x29, 0x2b, 0x1b, 0x26, 0x58, 0x4f, 0x34, 0xa3, 0xe5, 0xb6, 0x97, 0x5a,
- 0x96, 0x5c, 0x50, 0xf6, 0xd9, 0x3d, 0x0b, 0xcb, 0xa0, 0x89, 0x05, 0xd9, 0x9f, 0x00, 0x66, 0x37,
- 0xe6, 0xed, 0x46, 0xac, 0x25, 0xca, 0x60, 0xde, 0x91, 0x0f, 0xb2, 0x7f, 0xcf, 0x7a, 0xc5, 0xa5,
- 0xc0, 0x41, 0x1a, 0xe4, 0x51, 0x35, 0xe2, 0x50, 0x02, 0x50, 0xb7, 0x72, 0xf5, 0xf1, 0x56, 0x93,
- 0x5e, 0xe3, 0xd0, 0x3a, 0x06, 0x0c, 0x7a, 0x06, 0x33, 0x87, 0xf8, 0x57, 0x86, 0x23, 0x2b, 0x1f,
- 0x08, 0xf4, 0x14, 0x2e, 0xd6, 0xbc, 0x65, 0x77, 0x5f, 0xb6, 0x0c, 0xdf, 0xb7, 0xe2, 0x1e, 0xa3,
- 0x2b, 0x98, 0xaa, 0x0d, 0xe9, 0xa9, 0xc2, 0x93, 0x34, 0xca, 0x17, 0x95, 0x47, 0x66, 0x2a, 0x25,
- 0xc8, 0x56, 0x6d, 0xa4, 0xbe, 0xe3, 0x1d, 0xc3, 0x53, 0x37, 0xd5, 0x90, 0x43, 0x05, 0x5c, 0x28,
- 0xd6, 0x74, 0x4c, 0x68, 0x85, 0x1f, 0xa4, 0x51, 0x1e, 0x5f, 0xa3, 0xc2, 0xc5, 0xbd, 0x75, 0xb4,
- 0xc9, 0x57, 0xed, 0x3d, 0xd9, 0xf7, 0x00, 0xe2, 0x81, 0x82, 0x52, 0x88, 0xbd, 0x66, 0x47, 0x33,
- 0xc1, 0x67, 0xd5, 0x90, 0x3a, 0xe9, 0x26, 0xfc, 0x4f, 0x37, 0xc6, 0xc3, 0xc5, 0xc1, 0x13, 0x79,
- 0xcf, 0x80, 0x33, 0x0d, 0x74, 0x4c, 0x13, 0x4a, 0x34, 0xb1, 0x0d, 0xcc, 0xab, 0x3d, 0x46, 0xcf,
- 0x61, 0x62, 0xda, 0x70, 0x05, 0xc4, 0xd7, 0x57, 0xe3, 0x08, 0x6f, 0x79, 0xcb, 0x6c, 0x0c, 0x67,
- 0xca, 0x5e, 0xc0, 0xe5, 0x91, 0x82, 0x72, 0xb8, 0x54, 0x07, 0x6a, 0x10, 0xe5, 0x98, 0xce, 0x5a,
- 0x98, 0xdb, 0x73, 0x7f, 0xc3, 0x1b, 0xa6, 0xb4, 0x32, 0xc7, 0xca, 0xc5, 0x5a, 0x3a, 0x68, 0x17,
- 0x2d, 0xaa, 0x01, 0x83, 0x5e, 0xc2, 0x43, 0xff, 0x09, 0xbf, 0x02, 0x87, 0x76, 0xc6, 0x27, 0xe3,
- 0x19, 0x9d, 0x58, 0x1d, 0x79, 0x33, 0x02, 0x8b, 0x91, 0xe1, 0x8c, 0xbe, 0x8b, 0x7f, 0x5d, 0xb8,
- 0x7d, 0xf0, 0x69, 0x17, 0x7e, 0x2f, 0xdf, 0xc6, 0x3b, 0x78, 0x7c, 0xa2, 0x9d, 0xdf, 0x87, 0xf9,
- 0xf9, 0xa8, 0xcb, 0x1e, 0xda, 0xec, 0x1e, 0xbd, 0x7e, 0xf4, 0x63, 0x97, 0x04, 0x3f, 0x77, 0x49,
- 0xf0, 0x6b, 0x97, 0x04, 0xdf, 0x7e, 0x27, 0xf7, 0xea, 0xa9, 0xbd, 0x40, 0xcb, 0xbf, 0x01, 0x00,
- 0x00, 0xff, 0xff, 0xb1, 0x2e, 0xe5, 0xaa, 0x83, 0x03, 0x00, 0x00,
+ // 484 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcf, 0x8a, 0xdb, 0x30,
+ 0x10, 0xc6, 0x6b, 0xbb, 0x49, 0xb3, 0xe3, 0xa4, 0x69, 0x45, 0x59, 0xcc, 0xb2, 0x18, 0xe3, 0x93,
+ 0x0f, 0xc5, 0x86, 0xe4, 0xd8, 0x42, 0xa1, 0x94, 0x85, 0xbd, 0x3a, 0xdb, 0xdc, 0xe5, 0x58, 0x71,
+ 0xd4, 0xda, 0x92, 0x91, 0x14, 0xfa, 0xe7, 0x29, 0xfa, 0x4a, 0xa5, 0x97, 0x1e, 0xfb, 0x08, 0x25,
+ 0x7d, 0x91, 0x22, 0xc9, 0x9b, 0x38, 0xc9, 0x1e, 0xf6, 0x12, 0xf8, 0xbe, 0xf9, 0xe4, 0x99, 0xf9,
+ 0x65, 0xe0, 0x5d, 0x45, 0xd5, 0x66, 0x5b, 0xa4, 0x2b, 0xde, 0x64, 0xcd, 0xbc, 0x2c, 0xb2, 0x66,
+ 0x9e, 0x49, 0xb1, 0xca, 0xca, 0x82, 0xf1, 0x92, 0x64, 0x15, 0x61, 0x44, 0x60, 0x45, 0xca, 0xac,
+ 0x15, 0x5c, 0xf1, 0x8c, 0xb2, 0x92, 0x7c, 0xb5, 0xbf, 0xa9, 0x71, 0xd0, 0xc0, 0x88, 0xab, 0xb0,
+ 0xe2, 0xbc, 0xaa, 0x89, 0x8d, 0x15, 0xdb, 0x75, 0xf6, 0x45, 0xe0, 0xb6, 0x25, 0x42, 0xda, 0x58,
+ 0xfc, 0xcb, 0x85, 0xe9, 0xad, 0x4e, 0x2e, 0x79, 0xbd, 0x6d, 0xc8, 0x2d, 0x5b, 0x73, 0x14, 0xc3,
+ 0xb8, 0xc1, 0x9f, 0xb8, 0x58, 0x12, 0x21, 0x29, 0x67, 0x81, 0x13, 0x39, 0x89, 0x97, 0x1f, 0x79,
+ 0x28, 0x04, 0x28, 0x6a, 0xbe, 0xfa, 0xbc, 0x50, 0x58, 0xa8, 0xc0, 0x35, 0x89, 0x9e, 0x83, 0xae,
+ 0xe1, 0xc2, 0x2a, 0xfa, 0x9d, 0x04, 0x9e, 0x29, 0x1f, 0x0c, 0x74, 0x05, 0xa3, 0x35, 0xad, 0xc9,
+ 0xdd, 0xb7, 0x96, 0x04, 0x4f, 0x4d, 0x71, 0xaf, 0xd1, 0x25, 0x0c, 0xe5, 0x06, 0x8b, 0x52, 0x06,
+ 0x83, 0xc8, 0x4b, 0x26, 0x79, 0xa7, 0xf4, 0x54, 0x92, 0xe1, 0x56, 0x6e, 0xb8, 0xba, 0xa3, 0x0d,
+ 0x09, 0x86, 0x76, 0xaa, 0xbe, 0x87, 0x52, 0x18, 0x49, 0x52, 0x35, 0x84, 0x29, 0x19, 0x3c, 0x8b,
+ 0xbc, 0xc4, 0x9f, 0xa1, 0xd4, 0x42, 0x59, 0x58, 0x5b, 0xef, 0x97, 0xef, 0x33, 0xe8, 0x06, 0xa6,
+ 0xf4, 0xb0, 0xbc, 0x19, 0x67, 0x14, 0x39, 0x89, 0x3f, 0xbb, 0x4e, 0x2d, 0xb7, 0xf4, 0x9e, 0x5b,
+ 0xba, 0x50, 0x82, 0xb2, 0x6a, 0x89, 0xeb, 0x2d, 0xc9, 0x4f, 0x1f, 0xc5, 0x3f, 0x1d, 0xf0, 0x7b,
+ 0x1d, 0x50, 0x04, 0x7e, 0xd7, 0xc3, 0x7c, 0x53, 0x03, 0xbc, 0xc8, 0xfb, 0xd6, 0x19, 0x63, 0xf7,
+ 0x01, 0xc6, 0x3a, 0x43, 0xd9, 0x21, 0xe3, 0x75, 0x99, 0x9e, 0xa7, 0x49, 0x36, 0x44, 0xe1, 0x12,
+ 0x2b, 0x6c, 0x48, 0x8e, 0xf3, 0xbd, 0x46, 0xaf, 0x61, 0xa0, 0xa9, 0x5a, 0x90, 0xfe, 0xec, 0xf2,
+ 0x18, 0xc5, 0x0d, 0xad, 0xcd, 0xdf, 0x9d, 0xdb, 0x50, 0xfc, 0x06, 0xa6, 0x27, 0x15, 0x94, 0xc0,
+ 0x54, 0x1e, 0xac, 0xde, 0x2a, 0xa7, 0x76, 0x5c, 0xc3, 0xd8, 0x5c, 0xd1, 0x07, 0x5a, 0x11, 0xa9,
+ 0xa4, 0x3e, 0x0f, 0xca, 0xd6, 0xdc, 0x4a, 0xf3, 0x68, 0x92, 0xf7, 0x1c, 0xf4, 0x16, 0x9e, 0x77,
+ 0x9f, 0xe8, 0x5e, 0x04, 0xae, 0x99, 0xf1, 0xd5, 0xf1, 0x8c, 0xb6, 0x98, 0x9f, 0x64, 0x63, 0x0c,
+ 0x93, 0xa3, 0xc0, 0x23, 0x78, 0xa7, 0xf7, 0x2c, 0x6c, 0x9f, 0xe0, 0x9c, 0x45, 0xd7, 0xab, 0xa3,
+ 0xf1, 0x11, 0x5e, 0x9e, 0xd5, 0x1e, 0xcf, 0x43, 0x1f, 0x71, 0x69, 0x77, 0x77, 0xcd, 0xee, 0x9d,
+ 0x7a, 0xff, 0xe2, 0xf7, 0x2e, 0x74, 0xfe, 0xec, 0x42, 0xe7, 0xef, 0x2e, 0x74, 0x7e, 0xfc, 0x0b,
+ 0x9f, 0x14, 0x43, 0x73, 0x61, 0xf3, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x48, 0x32, 0xee, 0x47,
+ 0xf1, 0x03, 0x00, 0x00,
}
diff --git a/src/dbnode/generated/proto/index/index.proto b/src/dbnode/generated/proto/index/index.proto
index 193046b47d..c1a0c9a4bc 100644
--- a/src/dbnode/generated/proto/index/index.proto
+++ b/src/dbnode/generated/proto/index/index.proto
@@ -1,7 +1,10 @@
syntax = "proto3";
+
package index;
-message IndexInfo {
+import "google/protobuf/wrappers.proto";
+
+message IndexVolumeInfo {
int64 majorVersion = 1;
int64 blockStart = 2;
int64 blockSize = 3;
@@ -9,6 +12,7 @@ message IndexInfo {
repeated uint32 shards = 5;
int64 snapshotTime = 6;
repeated SegmentInfo segments = 7;
+ google.protobuf.StringValue indexVolumeType = 8;
}
message SegmentInfo {
@@ -36,4 +40,4 @@ message SegmentDigest {
message SegmentFileDigest {
string segmentFileType = 1;
uint32 digest = 2;
-}
\ No newline at end of file
+}
diff --git a/src/dbnode/generated/proto/namespace/namespace.pb.go b/src/dbnode/generated/proto/namespace/namespace.pb.go
index 4e817778c8..8d4ff1f104 100644
--- a/src/dbnode/generated/proto/namespace/namespace.pb.go
+++ b/src/dbnode/generated/proto/namespace/namespace.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/m3db/m3/src/dbnode/generated/proto/namespace/namespace.proto
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -33,6 +33,7 @@
IndexOptions
NamespaceOptions
Registry
+ NamespaceRuntimeOptions
SchemaOptions
SchemaHistory
FileDescriptorSet
@@ -42,6 +43,7 @@ package namespace
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
+import google_protobuf "github.com/gogo/protobuf/types"
import io "io"
@@ -145,16 +147,17 @@ func (m *IndexOptions) GetBlockSizeNanos() int64 {
}
type NamespaceOptions struct {
- BootstrapEnabled bool `protobuf:"varint,1,opt,name=bootstrapEnabled,proto3" json:"bootstrapEnabled,omitempty"`
- FlushEnabled bool `protobuf:"varint,2,opt,name=flushEnabled,proto3" json:"flushEnabled,omitempty"`
- WritesToCommitLog bool `protobuf:"varint,3,opt,name=writesToCommitLog,proto3" json:"writesToCommitLog,omitempty"`
- CleanupEnabled bool `protobuf:"varint,4,opt,name=cleanupEnabled,proto3" json:"cleanupEnabled,omitempty"`
- RepairEnabled bool `protobuf:"varint,5,opt,name=repairEnabled,proto3" json:"repairEnabled,omitempty"`
- RetentionOptions *RetentionOptions `protobuf:"bytes,6,opt,name=retentionOptions" json:"retentionOptions,omitempty"`
- SnapshotEnabled bool `protobuf:"varint,7,opt,name=snapshotEnabled,proto3" json:"snapshotEnabled,omitempty"`
- IndexOptions *IndexOptions `protobuf:"bytes,8,opt,name=indexOptions" json:"indexOptions,omitempty"`
- SchemaOptions *SchemaOptions `protobuf:"bytes,9,opt,name=schemaOptions" json:"schemaOptions,omitempty"`
- ColdWritesEnabled bool `protobuf:"varint,10,opt,name=coldWritesEnabled,proto3" json:"coldWritesEnabled,omitempty"`
+ BootstrapEnabled bool `protobuf:"varint,1,opt,name=bootstrapEnabled,proto3" json:"bootstrapEnabled,omitempty"`
+ FlushEnabled bool `protobuf:"varint,2,opt,name=flushEnabled,proto3" json:"flushEnabled,omitempty"`
+ WritesToCommitLog bool `protobuf:"varint,3,opt,name=writesToCommitLog,proto3" json:"writesToCommitLog,omitempty"`
+ CleanupEnabled bool `protobuf:"varint,4,opt,name=cleanupEnabled,proto3" json:"cleanupEnabled,omitempty"`
+ RepairEnabled bool `protobuf:"varint,5,opt,name=repairEnabled,proto3" json:"repairEnabled,omitempty"`
+ RetentionOptions *RetentionOptions `protobuf:"bytes,6,opt,name=retentionOptions" json:"retentionOptions,omitempty"`
+ SnapshotEnabled bool `protobuf:"varint,7,opt,name=snapshotEnabled,proto3" json:"snapshotEnabled,omitempty"`
+ IndexOptions *IndexOptions `protobuf:"bytes,8,opt,name=indexOptions" json:"indexOptions,omitempty"`
+ SchemaOptions *SchemaOptions `protobuf:"bytes,9,opt,name=schemaOptions" json:"schemaOptions,omitempty"`
+ ColdWritesEnabled bool `protobuf:"varint,10,opt,name=coldWritesEnabled,proto3" json:"coldWritesEnabled,omitempty"`
+ RuntimeOptions *NamespaceRuntimeOptions `protobuf:"bytes,11,opt,name=runtimeOptions" json:"runtimeOptions,omitempty"`
}
func (m *NamespaceOptions) Reset() { *m = NamespaceOptions{} }
@@ -232,6 +235,13 @@ func (m *NamespaceOptions) GetColdWritesEnabled() bool {
return false
}
+func (m *NamespaceOptions) GetRuntimeOptions() *NamespaceRuntimeOptions {
+ if m != nil {
+ return m.RuntimeOptions
+ }
+ return nil
+}
+
type Registry struct {
Namespaces map[string]*NamespaceOptions `protobuf:"bytes,1,rep,name=namespaces" json:"namespaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"`
}
@@ -248,11 +258,36 @@ func (m *Registry) GetNamespaces() map[string]*NamespaceOptions {
return nil
}
+type NamespaceRuntimeOptions struct {
+ WriteIndexingPerCPUConcurrency *google_protobuf.DoubleValue `protobuf:"bytes,1,opt,name=writeIndexingPerCPUConcurrency" json:"writeIndexingPerCPUConcurrency,omitempty"`
+ FlushIndexingPerCPUConcurrency *google_protobuf.DoubleValue `protobuf:"bytes,2,opt,name=flushIndexingPerCPUConcurrency" json:"flushIndexingPerCPUConcurrency,omitempty"`
+}
+
+func (m *NamespaceRuntimeOptions) Reset() { *m = NamespaceRuntimeOptions{} }
+func (m *NamespaceRuntimeOptions) String() string { return proto.CompactTextString(m) }
+func (*NamespaceRuntimeOptions) ProtoMessage() {}
+func (*NamespaceRuntimeOptions) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{4} }
+
+func (m *NamespaceRuntimeOptions) GetWriteIndexingPerCPUConcurrency() *google_protobuf.DoubleValue {
+ if m != nil {
+ return m.WriteIndexingPerCPUConcurrency
+ }
+ return nil
+}
+
+func (m *NamespaceRuntimeOptions) GetFlushIndexingPerCPUConcurrency() *google_protobuf.DoubleValue {
+ if m != nil {
+ return m.FlushIndexingPerCPUConcurrency
+ }
+ return nil
+}
+
func init() {
proto.RegisterType((*RetentionOptions)(nil), "namespace.RetentionOptions")
proto.RegisterType((*IndexOptions)(nil), "namespace.IndexOptions")
proto.RegisterType((*NamespaceOptions)(nil), "namespace.NamespaceOptions")
proto.RegisterType((*Registry)(nil), "namespace.Registry")
+ proto.RegisterType((*NamespaceRuntimeOptions)(nil), "namespace.NamespaceRuntimeOptions")
}
func (m *RetentionOptions) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -460,6 +495,16 @@ func (m *NamespaceOptions) MarshalTo(dAtA []byte) (int, error) {
}
i++
}
+ if m.RuntimeOptions != nil {
+ dAtA[i] = 0x5a
+ i++
+ i = encodeVarintNamespace(dAtA, i, uint64(m.RuntimeOptions.Size()))
+ n4, err := m.RuntimeOptions.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
return i, nil
}
@@ -498,17 +543,55 @@ func (m *Registry) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintNamespace(dAtA, i, uint64(v.Size()))
- n4, err := v.MarshalTo(dAtA[i:])
+ n5, err := v.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n4
+ i += n5
}
}
}
return i, nil
}
+func (m *NamespaceRuntimeOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NamespaceRuntimeOptions) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.WriteIndexingPerCPUConcurrency != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintNamespace(dAtA, i, uint64(m.WriteIndexingPerCPUConcurrency.Size()))
+ n6, err := m.WriteIndexingPerCPUConcurrency.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ if m.FlushIndexingPerCPUConcurrency != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintNamespace(dAtA, i, uint64(m.FlushIndexingPerCPUConcurrency.Size()))
+ n7, err := m.FlushIndexingPerCPUConcurrency.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ return i, nil
+}
+
func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -593,6 +676,10 @@ func (m *NamespaceOptions) Size() (n int) {
if m.ColdWritesEnabled {
n += 2
}
+ if m.RuntimeOptions != nil {
+ l = m.RuntimeOptions.Size()
+ n += 1 + l + sovNamespace(uint64(l))
+ }
return n
}
@@ -615,6 +702,20 @@ func (m *Registry) Size() (n int) {
return n
}
+func (m *NamespaceRuntimeOptions) Size() (n int) {
+ var l int
+ _ = l
+ if m.WriteIndexingPerCPUConcurrency != nil {
+ l = m.WriteIndexingPerCPUConcurrency.Size()
+ n += 1 + l + sovNamespace(uint64(l))
+ }
+ if m.FlushIndexingPerCPUConcurrency != nil {
+ l = m.FlushIndexingPerCPUConcurrency.Size()
+ n += 1 + l + sovNamespace(uint64(l))
+ }
+ return n
+}
+
func sovNamespace(x uint64) (n int) {
for {
n++
@@ -1169,6 +1270,39 @@ func (m *NamespaceOptions) Unmarshal(dAtA []byte) error {
}
}
m.ColdWritesEnabled = bool(v != 0)
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RuntimeOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNamespace
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthNamespace
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RuntimeOptions == nil {
+ m.RuntimeOptions = &NamespaceRuntimeOptions{}
+ }
+ if err := m.RuntimeOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipNamespace(dAtA[iNdEx:])
@@ -1363,6 +1497,122 @@ func (m *Registry) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *NamespaceRuntimeOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNamespace
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamespaceRuntimeOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamespaceRuntimeOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WriteIndexingPerCPUConcurrency", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNamespace
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthNamespace
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.WriteIndexingPerCPUConcurrency == nil {
+ m.WriteIndexingPerCPUConcurrency = &google_protobuf.DoubleValue{}
+ }
+ if err := m.WriteIndexingPerCPUConcurrency.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FlushIndexingPerCPUConcurrency", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNamespace
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthNamespace
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FlushIndexingPerCPUConcurrency == nil {
+ m.FlushIndexingPerCPUConcurrency = &google_protobuf.DoubleValue{}
+ }
+ if err := m.FlushIndexingPerCPUConcurrency.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNamespace(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthNamespace
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipNamespace(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
@@ -1473,41 +1723,48 @@ func init() {
}
var fileDescriptorNamespace = []byte{
- // 575 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x51, 0x6b, 0xd3, 0x50,
- 0x14, 0xc7, 0x4d, 0xb3, 0xad, 0xed, 0x59, 0xe7, 0xe2, 0x45, 0xb0, 0x54, 0x28, 0xa3, 0x8a, 0x14,
- 0x91, 0x06, 0xdb, 0x17, 0x51, 0x18, 0xcc, 0xad, 0x0e, 0x41, 0x6a, 0xb9, 0x13, 0x84, 0xbd, 0xdd,
- 0x24, 0xa7, 0x6d, 0x58, 0x92, 0x1b, 0xee, 0xbd, 0xd1, 0xd5, 0xcf, 0xe0, 0x83, 0xdf, 0xc3, 0x2f,
- 0xe2, 0xa3, 0x1f, 0x41, 0xea, 0xd7, 0xf0, 0x41, 0x72, 0x63, 0xba, 0x24, 0x1d, 0x32, 0xf6, 0x52,
- 0x6e, 0xff, 0xe7, 0x77, 0xce, 0xb9, 0x3d, 0xff, 0x73, 0x0b, 0xa7, 0x73, 0x5f, 0x2d, 0x12, 0x67,
- 0xe0, 0xf2, 0xd0, 0x0e, 0x47, 0x9e, 0x63, 0x87, 0x23, 0x5b, 0x0a, 0xd7, 0xf6, 0x9c, 0x88, 0x7b,
- 0x68, 0xcf, 0x31, 0x42, 0xc1, 0x14, 0x7a, 0x76, 0x2c, 0xb8, 0xe2, 0x76, 0xc4, 0x42, 0x94, 0x31,
- 0x73, 0xf1, 0xea, 0x34, 0xd0, 0x11, 0xd2, 0x5c, 0x0b, 0x9d, 0x93, 0xdb, 0xd6, 0x94, 0xee, 0x02,
- 0x43, 0x96, 0x15, 0xec, 0x7d, 0x35, 0xc1, 0xa2, 0xa8, 0x30, 0x52, 0x3e, 0x8f, 0xde, 0xc7, 0xe9,
- 0xa7, 0x24, 0x43, 0xb8, 0x2f, 0x72, 0x6d, 0x8a, 0xc2, 0xe7, 0xde, 0x84, 0x45, 0x5c, 0xb6, 0x8d,
- 0x03, 0xa3, 0x6f, 0xd2, 0x6b, 0x63, 0xe4, 0x09, 0xdc, 0x75, 0x02, 0xee, 0x5e, 0x9c, 0xf9, 0x5f,
- 0x30, 0xa3, 0x6b, 0x9a, 0xae, 0xa8, 0xe4, 0x19, 0xdc, 0x73, 0x92, 0xd9, 0x0c, 0xc5, 0x9b, 0x44,
- 0x25, 0xe2, 0x1f, 0x6a, 0x6a, 0x74, 0x33, 0x40, 0xfa, 0xb0, 0x9f, 0x89, 0x53, 0x26, 0x55, 0xc6,
- 0x6e, 0x69, 0xb6, 0x2a, 0x6b, 0x32, 0xed, 0x74, 0xc2, 0x14, 0x1b, 0x5f, 0xc6, 0xbe, 0x58, 0xb6,
- 0xb7, 0x0f, 0x8c, 0x7e, 0x83, 0x56, 0x65, 0x72, 0x0e, 0xfd, 0x8a, 0x74, 0x34, 0x53, 0x28, 0x26,
- 0x5c, 0x1d, 0xb9, 0x2e, 0x4a, 0x59, 0xfc, 0xc5, 0x3b, 0xba, 0xd9, 0x8d, 0x79, 0x72, 0x08, 0x9d,
- 0x99, 0xbe, 0x3e, 0xbd, 0x6e, 0x7e, 0x75, 0x5d, 0xed, 0x3f, 0x44, 0x6f, 0x0a, 0xad, 0xb7, 0x91,
- 0x87, 0x97, 0xb9, 0x13, 0x6d, 0xa8, 0x63, 0xc4, 0x9c, 0x00, 0x3d, 0x3d, 0xfc, 0x06, 0xcd, 0xbf,
- 0xde, 0x74, 0xde, 0xbd, 0x3f, 0x26, 0x58, 0x93, 0xdc, 0xfb, 0xbc, 0xec, 0x53, 0xb0, 0x1c, 0xce,
- 0x95, 0x54, 0x82, 0xc5, 0xe3, 0x52, 0xfd, 0x0d, 0x9d, 0xf4, 0xa0, 0x35, 0x0b, 0x12, 0xb9, 0xc8,
- 0xb9, 0x9a, 0xe6, 0x4a, 0x5a, 0x6a, 0xea, 0x67, 0xe1, 0x2b, 0x94, 0x1f, 0xf8, 0x31, 0x0f, 0x43,
- 0x5f, 0xbd, 0xe3, 0x73, 0x6d, 0x6a, 0x83, 0x6e, 0x06, 0xd2, 0xab, 0xbb, 0x01, 0xb2, 0x28, 0x59,
- 0xf7, 0xde, 0xd2, 0x68, 0x45, 0x25, 0x8f, 0x61, 0x4f, 0x60, 0xcc, 0x7c, 0x91, 0x63, 0x99, 0xa1,
- 0x65, 0x91, 0x9c, 0x82, 0x25, 0x2a, 0x0b, 0xac, 0x6d, 0xdb, 0x1d, 0x3e, 0x1c, 0x5c, 0x3d, 0x9f,
- 0xea, 0x8e, 0xd3, 0x8d, 0xa4, 0x74, 0x83, 0x64, 0xc4, 0x62, 0xb9, 0xe0, 0x2a, 0x6f, 0x58, 0xcf,
- 0x36, 0xa8, 0x22, 0x93, 0x57, 0xd0, 0xf2, 0x0b, 0x2e, 0xb5, 0x1b, 0xba, 0xdd, 0x83, 0x42, 0xbb,
- 0xa2, 0x89, 0xb4, 0x04, 0x93, 0x43, 0xd8, 0xcb, 0x5e, 0x60, 0x9e, 0xdd, 0xd4, 0xd9, 0xed, 0x42,
- 0xf6, 0x59, 0x31, 0x4e, 0xcb, 0x78, 0x3a, 0x6b, 0x97, 0x07, 0xde, 0x47, 0x3d, 0xd6, 0xfc, 0xa2,
- 0x90, 0xcd, 0x7a, 0x23, 0xd0, 0xfb, 0x6e, 0x40, 0x83, 0xe2, 0xdc, 0x97, 0x4a, 0x2c, 0xc9, 0x31,
- 0xc0, 0xba, 0x49, 0xfa, 0x9a, 0xcd, 0xfe, 0xee, 0xf0, 0x51, 0x69, 0x48, 0x19, 0x38, 0x58, 0x2f,
- 0x8c, 0x1c, 0x47, 0x4a, 0x2c, 0x69, 0x21, 0xad, 0x73, 0x0e, 0xfb, 0x95, 0x30, 0xb1, 0xc0, 0xbc,
- 0xc0, 0xa5, 0xde, 0xa0, 0x26, 0x4d, 0x8f, 0xe4, 0x39, 0x6c, 0x7f, 0x62, 0x41, 0x82, 0x7a, 0x5b,
- 0xca, 0x4e, 0x54, 0x97, 0x91, 0x66, 0xe4, 0xcb, 0xda, 0x0b, 0xe3, 0xb5, 0xf5, 0x63, 0xd5, 0x35,
- 0x7e, 0xae, 0xba, 0xc6, 0xaf, 0x55, 0xd7, 0xf8, 0xf6, 0xbb, 0x7b, 0xc7, 0xd9, 0xd1, 0x7f, 0x53,
- 0xa3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x60, 0xfa, 0xeb, 0x0c, 0x42, 0x05, 0x00, 0x00,
+ // 679 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6e, 0xd3, 0x4c,
+ 0x14, 0xfd, 0x9c, 0xf4, 0x27, 0x9d, 0xf4, 0x27, 0xdf, 0x08, 0xa9, 0x51, 0x40, 0x51, 0x65, 0x10,
+ 0x8a, 0x10, 0x8a, 0x45, 0xfa, 0x82, 0x40, 0xaa, 0x54, 0xd2, 0x52, 0x81, 0x50, 0x89, 0xa6, 0xfc,
+ 0x48, 0x7d, 0x1b, 0xdb, 0x37, 0x8e, 0x55, 0x7b, 0xc6, 0x9a, 0x19, 0xd3, 0x86, 0x35, 0xf0, 0xc0,
+ 0x3e, 0xd8, 0x48, 0x1f, 0x59, 0x02, 0x2a, 0x62, 0x1f, 0xc8, 0x33, 0x38, 0xb5, 0x9d, 0xb6, 0x54,
+ 0xbc, 0x44, 0xce, 0xb9, 0xe7, 0xde, 0x63, 0xdf, 0x73, 0x66, 0xd0, 0x41, 0x10, 0xaa, 0x49, 0xea,
+ 0xf6, 0x3d, 0x1e, 0x3b, 0xf1, 0xb6, 0xef, 0x3a, 0xf1, 0xb6, 0x23, 0x85, 0xe7, 0xf8, 0x2e, 0xe3,
+ 0x3e, 0x38, 0x01, 0x30, 0x10, 0x54, 0x81, 0xef, 0x24, 0x82, 0x2b, 0xee, 0x30, 0x1a, 0x83, 0x4c,
+ 0xa8, 0x07, 0x97, 0x4f, 0x7d, 0x5d, 0xc1, 0x2b, 0x33, 0xa0, 0xd3, 0x0d, 0x38, 0x0f, 0x22, 0x30,
+ 0x2d, 0x6e, 0x3a, 0x76, 0x4e, 0x05, 0x4d, 0x12, 0x10, 0xd2, 0x50, 0x3b, 0x7b, 0xff, 0xaa, 0x29,
+ 0xbd, 0x09, 0xc4, 0xd4, 0x4c, 0xb1, 0xbf, 0xd4, 0x51, 0x8b, 0x80, 0x02, 0xa6, 0x42, 0xce, 0xde,
+ 0x26, 0xd9, 0xaf, 0xc4, 0x03, 0x74, 0x47, 0xe4, 0xd8, 0x08, 0x44, 0xc8, 0xfd, 0x43, 0xca, 0xb8,
+ 0x6c, 0x5b, 0x5b, 0x56, 0xaf, 0x4e, 0xae, 0xac, 0xe1, 0x87, 0x68, 0xdd, 0x8d, 0xb8, 0x77, 0x72,
+ 0x14, 0x7e, 0x06, 0xc3, 0xae, 0x69, 0x76, 0x05, 0xc5, 0x8f, 0xd1, 0xff, 0x6e, 0x3a, 0x1e, 0x83,
+ 0x78, 0x99, 0xaa, 0x54, 0xfc, 0xa1, 0xd6, 0x35, 0x75, 0xbe, 0x80, 0x7b, 0x68, 0xc3, 0x80, 0x23,
+ 0x2a, 0x95, 0xe1, 0x2e, 0x68, 0x6e, 0x15, 0xd6, 0xcc, 0x4c, 0x69, 0x8f, 0x2a, 0xba, 0x7f, 0x96,
+ 0x84, 0x62, 0xda, 0x5e, 0xdc, 0xb2, 0x7a, 0x0d, 0x52, 0x85, 0xf1, 0x31, 0xea, 0x55, 0xa0, 0xdd,
+ 0xb1, 0x02, 0x71, 0xc8, 0xd5, 0xae, 0xe7, 0x81, 0x94, 0xc5, 0x2f, 0x5e, 0xd2, 0x62, 0xb7, 0xe6,
+ 0xe3, 0x1d, 0xd4, 0x19, 0xeb, 0xd7, 0x27, 0x57, 0xed, 0x6f, 0x59, 0x4f, 0xbb, 0x81, 0x61, 0x8f,
+ 0xd0, 0xea, 0x2b, 0xe6, 0xc3, 0x59, 0xee, 0x44, 0x1b, 0x2d, 0x03, 0xa3, 0x6e, 0x04, 0xbe, 0x5e,
+ 0x7e, 0x83, 0xe4, 0x7f, 0x6f, 0xbb, 0x6f, 0xfb, 0x7c, 0x01, 0xb5, 0x0e, 0x73, 0xef, 0xf3, 0xb1,
+ 0x8f, 0x50, 0xcb, 0xe5, 0x5c, 0x49, 0x25, 0x68, 0xb2, 0x5f, 0x9a, 0x3f, 0x87, 0x63, 0x1b, 0xad,
+ 0x8e, 0xa3, 0x54, 0x4e, 0x72, 0x5e, 0x4d, 0xf3, 0x4a, 0x58, 0x66, 0xea, 0xa9, 0x08, 0x15, 0xc8,
+ 0x77, 0x7c, 0xc8, 0xe3, 0x38, 0x54, 0x6f, 0x78, 0xa0, 0x4d, 0x6d, 0x90, 0xf9, 0x42, 0xf6, 0xea,
+ 0x5e, 0x04, 0x94, 0xa5, 0x33, 0xed, 0x05, 0x4d, 0xad, 0xa0, 0xf8, 0x01, 0x5a, 0x13, 0x90, 0xd0,
+ 0x50, 0xe4, 0x34, 0x63, 0x68, 0x19, 0xc4, 0x07, 0xa8, 0x25, 0x2a, 0x01, 0xd6, 0xb6, 0x35, 0x07,
+ 0x77, 0xfb, 0x97, 0xc7, 0xab, 0x9a, 0x71, 0x32, 0xd7, 0x94, 0x25, 0x48, 0x32, 0x9a, 0xc8, 0x09,
+ 0x57, 0xb9, 0xe0, 0xb2, 0x49, 0x50, 0x05, 0xc6, 0xcf, 0xd1, 0x6a, 0x58, 0x70, 0xa9, 0xdd, 0xd0,
+ 0x72, 0x9b, 0x05, 0xb9, 0xa2, 0x89, 0xa4, 0x44, 0xc6, 0x3b, 0x68, 0xcd, 0x9c, 0xc0, 0xbc, 0x7b,
+ 0x45, 0x77, 0xb7, 0x0b, 0xdd, 0x47, 0xc5, 0x3a, 0x29, 0xd3, 0xb3, 0x5d, 0x7b, 0x3c, 0xf2, 0x3f,
+ 0xea, 0xb5, 0xe6, 0x2f, 0x8a, 0xcc, 0xae, 0xe7, 0x0a, 0xf8, 0x35, 0x5a, 0x17, 0x29, 0x53, 0x61,
+ 0x9c, 0x7b, 0xdf, 0x6e, 0x6a, 0x39, 0xbb, 0x20, 0x37, 0x8b, 0x07, 0x29, 0x31, 0x49, 0xa5, 0xd3,
+ 0xfe, 0x66, 0xa1, 0x06, 0x81, 0x20, 0x94, 0x4a, 0x4c, 0xf1, 0x10, 0xa1, 0xd9, 0x84, 0xec, 0x66,
+ 0xa8, 0xf7, 0x9a, 0x83, 0xfb, 0xa5, 0x85, 0x1b, 0xe2, 0xe5, 0x74, 0xb9, 0xcf, 0x94, 0x98, 0x92,
+ 0x42, 0x5b, 0xe7, 0x18, 0x6d, 0x54, 0xca, 0xb8, 0x85, 0xea, 0x27, 0x30, 0xd5, 0x69, 0x5c, 0x21,
+ 0xd9, 0x23, 0x7e, 0x82, 0x16, 0x3f, 0xd1, 0x28, 0x05, 0x9d, 0xbc, 0xb2, 0xab, 0xd5, 0x60, 0x13,
+ 0xc3, 0x7c, 0x56, 0x7b, 0x6a, 0xd9, 0xbf, 0x2c, 0xb4, 0x79, 0xcd, 0x97, 0x61, 0x1f, 0x75, 0x75,
+ 0x2c, 0xb5, 0x4d, 0x21, 0x0b, 0x46, 0x20, 0x86, 0xa3, 0xf7, 0x43, 0xce, 0xbc, 0x54, 0x08, 0x60,
+ 0x9e, 0xd1, 0x6f, 0x0e, 0xee, 0xf5, 0xcd, 0x25, 0xdc, 0xcf, 0x2f, 0xe1, 0xfe, 0x1e, 0x4f, 0xdd,
+ 0x08, 0x3e, 0x64, 0x2a, 0xe4, 0x2f, 0x33, 0x32, 0x15, 0x7d, 0x4a, 0xae, 0x57, 0xa9, 0xdd, 0x46,
+ 0xe5, 0xe6, 0x19, 0x2f, 0x5a, 0xe7, 0x17, 0x5d, 0xeb, 0xfb, 0x45, 0xd7, 0xfa, 0x71, 0xd1, 0xb5,
+ 0xbe, 0xfe, 0xec, 0xfe, 0xe7, 0x2e, 0xe9, 0x39, 0xdb, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x18,
+ 0x2e, 0x5a, 0xd3, 0x96, 0x06, 0x00, 0x00,
}
diff --git a/src/dbnode/generated/proto/namespace/namespace.proto b/src/dbnode/generated/proto/namespace/namespace.proto
index 5839b7bb7c..17e82a57dd 100644
--- a/src/dbnode/generated/proto/namespace/namespace.proto
+++ b/src/dbnode/generated/proto/namespace/namespace.proto
@@ -1,6 +1,8 @@
syntax = "proto3";
+
package namespace;
+import "google/protobuf/wrappers.proto";
import "github.com/m3db/m3/src/dbnode/generated/proto/namespace/schema.proto";
message RetentionOptions {
@@ -19,18 +21,24 @@ message IndexOptions {
}
message NamespaceOptions {
- bool bootstrapEnabled = 1;
- bool flushEnabled = 2;
- bool writesToCommitLog = 3;
- bool cleanupEnabled = 4;
- bool repairEnabled = 5;
- RetentionOptions retentionOptions = 6;
- bool snapshotEnabled = 7;
- IndexOptions indexOptions = 8;
- SchemaOptions schemaOptions = 9;
- bool coldWritesEnabled = 10;
+ bool bootstrapEnabled = 1;
+ bool flushEnabled = 2;
+ bool writesToCommitLog = 3;
+ bool cleanupEnabled = 4;
+ bool repairEnabled = 5;
+ RetentionOptions retentionOptions = 6;
+ bool snapshotEnabled = 7;
+ IndexOptions indexOptions = 8;
+ SchemaOptions schemaOptions = 9;
+ bool coldWritesEnabled = 10;
+ NamespaceRuntimeOptions runtimeOptions = 11;
}
message Registry {
map<string, NamespaceOptions> namespaces = 1;
}
+
+message NamespaceRuntimeOptions {
+ google.protobuf.DoubleValue writeIndexingPerCPUConcurrency = 1;
+ google.protobuf.DoubleValue flushIndexingPerCPUConcurrency = 2;
+}
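// Go-side sketch (using the generated types above): the DoubleValue
// wrappers let "unset" (nil) be distinguished from an explicit zero, which
// a plain double field could not express.
func exampleRuntimeOptions() *NamespaceRuntimeOptions {
	return &NamespaceRuntimeOptions{
		WriteIndexingPerCPUConcurrency: &google_protobuf.DoubleValue{Value: 0.5},
		// FlushIndexingPerCPUConcurrency left nil, i.e. not configured.
	}
}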
diff --git a/src/dbnode/generated/thrift/generate.sh b/src/dbnode/generated/thrift/generate.sh
index abe305627d..4b52ee0a92 100755
--- a/src/dbnode/generated/thrift/generate.sh
+++ b/src/dbnode/generated/thrift/generate.sh
@@ -8,6 +8,7 @@ docker run --rm hello-world >/dev/null
# generate files using dockerized thrift-gen
THRIFT_IMAGE_VERSION=${THRIFT_IMAGE_VERSION:-"quay.io/m3db/thrift-gen:0.1.0"}
+echo "Generating thrift files with image: $THRIFT_IMAGE_VERSION"
UID_FLAGS="-u $(id -u)"
if [[ -n "$BUILDKITE" ]]; then
diff --git a/src/dbnode/generated/thrift/rpc.thrift b/src/dbnode/generated/thrift/rpc.thrift
index 3b3a392b47..9f1674a50c 100644
--- a/src/dbnode/generated/thrift/rpc.thrift
+++ b/src/dbnode/generated/thrift/rpc.thrift
@@ -44,10 +44,8 @@ exception WriteBatchRawErrors {
service Node {
// Friendly not highly performant read/write endpoints
QueryResult query(1: QueryRequest req) throws (1: Error err)
- AggregateQueryRawResult aggregateRaw(1: AggregateQueryRawRequest req) throws (1: Error err)
AggregateQueryResult aggregate(1: AggregateQueryRequest req) throws (1: Error err)
FetchResult fetch(1: FetchRequest req) throws (1: Error err)
- FetchTaggedResult fetchTagged(1: FetchTaggedRequest req) throws (1: Error err)
void write(1: WriteRequest req) throws (1: Error err)
void writeTagged(1: WriteTaggedRequest req) throws (1: Error err)
@@ -55,7 +53,8 @@ service Node {
FetchBatchRawResult fetchBatchRaw(1: FetchBatchRawRequest req) throws (1: Error err)
FetchBatchRawResult fetchBatchRawV2(1: FetchBatchRawV2Request req) throws (1: Error err)
FetchBlocksRawResult fetchBlocksRaw(1: FetchBlocksRawRequest req) throws (1: Error err)
-
+ FetchTaggedResult fetchTagged(1: FetchTaggedRequest req) throws (1: Error err)
+ AggregateQueryRawResult aggregateRaw(1: AggregateQueryRawRequest req) throws (1: Error err)
FetchBlocksMetadataRawV2Result fetchBlocksMetadataRawV2(1: FetchBlocksMetadataRawV2Request req) throws (1: Error err)
void writeBatchRaw(1: WriteBatchRawRequest req) throws (1: WriteBatchRawErrors err)
void writeBatchRawV2(1: WriteBatchRawV2Request req) throws (1: WriteBatchRawErrors err)
@@ -78,6 +77,11 @@ service Node {
NodeWriteNewSeriesBackoffDurationResult setWriteNewSeriesBackoffDuration(1: NodeSetWriteNewSeriesBackoffDurationRequest req) throws (1: Error err)
NodeWriteNewSeriesLimitPerShardPerSecondResult getWriteNewSeriesLimitPerShardPerSecond() throws (1: Error err)
NodeWriteNewSeriesLimitPerShardPerSecondResult setWriteNewSeriesLimitPerShardPerSecond(1: NodeSetWriteNewSeriesLimitPerShardPerSecondRequest req) throws (1: Error err)
+
+ // Debug endpoints
+ DebugProfileStartResult debugProfileStart(1: DebugProfileStartRequest req) throws (1: Error err)
+ DebugProfileStopResult debugProfileStop(1: DebugProfileStopRequest req) throws (1: Error err)
+ DebugIndexMemorySegmentsResult debugIndexMemorySegments(1: DebugIndexMemorySegmentsRequest req) throws (1: Error err)
}
struct FetchRequest {
@@ -154,6 +158,7 @@ struct Segment {
2: required binary tail
3: optional i64 startTime
4: optional i64 blockSize
+ 5: optional i64 checksum
}
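// Go-side sketch for the new optional checksum (field 5): generated thrift
// optionals are pointers, so segments without a checksum simply leave it nil.
func segmentWithChecksum(head, tail []byte, checksum int64) *rpc.Segment {
	seg := rpc.NewSegment()
	seg.Head = head
	seg.Tail = tail
	seg.Checksum = &checksum
	return seg
}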
struct FetchTaggedRequest {
@@ -164,6 +169,8 @@ struct FetchTaggedRequest {
5: required bool fetchData
6: optional i64 limit
7: optional TimeType rangeTimeType = TimeType.UNIX_SECONDS
+ 8: optional bool requireExhaustive = false
+ 9: optional i64 docsLimit
}
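// Go-side sketch of the two new optional fields: requireExhaustive defaults
// to false, and docsLimit (a pointer in the generated Go) caps how many
// index documents the query may touch.
func fetchTaggedWithLimits(req *rpc.FetchTaggedRequest, docsLimit int64) *rpc.FetchTaggedRequest {
	req.RequireExhaustive = true // prefer an error over silently partial results
	req.DocsLimit = &docsLimit
	return req
}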
struct FetchTaggedResult {
@@ -300,6 +307,7 @@ struct NodeHealthResult {
1: required bool ok
2: required string status
3: required bool bootstrapped
+ 4: optional map<string, string> metadata
}
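// Sketch of the new optional metadata map on the generated Go struct; a nil
// map means the node reported none, and the key shown is hypothetical.
func healthWithMetadata() *rpc.NodeHealthResult_ {
	res := rpc.NewNodeHealthResult_()
	res.Ok = true
	res.Status = "up"
	res.Bootstrapped = true
	res.Metadata = map[string]string{"commit": "abc123"}
	return res
}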
struct NodeBootstrappedResult {}
@@ -481,3 +489,31 @@ struct Query {
6: optional AllQuery all
7: optional FieldQuery field
}
+
+struct DebugProfileStartRequest {
+ 1: required string name
+ 2: required string filePathTemplate
+ 3: optional string interval
+ 4: optional string duration
+ 5: optional i64 debug
+ 6: optional i64 conditionalNumGoroutinesGreaterThan
+ 7: optional i64 conditionalNumGoroutinesLessThan
+ 8: optional bool conditionalIsOverloaded
+}
+
+struct DebugProfileStartResult {
+}
+
+struct DebugProfileStopRequest {
+ 1: required string name
+}
+
+struct DebugProfileStopResult {
+}
+
+struct DebugIndexMemorySegmentsRequest {
+ 1: required string directory
+}
+
+struct DebugIndexMemorySegmentsResult {
+}
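// Hedged sketch of filling the new DebugProfileStartRequest in generated Go
// (the constructor name follows the generator's convention and is assumed);
// only name and filePathTemplate are required, the rest are optional knobs.
func debugProfileStartReq() *rpc.DebugProfileStartRequest {
	duration := "30s"
	req := rpc.NewDebugProfileStartRequest()
	req.Name = "cpu"
	req.FilePathTemplate = "/var/tmp/cpu-profile" // illustrative path only
	req.Duration = &duration
	return req
}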
diff --git a/src/dbnode/generated/thrift/rpc/rpc.go b/src/dbnode/generated/thrift/rpc/rpc.go
index 03e762f36a..811c33b172 100644
--- a/src/dbnode/generated/thrift/rpc/rpc.go
+++ b/src/dbnode/generated/thrift/rpc/rpc.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -2769,11 +2769,13 @@ func (p *Segments) String() string {
// - Tail
// - StartTime
// - BlockSize
+// - Checksum
type Segment struct {
Head []byte `thrift:"head,1,required" db:"head" json:"head"`
Tail []byte `thrift:"tail,2,required" db:"tail" json:"tail"`
StartTime *int64 `thrift:"startTime,3" db:"startTime" json:"startTime,omitempty"`
BlockSize *int64 `thrift:"blockSize,4" db:"blockSize" json:"blockSize,omitempty"`
+ Checksum *int64 `thrift:"checksum,5" db:"checksum" json:"checksum,omitempty"`
}
func NewSegment() *Segment {
@@ -2805,6 +2807,15 @@ func (p *Segment) GetBlockSize() int64 {
}
return *p.BlockSize
}
+
+var Segment_Checksum_DEFAULT int64
+
+func (p *Segment) GetChecksum() int64 {
+ if !p.IsSetChecksum() {
+ return Segment_Checksum_DEFAULT
+ }
+ return *p.Checksum
+}
func (p *Segment) IsSetStartTime() bool {
return p.StartTime != nil
}
@@ -2813,6 +2824,10 @@ func (p *Segment) IsSetBlockSize() bool {
return p.BlockSize != nil
}
+func (p *Segment) IsSetChecksum() bool {
+ return p.Checksum != nil
+}
+
func (p *Segment) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
@@ -2848,6 +2863,10 @@ func (p *Segment) Read(iprot thrift.TProtocol) error {
if err := p.ReadField4(iprot); err != nil {
return err
}
+ case 5:
+ if err := p.ReadField5(iprot); err != nil {
+ return err
+ }
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
@@ -2905,6 +2924,15 @@ func (p *Segment) ReadField4(iprot thrift.TProtocol) error {
return nil
}
+func (p *Segment) ReadField5(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+ } else {
+ p.Checksum = &v
+ }
+ return nil
+}
+
func (p *Segment) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("Segment"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
@@ -2922,6 +2950,9 @@ func (p *Segment) Write(oprot thrift.TProtocol) error {
if err := p.writeField4(oprot); err != nil {
return err
}
+ if err := p.writeField5(oprot); err != nil {
+ return err
+ }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -2988,6 +3019,21 @@ func (p *Segment) writeField4(oprot thrift.TProtocol) (err error) {
return err
}
+func (p *Segment) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetChecksum() {
+ if err := oprot.WriteFieldBegin("checksum", thrift.I64, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:checksum: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.Checksum)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.checksum (5) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:checksum: ", p), err)
+ }
+ }
+ return err
+}
+
func (p *Segment) String() string {
if p == nil {
return ""
@@ -3003,14 +3049,18 @@ func (p *Segment) String() string {
// - FetchData
// - Limit
// - RangeTimeType
+// - RequireExhaustive
+// - DocsLimit
type FetchTaggedRequest struct {
- NameSpace []byte `thrift:"nameSpace,1,required" db:"nameSpace" json:"nameSpace"`
- Query []byte `thrift:"query,2,required" db:"query" json:"query"`
- RangeStart int64 `thrift:"rangeStart,3,required" db:"rangeStart" json:"rangeStart"`
- RangeEnd int64 `thrift:"rangeEnd,4,required" db:"rangeEnd" json:"rangeEnd"`
- FetchData bool `thrift:"fetchData,5,required" db:"fetchData" json:"fetchData"`
- Limit *int64 `thrift:"limit,6" db:"limit" json:"limit,omitempty"`
- RangeTimeType TimeType `thrift:"rangeTimeType,7" db:"rangeTimeType" json:"rangeTimeType,omitempty"`
+ NameSpace []byte `thrift:"nameSpace,1,required" db:"nameSpace" json:"nameSpace"`
+ Query []byte `thrift:"query,2,required" db:"query" json:"query"`
+ RangeStart int64 `thrift:"rangeStart,3,required" db:"rangeStart" json:"rangeStart"`
+ RangeEnd int64 `thrift:"rangeEnd,4,required" db:"rangeEnd" json:"rangeEnd"`
+ FetchData bool `thrift:"fetchData,5,required" db:"fetchData" json:"fetchData"`
+ Limit *int64 `thrift:"limit,6" db:"limit" json:"limit,omitempty"`
+ RangeTimeType TimeType `thrift:"rangeTimeType,7" db:"rangeTimeType" json:"rangeTimeType,omitempty"`
+ RequireExhaustive bool `thrift:"requireExhaustive,8" db:"requireExhaustive" json:"requireExhaustive,omitempty"`
+ DocsLimit *int64 `thrift:"docsLimit,9" db:"docsLimit" json:"docsLimit,omitempty"`
}
func NewFetchTaggedRequest() *FetchTaggedRequest {
@@ -3053,6 +3103,21 @@ var FetchTaggedRequest_RangeTimeType_DEFAULT TimeType = 0
func (p *FetchTaggedRequest) GetRangeTimeType() TimeType {
return p.RangeTimeType
}
+
+var FetchTaggedRequest_RequireExhaustive_DEFAULT bool = false
+
+func (p *FetchTaggedRequest) GetRequireExhaustive() bool {
+ return p.RequireExhaustive
+}
+
+var FetchTaggedRequest_DocsLimit_DEFAULT int64
+
+func (p *FetchTaggedRequest) GetDocsLimit() int64 {
+ if !p.IsSetDocsLimit() {
+ return FetchTaggedRequest_DocsLimit_DEFAULT
+ }
+ return *p.DocsLimit
+}
func (p *FetchTaggedRequest) IsSetLimit() bool {
return p.Limit != nil
}
@@ -3061,6 +3126,14 @@ func (p *FetchTaggedRequest) IsSetRangeTimeType() bool {
return p.RangeTimeType != FetchTaggedRequest_RangeTimeType_DEFAULT
}
+func (p *FetchTaggedRequest) IsSetRequireExhaustive() bool {
+ return p.RequireExhaustive != FetchTaggedRequest_RequireExhaustive_DEFAULT
+}
+
+func (p *FetchTaggedRequest) IsSetDocsLimit() bool {
+ return p.DocsLimit != nil
+}
+
func (p *FetchTaggedRequest) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
@@ -3114,6 +3187,14 @@ func (p *FetchTaggedRequest) Read(iprot thrift.TProtocol) error {
if err := p.ReadField7(iprot); err != nil {
return err
}
+ case 8:
+ if err := p.ReadField8(iprot); err != nil {
+ return err
+ }
+ case 9:
+ if err := p.ReadField9(iprot); err != nil {
+ return err
+ }
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
@@ -3208,6 +3289,24 @@ func (p *FetchTaggedRequest) ReadField7(iprot thrift.TProtocol) error {
return nil
}
+func (p *FetchTaggedRequest) ReadField8(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(); err != nil {
+ return thrift.PrependError("error reading field 8: ", err)
+ } else {
+ p.RequireExhaustive = v
+ }
+ return nil
+}
+
+func (p *FetchTaggedRequest) ReadField9(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 9: ", err)
+ } else {
+ p.DocsLimit = &v
+ }
+ return nil
+}
+
func (p *FetchTaggedRequest) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("FetchTaggedRequest"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
@@ -3234,6 +3333,12 @@ func (p *FetchTaggedRequest) Write(oprot thrift.TProtocol) error {
if err := p.writeField7(oprot); err != nil {
return err
}
+ if err := p.writeField8(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField9(oprot); err != nil {
+ return err
+ }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -3339,6 +3444,36 @@ func (p *FetchTaggedRequest) writeField7(oprot thrift.TProtocol) (err error) {
return err
}
+func (p *FetchTaggedRequest) writeField8(oprot thrift.TProtocol) (err error) {
+ if p.IsSetRequireExhaustive() {
+ if err := oprot.WriteFieldBegin("requireExhaustive", thrift.BOOL, 8); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:requireExhaustive: ", p), err)
+ }
+ if err := oprot.WriteBool(bool(p.RequireExhaustive)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.requireExhaustive (8) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 8:requireExhaustive: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *FetchTaggedRequest) writeField9(oprot thrift.TProtocol) (err error) {
+ if p.IsSetDocsLimit() {
+ if err := oprot.WriteFieldBegin("docsLimit", thrift.I64, 9); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:docsLimit: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.DocsLimit)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.docsLimit (9) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 9:docsLimit: ", p), err)
+ }
+ }
+ return err
+}
+
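The two new optional fields land with the generator's two different optional encodings: requireExhaustive is a plain bool whose IsSet test compares against the declared default (false), so it only hits the wire when true, while docsLimit is a *int64 whose IsSet test is a nil check. A minimal caller-side sketch of the difference, assuming it compiles inside this generated package (the helper name is hypothetical):

// Sketch only: exercises the new optional fields on FetchTaggedRequest.
// The helper and its usage are illustrative, not part of this diff.
func withExhaustiveDocsLimit(req *FetchTaggedRequest, limit int64) *FetchTaggedRequest {
	req.RequireExhaustive = true // written by writeField8: differs from the false default
	req.DocsLimit = &limit       // written by writeField9: pointer is now non-nil
	return req
}

Note the asymmetry this creates on the read side: GetDocsLimit dereferences behind an IsSet guard, whereas a requireExhaustive explicitly set to false is indistinguishable from one that was never set.
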
func (p *FetchTaggedRequest) String() string {
if p == nil {
return ""
@@ -7599,10 +7734,12 @@ func (p *TruncateResult_) String() string {
// - Ok
// - Status
// - Bootstrapped
+// - Metadata
type NodeHealthResult_ struct {
- Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
- Status string `thrift:"status,2,required" db:"status" json:"status"`
- Bootstrapped bool `thrift:"bootstrapped,3,required" db:"bootstrapped" json:"bootstrapped"`
+ Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
+ Status string `thrift:"status,2,required" db:"status" json:"status"`
+ Bootstrapped bool `thrift:"bootstrapped,3,required" db:"bootstrapped" json:"bootstrapped"`
+ Metadata map[string]string `thrift:"metadata,4" db:"metadata" json:"metadata,omitempty"`
}
func NewNodeHealthResult_() *NodeHealthResult_ {
@@ -7620,6 +7757,16 @@ func (p *NodeHealthResult_) GetStatus() string {
func (p *NodeHealthResult_) GetBootstrapped() bool {
return p.Bootstrapped
}
+
+var NodeHealthResult__Metadata_DEFAULT map[string]string
+
+func (p *NodeHealthResult_) GetMetadata() map[string]string {
+ return p.Metadata
+}
+func (p *NodeHealthResult_) IsSetMetadata() bool {
+ return p.Metadata != nil
+}
+
func (p *NodeHealthResult_) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
@@ -7653,6 +7800,10 @@ func (p *NodeHealthResult_) Read(iprot thrift.TProtocol) error {
return err
}
issetBootstrapped = true
+ case 4:
+ if err := p.ReadField4(iprot); err != nil {
+ return err
+ }
default:
if err := iprot.Skip(fieldTypeId); err != nil {
return err
@@ -7704,6 +7855,34 @@ func (p *NodeHealthResult_) ReadField3(iprot thrift.TProtocol) error {
return nil
}
+func (p *NodeHealthResult_) ReadField4(iprot thrift.TProtocol) error {
+ _, _, size, err := iprot.ReadMapBegin()
+ if err != nil {
+ return thrift.PrependError("error reading map begin: ", err)
+ }
+ tMap := make(map[string]string, size)
+ p.Metadata = tMap
+ for i := 0; i < size; i++ {
+ var _key22 string
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 0: ", err)
+ } else {
+ _key22 = v
+ }
+ var _val23 string
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 0: ", err)
+ } else {
+ _val23 = v
+ }
+ p.Metadata[_key22] = _val23
+ }
+ if err := iprot.ReadMapEnd(); err != nil {
+ return thrift.PrependError("error reading map end: ", err)
+ }
+ return nil
+}
+
func (p *NodeHealthResult_) Write(oprot thrift.TProtocol) error {
if err := oprot.WriteStructBegin("NodeHealthResult"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
@@ -7718,6 +7897,9 @@ func (p *NodeHealthResult_) Write(oprot thrift.TProtocol) error {
if err := p.writeField3(oprot); err != nil {
return err
}
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -7767,6 +7949,32 @@ func (p *NodeHealthResult_) writeField3(oprot thrift.TProtocol) (err error) {
return err
}
+func (p *NodeHealthResult_) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMetadata() {
+ if err := oprot.WriteFieldBegin("metadata", thrift.MAP, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:metadata: ", p), err)
+ }
+ if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Metadata)); err != nil {
+ return thrift.PrependError("error writing map begin: ", err)
+ }
+ for k, v := range p.Metadata {
+ if err := oprot.WriteString(string(k)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err)
+ }
+ if err := oprot.WriteString(string(v)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err)
+ }
+ }
+ if err := oprot.WriteMapEnd(); err != nil {
+ return thrift.PrependError("error writing map end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:metadata: ", p), err)
+ }
+ }
+ return err
+}
+
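The new metadata field follows the same optional pattern, with one wrinkle worth noting: IsSetMetadata is a nil check, so a nil map omits field 4 from the wire entirely, while an empty non-nil map is still written as a zero-entry MAP. A small producer-side sketch, assuming it compiles inside this generated package (the helper name and values are illustrative):

// Sketch only: populates the extended health result.
func healthResult(ok bool, status string, bootstrapped bool, md map[string]string) *NodeHealthResult_ {
	r := NewNodeHealthResult_()
	r.Ok = ok
	r.Status = status
	r.Bootstrapped = bootstrapped
	r.Metadata = md // leave nil to keep field 4 off the wire entirely
	return r
}
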
func (p *NodeHealthResult_) String() string {
if p == nil {
return ""
@@ -9300,13 +9508,13 @@ func (p *AggregateQueryRawRequest) ReadField6(iprot thrift.TProtocol) error {
tSlice := make([][]byte, 0, size)
p.TagNameFilter = tSlice
for i := 0; i < size; i++ {
- var _elem22 []byte
+ var _elem24 []byte
if v, err := iprot.ReadBinary(); err != nil {
return thrift.PrependError("error reading field 0: ", err)
} else {
- _elem22 = v
+ _elem24 = v
}
- p.TagNameFilter = append(p.TagNameFilter, _elem22)
+ p.TagNameFilter = append(p.TagNameFilter, _elem24)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
@@ -9575,11 +9783,11 @@ func (p *AggregateQueryRawResult_) ReadField1(iprot thrift.TProtocol) error {
tSlice := make([]*AggregateQueryRawResultTagNameElement, 0, size)
p.Results = tSlice
for i := 0; i < size; i++ {
- _elem23 := &AggregateQueryRawResultTagNameElement{}
- if err := _elem23.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem23), err)
+ _elem25 := &AggregateQueryRawResultTagNameElement{}
+ if err := _elem25.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem25), err)
}
- p.Results = append(p.Results, _elem23)
+ p.Results = append(p.Results, _elem25)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
@@ -9743,11 +9951,11 @@ func (p *AggregateQueryRawResultTagNameElement) ReadField2(iprot thrift.TProtoco
tSlice := make([]*AggregateQueryRawResultTagValueElement, 0, size)
p.TagValues = tSlice
for i := 0; i < size; i++ {
- _elem24 := &AggregateQueryRawResultTagValueElement{}
- if err := _elem24.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem24), err)
+ _elem26 := &AggregateQueryRawResultTagValueElement{}
+ if err := _elem26.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem26), err)
}
- p.TagValues = append(p.TagValues, _elem24)
+ p.TagValues = append(p.TagValues, _elem26)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
@@ -10142,13 +10350,13 @@ func (p *AggregateQueryRequest) ReadField6(iprot thrift.TProtocol) error {
tSlice := make([]string, 0, size)
p.TagNameFilter = tSlice
for i := 0; i < size; i++ {
- var _elem25 string
+ var _elem27 string
if v, err := iprot.ReadString(); err != nil {
return thrift.PrependError("error reading field 0: ", err)
} else {
- _elem25 = v
+ _elem27 = v
}
- p.TagNameFilter = append(p.TagNameFilter, _elem25)
+ p.TagNameFilter = append(p.TagNameFilter, _elem27)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
@@ -10419,11 +10627,11 @@ func (p *AggregateQueryResult_) ReadField1(iprot thrift.TProtocol) error {
tSlice := make([]*AggregateQueryResultTagNameElement, 0, size)
p.Results = tSlice
for i := 0; i < size; i++ {
- _elem26 := &AggregateQueryResultTagNameElement{}
- if err := _elem26.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem26), err)
+ _elem28 := &AggregateQueryResultTagNameElement{}
+ if err := _elem28.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem28), err)
}
- p.Results = append(p.Results, _elem26)
+ p.Results = append(p.Results, _elem28)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
@@ -10587,11 +10795,11 @@ func (p *AggregateQueryResultTagNameElement) ReadField2(iprot thrift.TProtocol)
tSlice := make([]*AggregateQueryResultTagValueElement, 0, size)
p.TagValues = tSlice
for i := 0; i < size; i++ {
- _elem27 := &AggregateQueryResultTagValueElement{}
- if err := _elem27.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem27), err)
+ _elem29 := &AggregateQueryResultTagValueElement{}
+ if err := _elem29.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem29), err)
}
- p.TagValues = append(p.TagValues, _elem27)
+ p.TagValues = append(p.TagValues, _elem29)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
@@ -11248,11 +11456,11 @@ func (p *QueryResult_) ReadField1(iprot thrift.TProtocol) error {
tSlice := make([]*QueryResultElement, 0, size)
p.Results = tSlice
for i := 0; i < size; i++ {
- _elem28 := &QueryResultElement{}
- if err := _elem28.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem28), err)
+ _elem30 := &QueryResultElement{}
+ if err := _elem30.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem30), err)
}
- p.Results = append(p.Results, _elem28)
+ p.Results = append(p.Results, _elem30)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
@@ -11430,11 +11638,11 @@ func (p *QueryResultElement) ReadField2(iprot thrift.TProtocol) error {
tSlice := make([]*Tag, 0, size)
p.Tags = tSlice
for i := 0; i < size; i++ {
- _elem29 := &Tag{}
- if err := _elem29.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem29), err)
+ _elem31 := &Tag{}
+ if err := _elem31.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem31), err)
}
- p.Tags = append(p.Tags, _elem29)
+ p.Tags = append(p.Tags, _elem31)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
@@ -11450,13 +11658,13 @@ func (p *QueryResultElement) ReadField3(iprot thrift.TProtocol) error {
tSlice := make([]*Datapoint, 0, size)
p.Datapoints = tSlice
for i := 0; i < size; i++ {
- _elem30 := &Datapoint{
+ _elem32 := &Datapoint{
TimestampTimeType: 0,
}
- if err := _elem30.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem30), err)
+ if err := _elem32.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem32), err)
}
- p.Datapoints = append(p.Datapoints, _elem30)
+ p.Datapoints = append(p.Datapoints, _elem32)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
@@ -11995,11 +12203,11 @@ func (p *ConjunctionQuery) ReadField1(iprot thrift.TProtocol) error {
tSlice := make([]*Query, 0, size)
p.Queries = tSlice
for i := 0; i < size; i++ {
- _elem31 := &Query{}
- if err := _elem31.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem31), err)
+ _elem33 := &Query{}
+ if err := _elem33.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem33), err)
}
- p.Queries = append(p.Queries, _elem31)
+ p.Queries = append(p.Queries, _elem33)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
@@ -12113,11 +12321,11 @@ func (p *DisjunctionQuery) ReadField1(iprot thrift.TProtocol) error {
tSlice := make([]*Query, 0, size)
p.Queries = tSlice
for i := 0; i < size; i++ {
- _elem32 := &Query{}
- if err := _elem32.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem32), err)
+ _elem34 := &Query{}
+ if err := _elem34.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem34), err)
}
- p.Queries = append(p.Queries, _elem32)
+ p.Queries = append(p.Queries, _elem34)
}
if err := iprot.ReadListEnd(); err != nil {
return thrift.PrependError("error reading list end: ", err)
@@ -12698,688 +12906,910 @@ func (p *Query) String() string {
return fmt.Sprintf("Query(%+v)", *p)
}
-type Node interface {
- // Parameters:
- // - Req
- Query(req *QueryRequest) (r *QueryResult_, err error)
- // Parameters:
- // - Req
- AggregateRaw(req *AggregateQueryRawRequest) (r *AggregateQueryRawResult_, err error)
- // Parameters:
- // - Req
- Aggregate(req *AggregateQueryRequest) (r *AggregateQueryResult_, err error)
- // Parameters:
- // - Req
- Fetch(req *FetchRequest) (r *FetchResult_, err error)
- // Parameters:
- // - Req
- FetchTagged(req *FetchTaggedRequest) (r *FetchTaggedResult_, err error)
- // Parameters:
- // - Req
- Write(req *WriteRequest) (err error)
- // Parameters:
- // - Req
- WriteTagged(req *WriteTaggedRequest) (err error)
- // Parameters:
- // - Req
- FetchBatchRaw(req *FetchBatchRawRequest) (r *FetchBatchRawResult_, err error)
- // Parameters:
- // - Req
- FetchBatchRawV2(req *FetchBatchRawV2Request) (r *FetchBatchRawResult_, err error)
- // Parameters:
- // - Req
- FetchBlocksRaw(req *FetchBlocksRawRequest) (r *FetchBlocksRawResult_, err error)
- // Parameters:
- // - Req
- FetchBlocksMetadataRawV2(req *FetchBlocksMetadataRawV2Request) (r *FetchBlocksMetadataRawV2Result_, err error)
- // Parameters:
- // - Req
- WriteBatchRaw(req *WriteBatchRawRequest) (err error)
- // Parameters:
- // - Req
- WriteBatchRawV2(req *WriteBatchRawV2Request) (err error)
- // Parameters:
- // - Req
- WriteTaggedBatchRaw(req *WriteTaggedBatchRawRequest) (err error)
- // Parameters:
- // - Req
- WriteTaggedBatchRawV2(req *WriteTaggedBatchRawV2Request) (err error)
- Repair() (err error)
- // Parameters:
- // - Req
- Truncate(req *TruncateRequest) (r *TruncateResult_, err error)
- Health() (r *NodeHealthResult_, err error)
- Bootstrapped() (r *NodeBootstrappedResult_, err error)
- BootstrappedInPlacementOrNoPlacement() (r *NodeBootstrappedInPlacementOrNoPlacementResult_, err error)
- GetPersistRateLimit() (r *NodePersistRateLimitResult_, err error)
- // Parameters:
- // - Req
- SetPersistRateLimit(req *NodeSetPersistRateLimitRequest) (r *NodePersistRateLimitResult_, err error)
- GetWriteNewSeriesAsync() (r *NodeWriteNewSeriesAsyncResult_, err error)
- // Parameters:
- // - Req
- SetWriteNewSeriesAsync(req *NodeSetWriteNewSeriesAsyncRequest) (r *NodeWriteNewSeriesAsyncResult_, err error)
- GetWriteNewSeriesBackoffDuration() (r *NodeWriteNewSeriesBackoffDurationResult_, err error)
- // Parameters:
- // - Req
- SetWriteNewSeriesBackoffDuration(req *NodeSetWriteNewSeriesBackoffDurationRequest) (r *NodeWriteNewSeriesBackoffDurationResult_, err error)
- GetWriteNewSeriesLimitPerShardPerSecond() (r *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error)
- // Parameters:
- // - Req
- SetWriteNewSeriesLimitPerShardPerSecond(req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) (r *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error)
+// Attributes:
+// - Name
+// - FilePathTemplate
+// - Interval
+// - Duration
+// - Debug
+// - ConditionalNumGoroutinesGreaterThan
+// - ConditionalNumGoroutinesLessThan
+// - ConditionalIsOverloaded
+type DebugProfileStartRequest struct {
+ Name string `thrift:"name,1,required" db:"name" json:"name"`
+ FilePathTemplate string `thrift:"filePathTemplate,2,required" db:"filePathTemplate" json:"filePathTemplate"`
+ Interval *string `thrift:"interval,3" db:"interval" json:"interval,omitempty"`
+ Duration *string `thrift:"duration,4" db:"duration" json:"duration,omitempty"`
+ Debug *int64 `thrift:"debug,5" db:"debug" json:"debug,omitempty"`
+ ConditionalNumGoroutinesGreaterThan *int64 `thrift:"conditionalNumGoroutinesGreaterThan,6" db:"conditionalNumGoroutinesGreaterThan" json:"conditionalNumGoroutinesGreaterThan,omitempty"`
+ ConditionalNumGoroutinesLessThan *int64 `thrift:"conditionalNumGoroutinesLessThan,7" db:"conditionalNumGoroutinesLessThan" json:"conditionalNumGoroutinesLessThan,omitempty"`
+ ConditionalIsOverloaded *bool `thrift:"conditionalIsOverloaded,8" db:"conditionalIsOverloaded" json:"conditionalIsOverloaded,omitempty"`
+}
+
+func NewDebugProfileStartRequest() *DebugProfileStartRequest {
+ return &DebugProfileStartRequest{}
+}
+
+func (p *DebugProfileStartRequest) GetName() string {
+ return p.Name
}
-type NodeClient struct {
- Transport thrift.TTransport
- ProtocolFactory thrift.TProtocolFactory
- InputProtocol thrift.TProtocol
- OutputProtocol thrift.TProtocol
- SeqId int32
+func (p *DebugProfileStartRequest) GetFilePathTemplate() string {
+ return p.FilePathTemplate
}
-func NewNodeClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *NodeClient {
- return &NodeClient{Transport: t,
- ProtocolFactory: f,
- InputProtocol: f.GetProtocol(t),
- OutputProtocol: f.GetProtocol(t),
- SeqId: 0,
+var DebugProfileStartRequest_Interval_DEFAULT string
+
+func (p *DebugProfileStartRequest) GetInterval() string {
+ if !p.IsSetInterval() {
+ return DebugProfileStartRequest_Interval_DEFAULT
}
+ return *p.Interval
}
-func NewNodeClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *NodeClient {
- return &NodeClient{Transport: t,
- ProtocolFactory: nil,
- InputProtocol: iprot,
- OutputProtocol: oprot,
- SeqId: 0,
+var DebugProfileStartRequest_Duration_DEFAULT string
+
+func (p *DebugProfileStartRequest) GetDuration() string {
+ if !p.IsSetDuration() {
+ return DebugProfileStartRequest_Duration_DEFAULT
}
+ return *p.Duration
}
-// Parameters:
-// - Req
-func (p *NodeClient) Query(req *QueryRequest) (r *QueryResult_, err error) {
- if err = p.sendQuery(req); err != nil {
- return
+var DebugProfileStartRequest_Debug_DEFAULT int64
+
+func (p *DebugProfileStartRequest) GetDebug() int64 {
+ if !p.IsSetDebug() {
+ return DebugProfileStartRequest_Debug_DEFAULT
}
- return p.recvQuery()
+ return *p.Debug
}
-func (p *NodeClient) sendQuery(req *QueryRequest) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("query", thrift.CALL, p.SeqId); err != nil {
- return
- }
- args := NodeQueryArgs{
- Req: req,
- }
- if err = args.Write(oprot); err != nil {
- return
- }
- if err = oprot.WriteMessageEnd(); err != nil {
- return
+var DebugProfileStartRequest_ConditionalNumGoroutinesGreaterThan_DEFAULT int64
+
+func (p *DebugProfileStartRequest) GetConditionalNumGoroutinesGreaterThan() int64 {
+ if !p.IsSetConditionalNumGoroutinesGreaterThan() {
+ return DebugProfileStartRequest_ConditionalNumGoroutinesGreaterThan_DEFAULT
}
- return oprot.Flush()
+ return *p.ConditionalNumGoroutinesGreaterThan
}
-func (p *NodeClient) recvQuery() (value *QueryResult_, err error) {
- iprot := p.InputProtocol
- if iprot == nil {
- iprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.InputProtocol = iprot
- }
- method, mTypeId, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return
+var DebugProfileStartRequest_ConditionalNumGoroutinesLessThan_DEFAULT int64
+
+func (p *DebugProfileStartRequest) GetConditionalNumGoroutinesLessThan() int64 {
+ if !p.IsSetConditionalNumGoroutinesLessThan() {
+ return DebugProfileStartRequest_ConditionalNumGoroutinesLessThan_DEFAULT
}
- if method != "query" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "query failed: wrong method name")
- return
+ return *p.ConditionalNumGoroutinesLessThan
+}
+
+var DebugProfileStartRequest_ConditionalIsOverloaded_DEFAULT bool
+
+func (p *DebugProfileStartRequest) GetConditionalIsOverloaded() bool {
+ if !p.IsSetConditionalIsOverloaded() {
+ return DebugProfileStartRequest_ConditionalIsOverloaded_DEFAULT
}
- if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "query failed: out of sequence response")
- return
+ return *p.ConditionalIsOverloaded
+}
+func (p *DebugProfileStartRequest) IsSetInterval() bool {
+ return p.Interval != nil
+}
+
+func (p *DebugProfileStartRequest) IsSetDuration() bool {
+ return p.Duration != nil
+}
+
+func (p *DebugProfileStartRequest) IsSetDebug() bool {
+ return p.Debug != nil
+}
+
+func (p *DebugProfileStartRequest) IsSetConditionalNumGoroutinesGreaterThan() bool {
+ return p.ConditionalNumGoroutinesGreaterThan != nil
+}
+
+func (p *DebugProfileStartRequest) IsSetConditionalNumGoroutinesLessThan() bool {
+ return p.ConditionalNumGoroutinesLessThan != nil
+}
+
+func (p *DebugProfileStartRequest) IsSetConditionalIsOverloaded() bool {
+ return p.ConditionalIsOverloaded != nil
+}
+
+func (p *DebugProfileStartRequest) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
- if mTypeId == thrift.EXCEPTION {
- error33 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error34 error
- error34, err = error33.Read(iprot)
+
+ var issetName bool = false
+ var issetFilePathTemplate bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
- return
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
- if err = iprot.ReadMessageEnd(); err != nil {
- return
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ issetName = true
+ case 2:
+ if err := p.ReadField2(iprot); err != nil {
+ return err
+ }
+ issetFilePathTemplate = true
+ case 3:
+ if err := p.ReadField3(iprot); err != nil {
+ return err
+ }
+ case 4:
+ if err := p.ReadField4(iprot); err != nil {
+ return err
+ }
+ case 5:
+ if err := p.ReadField5(iprot); err != nil {
+ return err
+ }
+ case 6:
+ if err := p.ReadField6(iprot); err != nil {
+ return err
+ }
+ case 7:
+ if err := p.ReadField7(iprot); err != nil {
+ return err
+ }
+ case 8:
+ if err := p.ReadField8(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
}
- err = error34
- return
- }
- if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "query failed: invalid message type")
- return
}
- result := NodeQueryResult{}
- if err = result.Read(iprot); err != nil {
- return
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
- if err = iprot.ReadMessageEnd(); err != nil {
- return
+ if !issetName {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Name is not set"))
}
- if result.Err != nil {
- err = result.Err
- return
+ if !issetFilePathTemplate {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FilePathTemplate is not set"))
}
- value = result.GetSuccess()
- return
+ return nil
}
-// Parameters:
-// - Req
-func (p *NodeClient) AggregateRaw(req *AggregateQueryRawRequest) (r *AggregateQueryRawResult_, err error) {
- if err = p.sendAggregateRaw(req); err != nil {
- return
+func (p *DebugProfileStartRequest) ReadField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Name = v
}
- return p.recvAggregateRaw()
+ return nil
}
-func (p *NodeClient) sendAggregateRaw(req *AggregateQueryRawRequest) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("aggregateRaw", thrift.CALL, p.SeqId); err != nil {
- return
- }
- args := NodeAggregateRawArgs{
- Req: req,
- }
- if err = args.Write(oprot); err != nil {
- return
- }
- if err = oprot.WriteMessageEnd(); err != nil {
- return
+func (p *DebugProfileStartRequest) ReadField2(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+ } else {
+ p.FilePathTemplate = v
}
- return oprot.Flush()
+ return nil
}
-func (p *NodeClient) recvAggregateRaw() (value *AggregateQueryRawResult_, err error) {
- iprot := p.InputProtocol
- if iprot == nil {
- iprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.InputProtocol = iprot
- }
- method, mTypeId, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return
- }
- if method != "aggregateRaw" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "aggregateRaw failed: wrong method name")
- return
- }
- if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "aggregateRaw failed: out of sequence response")
- return
- }
- if mTypeId == thrift.EXCEPTION {
- error35 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error36 error
- error36, err = error35.Read(iprot)
- if err != nil {
- return
- }
- if err = iprot.ReadMessageEnd(); err != nil {
- return
- }
- err = error36
- return
+func (p *DebugProfileStartRequest) ReadField3(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+ } else {
+ p.Interval = &v
}
- if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "aggregateRaw failed: invalid message type")
- return
+ return nil
+}
+
+func (p *DebugProfileStartRequest) ReadField4(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+ } else {
+ p.Duration = &v
}
- result := NodeAggregateRawResult{}
- if err = result.Read(iprot); err != nil {
- return
+ return nil
+}
+
+func (p *DebugProfileStartRequest) ReadField5(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+ } else {
+ p.Debug = &v
}
- if err = iprot.ReadMessageEnd(); err != nil {
- return
+ return nil
+}
+
+func (p *DebugProfileStartRequest) ReadField6(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 6: ", err)
+ } else {
+ p.ConditionalNumGoroutinesGreaterThan = &v
}
- if result.Err != nil {
- err = result.Err
- return
+ return nil
+}
+
+func (p *DebugProfileStartRequest) ReadField7(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(); err != nil {
+ return thrift.PrependError("error reading field 7: ", err)
+ } else {
+ p.ConditionalNumGoroutinesLessThan = &v
}
- value = result.GetSuccess()
- return
+ return nil
}
-// Parameters:
-// - Req
-func (p *NodeClient) Aggregate(req *AggregateQueryRequest) (r *AggregateQueryResult_, err error) {
- if err = p.sendAggregate(req); err != nil {
- return
+func (p *DebugProfileStartRequest) ReadField8(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(); err != nil {
+ return thrift.PrependError("error reading field 8: ", err)
+ } else {
+ p.ConditionalIsOverloaded = &v
}
- return p.recvAggregate()
+ return nil
}
-func (p *NodeClient) sendAggregate(req *AggregateQueryRequest) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
+func (p *DebugProfileStartRequest) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("DebugProfileStartRequest"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
- p.SeqId++
- if err = oprot.WriteMessageBegin("aggregate", thrift.CALL, p.SeqId); err != nil {
- return
- }
- args := NodeAggregateArgs{
- Req: req,
+ if p != nil {
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField2(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField3(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField4(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField5(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField6(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField7(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField8(oprot); err != nil {
+ return err
+ }
}
- if err = args.Write(oprot); err != nil {
- return
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
}
- if err = oprot.WriteMessageEnd(); err != nil {
- return
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
}
- return oprot.Flush()
+ return nil
}
-func (p *NodeClient) recvAggregate() (value *AggregateQueryResult_, err error) {
- iprot := p.InputProtocol
- if iprot == nil {
- iprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.InputProtocol = iprot
- }
- method, mTypeId, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return
- }
- if method != "aggregate" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "aggregate failed: wrong method name")
- return
- }
- if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "aggregate failed: out of sequence response")
- return
+func (p *DebugProfileStartRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:name: ", p), err)
}
- if mTypeId == thrift.EXCEPTION {
- error37 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error38 error
- error38, err = error37.Read(iprot)
- if err != nil {
- return
- }
- if err = iprot.ReadMessageEnd(); err != nil {
- return
- }
- err = error38
- return
+ if err := oprot.WriteString(string(p.Name)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.name (1) field write error: ", p), err)
}
- if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "aggregate failed: invalid message type")
- return
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:name: ", p), err)
}
- result := NodeAggregateResult{}
- if err = result.Read(iprot); err != nil {
- return
+ return err
+}
+
+func (p *DebugProfileStartRequest) writeField2(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("filePathTemplate", thrift.STRING, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:filePathTemplate: ", p), err)
}
- if err = iprot.ReadMessageEnd(); err != nil {
- return
+ if err := oprot.WriteString(string(p.FilePathTemplate)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.filePathTemplate (2) field write error: ", p), err)
}
- if result.Err != nil {
- err = result.Err
- return
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:filePathTemplate: ", p), err)
}
- value = result.GetSuccess()
- return
+ return err
}
-// Parameters:
-// - Req
-func (p *NodeClient) Fetch(req *FetchRequest) (r *FetchResult_, err error) {
- if err = p.sendFetch(req); err != nil {
- return
+func (p *DebugProfileStartRequest) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetInterval() {
+ if err := oprot.WriteFieldBegin("interval", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:interval: ", p), err)
+ }
+ if err := oprot.WriteString(string(*p.Interval)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.interval (3) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:interval: ", p), err)
+ }
}
- return p.recvFetch()
+ return err
}
-func (p *NodeClient) sendFetch(req *FetchRequest) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("fetch", thrift.CALL, p.SeqId); err != nil {
- return
- }
- args := NodeFetchArgs{
- Req: req,
+func (p *DebugProfileStartRequest) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetDuration() {
+ if err := oprot.WriteFieldBegin("duration", thrift.STRING, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:duration: ", p), err)
+ }
+ if err := oprot.WriteString(string(*p.Duration)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.duration (4) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:duration: ", p), err)
+ }
}
- if err = args.Write(oprot); err != nil {
- return
+ return err
+}
+
+func (p *DebugProfileStartRequest) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetDebug() {
+ if err := oprot.WriteFieldBegin("debug", thrift.I64, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:debug: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.Debug)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.debug (5) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:debug: ", p), err)
+ }
}
- if err = oprot.WriteMessageEnd(); err != nil {
- return
+ return err
+}
+
+func (p *DebugProfileStartRequest) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetConditionalNumGoroutinesGreaterThan() {
+ if err := oprot.WriteFieldBegin("conditionalNumGoroutinesGreaterThan", thrift.I64, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:conditionalNumGoroutinesGreaterThan: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.ConditionalNumGoroutinesGreaterThan)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.conditionalNumGoroutinesGreaterThan (6) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:conditionalNumGoroutinesGreaterThan: ", p), err)
+ }
}
- return oprot.Flush()
+ return err
}
-func (p *NodeClient) recvFetch() (value *FetchResult_, err error) {
- iprot := p.InputProtocol
- if iprot == nil {
- iprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.InputProtocol = iprot
+func (p *DebugProfileStartRequest) writeField7(oprot thrift.TProtocol) (err error) {
+ if p.IsSetConditionalNumGoroutinesLessThan() {
+ if err := oprot.WriteFieldBegin("conditionalNumGoroutinesLessThan", thrift.I64, 7); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:conditionalNumGoroutinesLessThan: ", p), err)
+ }
+ if err := oprot.WriteI64(int64(*p.ConditionalNumGoroutinesLessThan)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.conditionalNumGoroutinesLessThan (7) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 7:conditionalNumGoroutinesLessThan: ", p), err)
+ }
}
- method, mTypeId, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return
+ return err
+}
+
+func (p *DebugProfileStartRequest) writeField8(oprot thrift.TProtocol) (err error) {
+ if p.IsSetConditionalIsOverloaded() {
+ if err := oprot.WriteFieldBegin("conditionalIsOverloaded", thrift.BOOL, 8); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:conditionalIsOverloaded: ", p), err)
+ }
+ if err := oprot.WriteBool(bool(*p.ConditionalIsOverloaded)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.conditionalIsOverloaded (8) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 8:conditionalIsOverloaded: ", p), err)
+ }
}
- if method != "fetch" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "fetch failed: wrong method name")
- return
+ return err
+}
+
+func (p *DebugProfileStartRequest) String() string {
+ if p == nil {
+ return ""
}
- if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "fetch failed: out of sequence response")
- return
+ return fmt.Sprintf("DebugProfileStartRequest(%+v)", *p)
+}
+
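Putting the new request struct to work: name and filePathTemplate are required (Read enforces both), everything else is optional and pointer-typed. A construction sketch with hypothetical values; the schema types interval and duration only as strings, so the duration-style encoding below is an assumption:

// Sketch only: all field values are hypothetical.
func newCPUProfileStartRequest() *DebugProfileStartRequest {
	interval := "10s" // assumed encoding; the schema type is just string
	debug := int64(1)
	req := NewDebugProfileStartRequest()
	req.Name = "cpu"
	req.FilePathTemplate = "/tmp/cpu-profile" // hypothetical path template
	req.Interval = &interval
	req.Debug = &debug
	return req
}
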
+type DebugProfileStartResult_ struct {
+}
+
+func NewDebugProfileStartResult_() *DebugProfileStartResult_ {
+ return &DebugProfileStartResult_{}
+}
+
+func (p *DebugProfileStartResult_) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
- if mTypeId == thrift.EXCEPTION {
- error39 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error40 error
- error40, err = error39.Read(iprot)
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
- return
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
- if err = iprot.ReadMessageEnd(); err != nil {
- return
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
}
- err = error40
- return
}
- if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "fetch failed: invalid message type")
- return
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
- result := NodeFetchResult{}
- if err = result.Read(iprot); err != nil {
- return
+ return nil
+}
+
+func (p *DebugProfileStartResult_) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("DebugProfileStartResult"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
- if err = iprot.ReadMessageEnd(); err != nil {
- return
+ if p != nil {
}
- if result.Err != nil {
- err = result.Err
- return
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
}
- value = result.GetSuccess()
- return
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
}
-// Parameters:
-// - Req
-func (p *NodeClient) FetchTagged(req *FetchTaggedRequest) (r *FetchTaggedResult_, err error) {
- if err = p.sendFetchTagged(req); err != nil {
- return
+func (p *DebugProfileStartResult_) String() string {
+ if p == nil {
+ return ""
}
- return p.recvFetchTagged()
+ return fmt.Sprintf("DebugProfileStartResult_(%+v)", *p)
}
-func (p *NodeClient) sendFetchTagged(req *FetchTaggedRequest) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("fetchTagged", thrift.CALL, p.SeqId); err != nil {
- return
- }
- args := NodeFetchTaggedArgs{
- Req: req,
- }
- if err = args.Write(oprot); err != nil {
- return
- }
- if err = oprot.WriteMessageEnd(); err != nil {
- return
- }
- return oprot.Flush()
+// Attributes:
+// - Name
+type DebugProfileStopRequest struct {
+ Name string `thrift:"name,1,required" db:"name" json:"name"`
}
-func (p *NodeClient) recvFetchTagged() (value *FetchTaggedResult_, err error) {
- iprot := p.InputProtocol
- if iprot == nil {
- iprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.InputProtocol = iprot
- }
- method, mTypeId, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return
- }
- if method != "fetchTagged" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "fetchTagged failed: wrong method name")
- return
- }
- if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "fetchTagged failed: out of sequence response")
- return
+func NewDebugProfileStopRequest() *DebugProfileStopRequest {
+ return &DebugProfileStopRequest{}
+}
+
+func (p *DebugProfileStopRequest) GetName() string {
+ return p.Name
+}
+func (p *DebugProfileStopRequest) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
- if mTypeId == thrift.EXCEPTION {
- error41 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error42 error
- error42, err = error41.Read(iprot)
+
+ var issetName bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
- return
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
- if err = iprot.ReadMessageEnd(); err != nil {
- return
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ issetName = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
}
- err = error42
- return
- }
- if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "fetchTagged failed: invalid message type")
- return
- }
- result := NodeFetchTaggedResult{}
- if err = result.Read(iprot); err != nil {
- return
}
- if err = iprot.ReadMessageEnd(); err != nil {
- return
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
- if result.Err != nil {
- err = result.Err
- return
+ if !issetName {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Name is not set"))
}
- value = result.GetSuccess()
- return
+ return nil
}
-// Parameters:
-// - Req
-func (p *NodeClient) Write(req *WriteRequest) (err error) {
- if err = p.sendWrite(req); err != nil {
- return
+func (p *DebugProfileStopRequest) ReadField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Name = v
}
- return p.recvWrite()
+ return nil
}
-func (p *NodeClient) sendWrite(req *WriteRequest) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("write", thrift.CALL, p.SeqId); err != nil {
- return
+func (p *DebugProfileStopRequest) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("DebugProfileStopRequest"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
- args := NodeWriteArgs{
- Req: req,
+ if p != nil {
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
}
- if err = args.Write(oprot); err != nil {
- return
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
}
- if err = oprot.WriteMessageEnd(); err != nil {
- return
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
}
- return oprot.Flush()
+ return nil
}
-func (p *NodeClient) recvWrite() (err error) {
- iprot := p.InputProtocol
- if iprot == nil {
- iprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.InputProtocol = iprot
+func (p *DebugProfileStopRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:name: ", p), err)
}
- method, mTypeId, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return
+ if err := oprot.WriteString(string(p.Name)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.name (1) field write error: ", p), err)
}
- if method != "write" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "write failed: wrong method name")
- return
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:name: ", p), err)
}
- if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "write failed: out of sequence response")
- return
+ return err
+}
+
+func (p *DebugProfileStopRequest) String() string {
+ if p == nil {
+ return ""
}
- if mTypeId == thrift.EXCEPTION {
- error43 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error44 error
- error44, err = error43.Read(iprot)
+ return fmt.Sprintf("DebugProfileStopRequest(%+v)", *p)
+}
+
+type DebugProfileStopResult_ struct {
+}
+
+func NewDebugProfileStopResult_() *DebugProfileStopResult_ {
+ return &DebugProfileStopResult_{}
+}
+
+func (p *DebugProfileStopResult_) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
- return
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
- if err = iprot.ReadMessageEnd(); err != nil {
- return
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
}
- err = error44
- return
}
- if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "write failed: invalid message type")
- return
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
- result := NodeWriteResult{}
- if err = result.Read(iprot); err != nil {
- return
+ return nil
+}
+
+func (p *DebugProfileStopResult_) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("DebugProfileStopResult"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
- if err = iprot.ReadMessageEnd(); err != nil {
- return
+ if p != nil {
}
- if result.Err != nil {
- err = result.Err
- return
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
}
- return
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
}
-// Parameters:
-// - Req
-func (p *NodeClient) WriteTagged(req *WriteTaggedRequest) (err error) {
- if err = p.sendWriteTagged(req); err != nil {
- return
+func (p *DebugProfileStopResult_) String() string {
+ if p == nil {
+ return ""
}
- return p.recvWriteTagged()
+ return fmt.Sprintf("DebugProfileStopResult_(%+v)", *p)
}
-func (p *NodeClient) sendWriteTagged(req *WriteTaggedRequest) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("writeTagged", thrift.CALL, p.SeqId); err != nil {
- return
+// Attributes:
+// - Directory
+type DebugIndexMemorySegmentsRequest struct {
+ Directory string `thrift:"directory,1,required" db:"directory" json:"directory"`
+}
+
+func NewDebugIndexMemorySegmentsRequest() *DebugIndexMemorySegmentsRequest {
+ return &DebugIndexMemorySegmentsRequest{}
+}
+
+func (p *DebugIndexMemorySegmentsRequest) GetDirectory() string {
+ return p.Directory
+}
+func (p *DebugIndexMemorySegmentsRequest) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
- args := NodeWriteTaggedArgs{
- Req: req,
+
+ var issetDirectory bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ issetDirectory = true
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
}
- if err = args.Write(oprot); err != nil {
- return
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
- if err = oprot.WriteMessageEnd(); err != nil {
- return
+ if !issetDirectory {
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Directory is not set"))
}
- return oprot.Flush()
+ return nil
}
-func (p *NodeClient) recvWriteTagged() (err error) {
- iprot := p.InputProtocol
- if iprot == nil {
- iprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.InputProtocol = iprot
+func (p *DebugIndexMemorySegmentsRequest) ReadField1(iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+ } else {
+ p.Directory = v
}
- method, mTypeId, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return
+ return nil
+}
+
+func (p *DebugIndexMemorySegmentsRequest) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("DebugIndexMemorySegmentsRequest"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
- if method != "writeTagged" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "writeTagged failed: wrong method name")
- return
+ if p != nil {
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
}
- if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "writeTagged failed: out of sequence response")
- return
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
}
- if mTypeId == thrift.EXCEPTION {
- error45 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error46 error
- error46, err = error45.Read(iprot)
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *DebugIndexMemorySegmentsRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("directory", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:directory: ", p), err)
+ }
+ if err := oprot.WriteString(string(p.Directory)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.directory (1) field write error: ", p), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:directory: ", p), err)
+ }
+ return err
+}
+
+func (p *DebugIndexMemorySegmentsRequest) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("DebugIndexMemorySegmentsRequest(%+v)", *p)
+}
+
+type DebugIndexMemorySegmentsResult_ struct {
+}
+
+func NewDebugIndexMemorySegmentsResult_() *DebugIndexMemorySegmentsResult_ {
+ return &DebugIndexMemorySegmentsResult_{}
+}
+
+func (p *DebugIndexMemorySegmentsResult_) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
if err != nil {
- return
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
}
- if err = iprot.ReadMessageEnd(); err != nil {
- return
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
}
- err = error46
- return
}
- if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "writeTagged failed: invalid message type")
- return
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
- result := NodeWriteTaggedResult{}
- if err = result.Read(iprot); err != nil {
- return
+ return nil
+}
+
+func (p *DebugIndexMemorySegmentsResult_) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("DebugIndexMemorySegmentsResult"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
- if err = iprot.ReadMessageEnd(); err != nil {
- return
+ if p != nil {
}
- if result.Err != nil {
- err = result.Err
- return
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *DebugIndexMemorySegmentsResult_) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("DebugIndexMemorySegmentsResult_(%+v)", *p)
+}
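The same required-field enforcement applies here: a peer that omits directory gets an INVALID_DATA protocol exception out of Read. A trivial construction sketch with an illustrative path:

// Sketch only: the directory value is hypothetical.
func newIndexSegmentsDebugRequest() *DebugIndexMemorySegmentsRequest {
	req := NewDebugIndexMemorySegmentsRequest()
	req.Directory = "/var/lib/m3db-debug" // hypothetical output directory
	return req
}
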
+
+type Node interface {
+ // Parameters:
+ // - Req
+ Query(req *QueryRequest) (r *QueryResult_, err error)
+ // Parameters:
+ // - Req
+ Aggregate(req *AggregateQueryRequest) (r *AggregateQueryResult_, err error)
+ // Parameters:
+ // - Req
+ Fetch(req *FetchRequest) (r *FetchResult_, err error)
+ // Parameters:
+ // - Req
+ Write(req *WriteRequest) (err error)
+ // Parameters:
+ // - Req
+ WriteTagged(req *WriteTaggedRequest) (err error)
+ // Parameters:
+ // - Req
+ FetchBatchRaw(req *FetchBatchRawRequest) (r *FetchBatchRawResult_, err error)
+ // Parameters:
+ // - Req
+ FetchBatchRawV2(req *FetchBatchRawV2Request) (r *FetchBatchRawResult_, err error)
+ // Parameters:
+ // - Req
+ FetchBlocksRaw(req *FetchBlocksRawRequest) (r *FetchBlocksRawResult_, err error)
+ // Parameters:
+ // - Req
+ FetchTagged(req *FetchTaggedRequest) (r *FetchTaggedResult_, err error)
+ // Parameters:
+ // - Req
+ AggregateRaw(req *AggregateQueryRawRequest) (r *AggregateQueryRawResult_, err error)
+ // Parameters:
+ // - Req
+ FetchBlocksMetadataRawV2(req *FetchBlocksMetadataRawV2Request) (r *FetchBlocksMetadataRawV2Result_, err error)
+ // Parameters:
+ // - Req
+ WriteBatchRaw(req *WriteBatchRawRequest) (err error)
+ // Parameters:
+ // - Req
+ WriteBatchRawV2(req *WriteBatchRawV2Request) (err error)
+ // Parameters:
+ // - Req
+ WriteTaggedBatchRaw(req *WriteTaggedBatchRawRequest) (err error)
+ // Parameters:
+ // - Req
+ WriteTaggedBatchRawV2(req *WriteTaggedBatchRawV2Request) (err error)
+ Repair() (err error)
+ // Parameters:
+ // - Req
+ Truncate(req *TruncateRequest) (r *TruncateResult_, err error)
+ Health() (r *NodeHealthResult_, err error)
+ Bootstrapped() (r *NodeBootstrappedResult_, err error)
+ BootstrappedInPlacementOrNoPlacement() (r *NodeBootstrappedInPlacementOrNoPlacementResult_, err error)
+ GetPersistRateLimit() (r *NodePersistRateLimitResult_, err error)
+ // Parameters:
+ // - Req
+ SetPersistRateLimit(req *NodeSetPersistRateLimitRequest) (r *NodePersistRateLimitResult_, err error)
+ GetWriteNewSeriesAsync() (r *NodeWriteNewSeriesAsyncResult_, err error)
+ // Parameters:
+ // - Req
+ SetWriteNewSeriesAsync(req *NodeSetWriteNewSeriesAsyncRequest) (r *NodeWriteNewSeriesAsyncResult_, err error)
+ GetWriteNewSeriesBackoffDuration() (r *NodeWriteNewSeriesBackoffDurationResult_, err error)
+ // Parameters:
+ // - Req
+ SetWriteNewSeriesBackoffDuration(req *NodeSetWriteNewSeriesBackoffDurationRequest) (r *NodeWriteNewSeriesBackoffDurationResult_, err error)
+ GetWriteNewSeriesLimitPerShardPerSecond() (r *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error)
+ // Parameters:
+ // - Req
+ SetWriteNewSeriesLimitPerShardPerSecond(req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) (r *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error)
+ // Parameters:
+ // - Req
+ DebugProfileStart(req *DebugProfileStartRequest) (r *DebugProfileStartResult_, err error)
+ // Parameters:
+ // - Req
+ DebugProfileStop(req *DebugProfileStopRequest) (r *DebugProfileStopResult_, err error)
+ // Parameters:
+ // - Req
+ DebugIndexMemorySegments(req *DebugIndexMemorySegmentsRequest) (r *DebugIndexMemorySegmentsResult_, err error)
+}
+
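For orientation, the sketch below wires the regenerated client to a framed binary-protocol socket and issues one of the no-argument calls from the interface above. The endpoint and transport wiring are illustrative assumptions only — in production M3DB carries these generated types over its own RPC channel rather than a bare socket:

```go
package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
)

func main() {
	// Hypothetical endpoint; real deployments take this from configuration.
	socket, err := thrift.NewTSocket("127.0.0.1:9000")
	if err != nil {
		panic(err)
	}
	transport := thrift.NewTFramedTransport(socket)
	if err := transport.Open(); err != nil {
		panic(err)
	}
	defer transport.Close()

	client := rpc.NewNodeClientFactory(transport, thrift.NewTBinaryProtocolFactoryDefault())

	// Health is one of the parameterless methods on the Node interface.
	res, err := client.Health()
	if err != nil {
		panic(err)
	}
	fmt.Println("healthy:", res.Ok)
}
```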
+type NodeClient struct {
+ Transport thrift.TTransport
+ ProtocolFactory thrift.TProtocolFactory
+ InputProtocol thrift.TProtocol
+ OutputProtocol thrift.TProtocol
+ SeqId int32
+}
+
+func NewNodeClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *NodeClient {
+ return &NodeClient{Transport: t,
+ ProtocolFactory: f,
+ InputProtocol: f.GetProtocol(t),
+ OutputProtocol: f.GetProtocol(t),
+ SeqId: 0,
+ }
+}
+
+func NewNodeClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *NodeClient {
+ return &NodeClient{Transport: t,
+ ProtocolFactory: nil,
+ InputProtocol: iprot,
+ OutputProtocol: oprot,
+ SeqId: 0,
}
- return
}
// Parameters:
// - Req
-func (p *NodeClient) FetchBatchRaw(req *FetchBatchRawRequest) (r *FetchBatchRawResult_, err error) {
- if err = p.sendFetchBatchRaw(req); err != nil {
+func (p *NodeClient) Query(req *QueryRequest) (r *QueryResult_, err error) {
+ if err = p.sendQuery(req); err != nil {
return
}
- return p.recvFetchBatchRaw()
+ return p.recvQuery()
}
-func (p *NodeClient) sendFetchBatchRaw(req *FetchBatchRawRequest) (err error) {
+func (p *NodeClient) sendQuery(req *QueryRequest) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("fetchBatchRaw", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("query", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeFetchBatchRawArgs{
+ args := NodeQueryArgs{
Req: req,
}
if err = args.Write(oprot); err != nil {
@@ -13391,7 +13821,7 @@ func (p *NodeClient) sendFetchBatchRaw(req *FetchBatchRawRequest) (err error) {
return oprot.Flush()
}
-func (p *NodeClient) recvFetchBatchRaw() (value *FetchBatchRawResult_, err error) {
+func (p *NodeClient) recvQuery() (value *QueryResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -13401,32 +13831,32 @@ func (p *NodeClient) recvFetchBatchRaw() (value *FetchBatchRawResult_, err error
if err != nil {
return
}
- if method != "fetchBatchRaw" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "fetchBatchRaw failed: wrong method name")
+ if method != "query" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "query failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "fetchBatchRaw failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "query failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error47 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error48 error
- error48, err = error47.Read(iprot)
+ error35 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error36 error
+ error36, err = error35.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error48
+ err = error36
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "fetchBatchRaw failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "query failed: invalid message type")
return
}
- result := NodeFetchBatchRawResult{}
+ result := NodeQueryResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -13443,24 +13873,24 @@ func (p *NodeClient) recvFetchBatchRaw() (value *FetchBatchRawResult_, err error
// Parameters:
// - Req
-func (p *NodeClient) FetchBatchRawV2(req *FetchBatchRawV2Request) (r *FetchBatchRawResult_, err error) {
- if err = p.sendFetchBatchRawV2(req); err != nil {
+func (p *NodeClient) Aggregate(req *AggregateQueryRequest) (r *AggregateQueryResult_, err error) {
+ if err = p.sendAggregate(req); err != nil {
return
}
- return p.recvFetchBatchRawV2()
+ return p.recvAggregate()
}
-func (p *NodeClient) sendFetchBatchRawV2(req *FetchBatchRawV2Request) (err error) {
+func (p *NodeClient) sendAggregate(req *AggregateQueryRequest) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("fetchBatchRawV2", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("aggregate", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeFetchBatchRawV2Args{
+ args := NodeAggregateArgs{
Req: req,
}
if err = args.Write(oprot); err != nil {
@@ -13472,7 +13902,7 @@ func (p *NodeClient) sendFetchBatchRawV2(req *FetchBatchRawV2Request) (err error
return oprot.Flush()
}
-func (p *NodeClient) recvFetchBatchRawV2() (value *FetchBatchRawResult_, err error) {
+func (p *NodeClient) recvAggregate() (value *AggregateQueryResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -13482,32 +13912,32 @@ func (p *NodeClient) recvFetchBatchRawV2() (value *FetchBatchRawResult_, err err
if err != nil {
return
}
- if method != "fetchBatchRawV2" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "fetchBatchRawV2 failed: wrong method name")
+ if method != "aggregate" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "aggregate failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "fetchBatchRawV2 failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "aggregate failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error49 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error50 error
- error50, err = error49.Read(iprot)
+ error37 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error38 error
+ error38, err = error37.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error50
+ err = error38
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "fetchBatchRawV2 failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "aggregate failed: invalid message type")
return
}
- result := NodeFetchBatchRawV2Result{}
+ result := NodeAggregateResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -13524,24 +13954,24 @@ func (p *NodeClient) recvFetchBatchRawV2() (value *FetchBatchRawResult_, err err
// Parameters:
// - Req
-func (p *NodeClient) FetchBlocksRaw(req *FetchBlocksRawRequest) (r *FetchBlocksRawResult_, err error) {
- if err = p.sendFetchBlocksRaw(req); err != nil {
+func (p *NodeClient) Fetch(req *FetchRequest) (r *FetchResult_, err error) {
+ if err = p.sendFetch(req); err != nil {
return
}
- return p.recvFetchBlocksRaw()
+ return p.recvFetch()
}
-func (p *NodeClient) sendFetchBlocksRaw(req *FetchBlocksRawRequest) (err error) {
+func (p *NodeClient) sendFetch(req *FetchRequest) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("fetchBlocksRaw", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("fetch", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeFetchBlocksRawArgs{
+ args := NodeFetchArgs{
Req: req,
}
if err = args.Write(oprot); err != nil {
@@ -13553,7 +13983,7 @@ func (p *NodeClient) sendFetchBlocksRaw(req *FetchBlocksRawRequest) (err error)
return oprot.Flush()
}
-func (p *NodeClient) recvFetchBlocksRaw() (value *FetchBlocksRawResult_, err error) {
+func (p *NodeClient) recvFetch() (value *FetchResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -13563,32 +13993,32 @@ func (p *NodeClient) recvFetchBlocksRaw() (value *FetchBlocksRawResult_, err err
if err != nil {
return
}
- if method != "fetchBlocksRaw" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "fetchBlocksRaw failed: wrong method name")
+ if method != "fetch" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "fetch failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "fetchBlocksRaw failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "fetch failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error51 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error52 error
- error52, err = error51.Read(iprot)
+ error39 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error40 error
+ error40, err = error39.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error52
+ err = error40
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "fetchBlocksRaw failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "fetch failed: invalid message type")
return
}
- result := NodeFetchBlocksRawResult{}
+ result := NodeFetchResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -13605,24 +14035,24 @@ func (p *NodeClient) recvFetchBlocksRaw() (value *FetchBlocksRawResult_, err err
// Parameters:
// - Req
-func (p *NodeClient) FetchBlocksMetadataRawV2(req *FetchBlocksMetadataRawV2Request) (r *FetchBlocksMetadataRawV2Result_, err error) {
- if err = p.sendFetchBlocksMetadataRawV2(req); err != nil {
+func (p *NodeClient) Write(req *WriteRequest) (err error) {
+ if err = p.sendWrite(req); err != nil {
return
}
- return p.recvFetchBlocksMetadataRawV2()
+ return p.recvWrite()
}
-func (p *NodeClient) sendFetchBlocksMetadataRawV2(req *FetchBlocksMetadataRawV2Request) (err error) {
+func (p *NodeClient) sendWrite(req *WriteRequest) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("fetchBlocksMetadataRawV2", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("write", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeFetchBlocksMetadataRawV2Args{
+ args := NodeWriteArgs{
Req: req,
}
if err = args.Write(oprot); err != nil {
@@ -13634,7 +14064,7 @@ func (p *NodeClient) sendFetchBlocksMetadataRawV2(req *FetchBlocksMetadataRawV2R
return oprot.Flush()
}
-func (p *NodeClient) recvFetchBlocksMetadataRawV2() (value *FetchBlocksMetadataRawV2Result_, err error) {
+func (p *NodeClient) recvWrite() (err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -13644,32 +14074,32 @@ func (p *NodeClient) recvFetchBlocksMetadataRawV2() (value *FetchBlocksMetadataR
if err != nil {
return
}
- if method != "fetchBlocksMetadataRawV2" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "fetchBlocksMetadataRawV2 failed: wrong method name")
+ if method != "write" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "write failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "fetchBlocksMetadataRawV2 failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "write failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error53 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error54 error
- error54, err = error53.Read(iprot)
+ error41 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error42 error
+ error42, err = error41.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error54
+ err = error42
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "fetchBlocksMetadataRawV2 failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "write failed: invalid message type")
return
}
- result := NodeFetchBlocksMetadataRawV2Result{}
+ result := NodeWriteResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -13680,30 +14110,29 @@ func (p *NodeClient) recvFetchBlocksMetadataRawV2() (value *FetchBlocksMetadataR
err = result.Err
return
}
- value = result.GetSuccess()
return
}
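Note the pattern across these hunks: recv methods for void RPCs (write, writeTagged, the batch writes, repair) end after propagating `result.Err`, while value-returning RPCs additionally keep `value = result.GetSuccess()`. The -/+ churn here is the diff pairing up renamed and reordered methods after the new query/aggregate RPCs were inserted, not a behavior change. A hedged sketch of the two call shapes as seen by a caller (request fields are illustrative):

```go
package example

import "github.com/m3db/m3/src/dbnode/generated/thrift/rpc"

// queryThenWrite exercises both generated call shapes: Query returns a
// decoded *QueryResult_, while WriteBatchRaw surfaces only an error.
func queryThenWrite(client *rpc.NodeClient) error {
	res, err := client.Query(&rpc.QueryRequest{
		Query:      &rpc.Query{}, // placeholder query
		NameSpace:  "metrics",    // hypothetical namespace
		RangeStart: 0,
		RangeEnd:   3600,
	})
	if err != nil {
		return err
	}
	_ = res.Results // decoded result payload

	// Void shape: no success payload to unpack, matching the recv methods
	// above that omit value = result.GetSuccess().
	return client.WriteBatchRaw(&rpc.WriteBatchRawRequest{
		NameSpace: []byte("metrics"),
	})
}
```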
// Parameters:
// - Req
-func (p *NodeClient) WriteBatchRaw(req *WriteBatchRawRequest) (err error) {
- if err = p.sendWriteBatchRaw(req); err != nil {
+func (p *NodeClient) WriteTagged(req *WriteTaggedRequest) (err error) {
+ if err = p.sendWriteTagged(req); err != nil {
return
}
- return p.recvWriteBatchRaw()
+ return p.recvWriteTagged()
}
-func (p *NodeClient) sendWriteBatchRaw(req *WriteBatchRawRequest) (err error) {
+func (p *NodeClient) sendWriteTagged(req *WriteTaggedRequest) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("writeBatchRaw", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("writeTagged", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeWriteBatchRawArgs{
+ args := NodeWriteTaggedArgs{
Req: req,
}
if err = args.Write(oprot); err != nil {
@@ -13715,7 +14144,7 @@ func (p *NodeClient) sendWriteBatchRaw(req *WriteBatchRawRequest) (err error) {
return oprot.Flush()
}
-func (p *NodeClient) recvWriteBatchRaw() (err error) {
+func (p *NodeClient) recvWriteTagged() (err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -13725,32 +14154,32 @@ func (p *NodeClient) recvWriteBatchRaw() (err error) {
if err != nil {
return
}
- if method != "writeBatchRaw" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "writeBatchRaw failed: wrong method name")
+ if method != "writeTagged" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "writeTagged failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "writeBatchRaw failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "writeTagged failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error55 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error56 error
- error56, err = error55.Read(iprot)
+ error43 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error44 error
+ error44, err = error43.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error56
+ err = error44
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "writeBatchRaw failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "writeTagged failed: invalid message type")
return
}
- result := NodeWriteBatchRawResult{}
+ result := NodeWriteTaggedResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -13766,24 +14195,24 @@ func (p *NodeClient) recvWriteBatchRaw() (err error) {
// Parameters:
// - Req
-func (p *NodeClient) WriteBatchRawV2(req *WriteBatchRawV2Request) (err error) {
- if err = p.sendWriteBatchRawV2(req); err != nil {
+func (p *NodeClient) FetchBatchRaw(req *FetchBatchRawRequest) (r *FetchBatchRawResult_, err error) {
+ if err = p.sendFetchBatchRaw(req); err != nil {
return
}
- return p.recvWriteBatchRawV2()
+ return p.recvFetchBatchRaw()
}
-func (p *NodeClient) sendWriteBatchRawV2(req *WriteBatchRawV2Request) (err error) {
+func (p *NodeClient) sendFetchBatchRaw(req *FetchBatchRawRequest) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("writeBatchRawV2", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("fetchBatchRaw", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeWriteBatchRawV2Args{
+ args := NodeFetchBatchRawArgs{
Req: req,
}
if err = args.Write(oprot); err != nil {
@@ -13795,7 +14224,7 @@ func (p *NodeClient) sendWriteBatchRawV2(req *WriteBatchRawV2Request) (err error
return oprot.Flush()
}
-func (p *NodeClient) recvWriteBatchRawV2() (err error) {
+func (p *NodeClient) recvFetchBatchRaw() (value *FetchBatchRawResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -13805,32 +14234,32 @@ func (p *NodeClient) recvWriteBatchRawV2() (err error) {
if err != nil {
return
}
- if method != "writeBatchRawV2" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "writeBatchRawV2 failed: wrong method name")
+ if method != "fetchBatchRaw" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "fetchBatchRaw failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "writeBatchRawV2 failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "fetchBatchRaw failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error57 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error58 error
- error58, err = error57.Read(iprot)
+ error45 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error46 error
+ error46, err = error45.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error58
+ err = error46
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "writeBatchRawV2 failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "fetchBatchRaw failed: invalid message type")
return
}
- result := NodeWriteBatchRawV2Result{}
+ result := NodeFetchBatchRawResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -13841,29 +14270,30 @@ func (p *NodeClient) recvWriteBatchRawV2() (err error) {
err = result.Err
return
}
+ value = result.GetSuccess()
return
}
// Parameters:
// - Req
-func (p *NodeClient) WriteTaggedBatchRaw(req *WriteTaggedBatchRawRequest) (err error) {
- if err = p.sendWriteTaggedBatchRaw(req); err != nil {
+func (p *NodeClient) FetchBatchRawV2(req *FetchBatchRawV2Request) (r *FetchBatchRawResult_, err error) {
+ if err = p.sendFetchBatchRawV2(req); err != nil {
return
}
- return p.recvWriteTaggedBatchRaw()
+ return p.recvFetchBatchRawV2()
}
-func (p *NodeClient) sendWriteTaggedBatchRaw(req *WriteTaggedBatchRawRequest) (err error) {
+func (p *NodeClient) sendFetchBatchRawV2(req *FetchBatchRawV2Request) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("writeTaggedBatchRaw", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("fetchBatchRawV2", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeWriteTaggedBatchRawArgs{
+ args := NodeFetchBatchRawV2Args{
Req: req,
}
if err = args.Write(oprot); err != nil {
@@ -13875,7 +14305,7 @@ func (p *NodeClient) sendWriteTaggedBatchRaw(req *WriteTaggedBatchRawRequest) (e
return oprot.Flush()
}
-func (p *NodeClient) recvWriteTaggedBatchRaw() (err error) {
+func (p *NodeClient) recvFetchBatchRawV2() (value *FetchBatchRawResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -13885,32 +14315,32 @@ func (p *NodeClient) recvWriteTaggedBatchRaw() (err error) {
if err != nil {
return
}
- if method != "writeTaggedBatchRaw" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "writeTaggedBatchRaw failed: wrong method name")
+ if method != "fetchBatchRawV2" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "fetchBatchRawV2 failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "writeTaggedBatchRaw failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "fetchBatchRawV2 failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error59 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error60 error
- error60, err = error59.Read(iprot)
+ error47 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error48 error
+ error48, err = error47.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error60
+ err = error48
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "writeTaggedBatchRaw failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "fetchBatchRawV2 failed: invalid message type")
return
}
- result := NodeWriteTaggedBatchRawResult{}
+ result := NodeFetchBatchRawV2Result{}
if err = result.Read(iprot); err != nil {
return
}
@@ -13921,29 +14351,30 @@ func (p *NodeClient) recvWriteTaggedBatchRaw() (err error) {
err = result.Err
return
}
+ value = result.GetSuccess()
return
}
// Parameters:
// - Req
-func (p *NodeClient) WriteTaggedBatchRawV2(req *WriteTaggedBatchRawV2Request) (err error) {
- if err = p.sendWriteTaggedBatchRawV2(req); err != nil {
+func (p *NodeClient) FetchBlocksRaw(req *FetchBlocksRawRequest) (r *FetchBlocksRawResult_, err error) {
+ if err = p.sendFetchBlocksRaw(req); err != nil {
return
}
- return p.recvWriteTaggedBatchRawV2()
+ return p.recvFetchBlocksRaw()
}
-func (p *NodeClient) sendWriteTaggedBatchRawV2(req *WriteTaggedBatchRawV2Request) (err error) {
+func (p *NodeClient) sendFetchBlocksRaw(req *FetchBlocksRawRequest) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("writeTaggedBatchRawV2", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("fetchBlocksRaw", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeWriteTaggedBatchRawV2Args{
+ args := NodeFetchBlocksRawArgs{
Req: req,
}
if err = args.Write(oprot); err != nil {
@@ -13955,7 +14386,7 @@ func (p *NodeClient) sendWriteTaggedBatchRawV2(req *WriteTaggedBatchRawV2Request
return oprot.Flush()
}
-func (p *NodeClient) recvWriteTaggedBatchRawV2() (err error) {
+func (p *NodeClient) recvFetchBlocksRaw() (value *FetchBlocksRawResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -13965,32 +14396,32 @@ func (p *NodeClient) recvWriteTaggedBatchRawV2() (err error) {
if err != nil {
return
}
- if method != "writeTaggedBatchRawV2" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "writeTaggedBatchRawV2 failed: wrong method name")
+ if method != "fetchBlocksRaw" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "fetchBlocksRaw failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "writeTaggedBatchRawV2 failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "fetchBlocksRaw failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error61 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error62 error
- error62, err = error61.Read(iprot)
+ error49 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error50 error
+ error50, err = error49.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error62
+ err = error50
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "writeTaggedBatchRawV2 failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "fetchBlocksRaw failed: invalid message type")
return
}
- result := NodeWriteTaggedBatchRawV2Result{}
+ result := NodeFetchBlocksRawResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14001,27 +14432,32 @@ func (p *NodeClient) recvWriteTaggedBatchRawV2() (err error) {
err = result.Err
return
}
+ value = result.GetSuccess()
return
}
-func (p *NodeClient) Repair() (err error) {
- if err = p.sendRepair(); err != nil {
+// Parameters:
+// - Req
+func (p *NodeClient) FetchTagged(req *FetchTaggedRequest) (r *FetchTaggedResult_, err error) {
+ if err = p.sendFetchTagged(req); err != nil {
return
}
- return p.recvRepair()
+ return p.recvFetchTagged()
}
-func (p *NodeClient) sendRepair() (err error) {
+func (p *NodeClient) sendFetchTagged(req *FetchTaggedRequest) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("repair", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("fetchTagged", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeRepairArgs{}
+ args := NodeFetchTaggedArgs{
+ Req: req,
+ }
if err = args.Write(oprot); err != nil {
return
}
@@ -14031,7 +14467,7 @@ func (p *NodeClient) sendRepair() (err error) {
return oprot.Flush()
}
-func (p *NodeClient) recvRepair() (err error) {
+func (p *NodeClient) recvFetchTagged() (value *FetchTaggedResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14041,32 +14477,32 @@ func (p *NodeClient) recvRepair() (err error) {
if err != nil {
return
}
- if method != "repair" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "repair failed: wrong method name")
+ if method != "fetchTagged" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "fetchTagged failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "repair failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "fetchTagged failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error63 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error64 error
- error64, err = error63.Read(iprot)
+ error51 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error52 error
+ error52, err = error51.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error64
+ err = error52
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "repair failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "fetchTagged failed: invalid message type")
return
}
- result := NodeRepairResult{}
+ result := NodeFetchTaggedResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14077,29 +14513,30 @@ func (p *NodeClient) recvRepair() (err error) {
err = result.Err
return
}
+ value = result.GetSuccess()
return
}
// Parameters:
// - Req
-func (p *NodeClient) Truncate(req *TruncateRequest) (r *TruncateResult_, err error) {
- if err = p.sendTruncate(req); err != nil {
+func (p *NodeClient) AggregateRaw(req *AggregateQueryRawRequest) (r *AggregateQueryRawResult_, err error) {
+ if err = p.sendAggregateRaw(req); err != nil {
return
}
- return p.recvTruncate()
+ return p.recvAggregateRaw()
}
-func (p *NodeClient) sendTruncate(req *TruncateRequest) (err error) {
+func (p *NodeClient) sendAggregateRaw(req *AggregateQueryRawRequest) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("truncate", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("aggregateRaw", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeTruncateArgs{
+ args := NodeAggregateRawArgs{
Req: req,
}
if err = args.Write(oprot); err != nil {
@@ -14111,7 +14548,7 @@ func (p *NodeClient) sendTruncate(req *TruncateRequest) (err error) {
return oprot.Flush()
}
-func (p *NodeClient) recvTruncate() (value *TruncateResult_, err error) {
+func (p *NodeClient) recvAggregateRaw() (value *AggregateQueryRawResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14121,32 +14558,32 @@ func (p *NodeClient) recvTruncate() (value *TruncateResult_, err error) {
if err != nil {
return
}
- if method != "truncate" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "truncate failed: wrong method name")
- return
+ if method != "aggregateRaw" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "aggregateRaw failed: wrong method name")
+ return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "truncate failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "aggregateRaw failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error65 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error66 error
- error66, err = error65.Read(iprot)
+ error53 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error54 error
+ error54, err = error53.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error66
+ err = error54
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "truncate failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "aggregateRaw failed: invalid message type")
return
}
- result := NodeTruncateResult{}
+ result := NodeAggregateRawResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14161,24 +14598,28 @@ func (p *NodeClient) recvTruncate() (value *TruncateResult_, err error) {
return
}
-func (p *NodeClient) Health() (r *NodeHealthResult_, err error) {
- if err = p.sendHealth(); err != nil {
+// Parameters:
+// - Req
+func (p *NodeClient) FetchBlocksMetadataRawV2(req *FetchBlocksMetadataRawV2Request) (r *FetchBlocksMetadataRawV2Result_, err error) {
+ if err = p.sendFetchBlocksMetadataRawV2(req); err != nil {
return
}
- return p.recvHealth()
+ return p.recvFetchBlocksMetadataRawV2()
}
-func (p *NodeClient) sendHealth() (err error) {
+func (p *NodeClient) sendFetchBlocksMetadataRawV2(req *FetchBlocksMetadataRawV2Request) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("health", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("fetchBlocksMetadataRawV2", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeHealthArgs{}
+ args := NodeFetchBlocksMetadataRawV2Args{
+ Req: req,
+ }
if err = args.Write(oprot); err != nil {
return
}
@@ -14188,7 +14629,7 @@ func (p *NodeClient) sendHealth() (err error) {
return oprot.Flush()
}
-func (p *NodeClient) recvHealth() (value *NodeHealthResult_, err error) {
+func (p *NodeClient) recvFetchBlocksMetadataRawV2() (value *FetchBlocksMetadataRawV2Result_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14198,32 +14639,32 @@ func (p *NodeClient) recvHealth() (value *NodeHealthResult_, err error) {
if err != nil {
return
}
- if method != "health" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "health failed: wrong method name")
+ if method != "fetchBlocksMetadataRawV2" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "fetchBlocksMetadataRawV2 failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "health failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "fetchBlocksMetadataRawV2 failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error67 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error68 error
- error68, err = error67.Read(iprot)
+ error55 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error56 error
+ error56, err = error55.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error68
+ err = error56
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "health failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "fetchBlocksMetadataRawV2 failed: invalid message type")
return
}
- result := NodeHealthResult{}
+ result := NodeFetchBlocksMetadataRawV2Result{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14238,24 +14679,28 @@ func (p *NodeClient) recvHealth() (value *NodeHealthResult_, err error) {
return
}
-func (p *NodeClient) Bootstrapped() (r *NodeBootstrappedResult_, err error) {
- if err = p.sendBootstrapped(); err != nil {
+// Parameters:
+// - Req
+func (p *NodeClient) WriteBatchRaw(req *WriteBatchRawRequest) (err error) {
+ if err = p.sendWriteBatchRaw(req); err != nil {
return
}
- return p.recvBootstrapped()
+ return p.recvWriteBatchRaw()
}
-func (p *NodeClient) sendBootstrapped() (err error) {
+func (p *NodeClient) sendWriteBatchRaw(req *WriteBatchRawRequest) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("bootstrapped", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("writeBatchRaw", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeBootstrappedArgs{}
+ args := NodeWriteBatchRawArgs{
+ Req: req,
+ }
if err = args.Write(oprot); err != nil {
return
}
@@ -14265,7 +14710,7 @@ func (p *NodeClient) sendBootstrapped() (err error) {
return oprot.Flush()
}
-func (p *NodeClient) recvBootstrapped() (value *NodeBootstrappedResult_, err error) {
+func (p *NodeClient) recvWriteBatchRaw() (err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14275,32 +14720,32 @@ func (p *NodeClient) recvBootstrapped() (value *NodeBootstrappedResult_, err err
if err != nil {
return
}
- if method != "bootstrapped" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "bootstrapped failed: wrong method name")
+ if method != "writeBatchRaw" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "writeBatchRaw failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "bootstrapped failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "writeBatchRaw failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error69 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error70 error
- error70, err = error69.Read(iprot)
+ error57 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error58 error
+ error58, err = error57.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error70
+ err = error58
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "bootstrapped failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "writeBatchRaw failed: invalid message type")
return
}
- result := NodeBootstrappedResult{}
+ result := NodeWriteBatchRawResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14311,28 +14756,31 @@ func (p *NodeClient) recvBootstrapped() (value *NodeBootstrappedResult_, err err
err = result.Err
return
}
- value = result.GetSuccess()
return
}
-func (p *NodeClient) BootstrappedInPlacementOrNoPlacement() (r *NodeBootstrappedInPlacementOrNoPlacementResult_, err error) {
- if err = p.sendBootstrappedInPlacementOrNoPlacement(); err != nil {
+// Parameters:
+// - Req
+func (p *NodeClient) WriteBatchRawV2(req *WriteBatchRawV2Request) (err error) {
+ if err = p.sendWriteBatchRawV2(req); err != nil {
return
}
- return p.recvBootstrappedInPlacementOrNoPlacement()
+ return p.recvWriteBatchRawV2()
}
-func (p *NodeClient) sendBootstrappedInPlacementOrNoPlacement() (err error) {
+func (p *NodeClient) sendWriteBatchRawV2(req *WriteBatchRawV2Request) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("bootstrappedInPlacementOrNoPlacement", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("writeBatchRawV2", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeBootstrappedInPlacementOrNoPlacementArgs{}
+ args := NodeWriteBatchRawV2Args{
+ Req: req,
+ }
if err = args.Write(oprot); err != nil {
return
}
@@ -14342,7 +14790,7 @@ func (p *NodeClient) sendBootstrappedInPlacementOrNoPlacement() (err error) {
return oprot.Flush()
}
-func (p *NodeClient) recvBootstrappedInPlacementOrNoPlacement() (value *NodeBootstrappedInPlacementOrNoPlacementResult_, err error) {
+func (p *NodeClient) recvWriteBatchRawV2() (err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14352,32 +14800,32 @@ func (p *NodeClient) recvBootstrappedInPlacementOrNoPlacement() (value *NodeBoot
if err != nil {
return
}
- if method != "bootstrappedInPlacementOrNoPlacement" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "bootstrappedInPlacementOrNoPlacement failed: wrong method name")
+ if method != "writeBatchRawV2" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "writeBatchRawV2 failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "bootstrappedInPlacementOrNoPlacement failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "writeBatchRawV2 failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error71 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error72 error
- error72, err = error71.Read(iprot)
+ error59 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error60 error
+ error60, err = error59.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error72
+ err = error60
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "bootstrappedInPlacementOrNoPlacement failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "writeBatchRawV2 failed: invalid message type")
return
}
- result := NodeBootstrappedInPlacementOrNoPlacementResult{}
+ result := NodeWriteBatchRawV2Result{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14388,28 +14836,31 @@ func (p *NodeClient) recvBootstrappedInPlacementOrNoPlacement() (value *NodeBoot
err = result.Err
return
}
- value = result.GetSuccess()
return
}
-func (p *NodeClient) GetPersistRateLimit() (r *NodePersistRateLimitResult_, err error) {
- if err = p.sendGetPersistRateLimit(); err != nil {
+// Parameters:
+// - Req
+func (p *NodeClient) WriteTaggedBatchRaw(req *WriteTaggedBatchRawRequest) (err error) {
+ if err = p.sendWriteTaggedBatchRaw(req); err != nil {
return
}
- return p.recvGetPersistRateLimit()
+ return p.recvWriteTaggedBatchRaw()
}
-func (p *NodeClient) sendGetPersistRateLimit() (err error) {
+func (p *NodeClient) sendWriteTaggedBatchRaw(req *WriteTaggedBatchRawRequest) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("getPersistRateLimit", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("writeTaggedBatchRaw", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeGetPersistRateLimitArgs{}
+ args := NodeWriteTaggedBatchRawArgs{
+ Req: req,
+ }
if err = args.Write(oprot); err != nil {
return
}
@@ -14419,7 +14870,7 @@ func (p *NodeClient) sendGetPersistRateLimit() (err error) {
return oprot.Flush()
}
-func (p *NodeClient) recvGetPersistRateLimit() (value *NodePersistRateLimitResult_, err error) {
+func (p *NodeClient) recvWriteTaggedBatchRaw() (err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14429,32 +14880,32 @@ func (p *NodeClient) recvGetPersistRateLimit() (value *NodePersistRateLimitResul
if err != nil {
return
}
- if method != "getPersistRateLimit" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getPersistRateLimit failed: wrong method name")
+ if method != "writeTaggedBatchRaw" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "writeTaggedBatchRaw failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getPersistRateLimit failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "writeTaggedBatchRaw failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error73 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error74 error
- error74, err = error73.Read(iprot)
+ error61 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error62 error
+ error62, err = error61.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error74
+ err = error62
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getPersistRateLimit failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "writeTaggedBatchRaw failed: invalid message type")
return
}
- result := NodeGetPersistRateLimitResult{}
+ result := NodeWriteTaggedBatchRawResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14465,30 +14916,29 @@ func (p *NodeClient) recvGetPersistRateLimit() (value *NodePersistRateLimitResul
err = result.Err
return
}
- value = result.GetSuccess()
return
}
// Parameters:
// - Req
-func (p *NodeClient) SetPersistRateLimit(req *NodeSetPersistRateLimitRequest) (r *NodePersistRateLimitResult_, err error) {
- if err = p.sendSetPersistRateLimit(req); err != nil {
+func (p *NodeClient) WriteTaggedBatchRawV2(req *WriteTaggedBatchRawV2Request) (err error) {
+ if err = p.sendWriteTaggedBatchRawV2(req); err != nil {
return
}
- return p.recvSetPersistRateLimit()
+ return p.recvWriteTaggedBatchRawV2()
}
-func (p *NodeClient) sendSetPersistRateLimit(req *NodeSetPersistRateLimitRequest) (err error) {
+func (p *NodeClient) sendWriteTaggedBatchRawV2(req *WriteTaggedBatchRawV2Request) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("setPersistRateLimit", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("writeTaggedBatchRawV2", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeSetPersistRateLimitArgs{
+ args := NodeWriteTaggedBatchRawV2Args{
Req: req,
}
if err = args.Write(oprot); err != nil {
@@ -14500,7 +14950,7 @@ func (p *NodeClient) sendSetPersistRateLimit(req *NodeSetPersistRateLimitRequest
return oprot.Flush()
}
-func (p *NodeClient) recvSetPersistRateLimit() (value *NodePersistRateLimitResult_, err error) {
+func (p *NodeClient) recvWriteTaggedBatchRawV2() (err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14510,32 +14960,32 @@ func (p *NodeClient) recvSetPersistRateLimit() (value *NodePersistRateLimitResul
if err != nil {
return
}
- if method != "setPersistRateLimit" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "setPersistRateLimit failed: wrong method name")
+ if method != "writeTaggedBatchRawV2" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "writeTaggedBatchRawV2 failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "setPersistRateLimit failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "writeTaggedBatchRawV2 failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error75 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error76 error
- error76, err = error75.Read(iprot)
+ error63 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error64 error
+ error64, err = error63.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error76
+ err = error64
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "setPersistRateLimit failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "writeTaggedBatchRawV2 failed: invalid message type")
return
}
- result := NodeSetPersistRateLimitResult{}
+ result := NodeWriteTaggedBatchRawV2Result{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14546,28 +14996,27 @@ func (p *NodeClient) recvSetPersistRateLimit() (value *NodePersistRateLimitResul
err = result.Err
return
}
- value = result.GetSuccess()
return
}
-func (p *NodeClient) GetWriteNewSeriesAsync() (r *NodeWriteNewSeriesAsyncResult_, err error) {
- if err = p.sendGetWriteNewSeriesAsync(); err != nil {
+func (p *NodeClient) Repair() (err error) {
+ if err = p.sendRepair(); err != nil {
return
}
- return p.recvGetWriteNewSeriesAsync()
+ return p.recvRepair()
}
-func (p *NodeClient) sendGetWriteNewSeriesAsync() (err error) {
+func (p *NodeClient) sendRepair() (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("getWriteNewSeriesAsync", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("repair", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeGetWriteNewSeriesAsyncArgs{}
+ args := NodeRepairArgs{}
if err = args.Write(oprot); err != nil {
return
}
@@ -14577,7 +15026,7 @@ func (p *NodeClient) sendGetWriteNewSeriesAsync() (err error) {
return oprot.Flush()
}
-func (p *NodeClient) recvGetWriteNewSeriesAsync() (value *NodeWriteNewSeriesAsyncResult_, err error) {
+func (p *NodeClient) recvRepair() (err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14587,32 +15036,32 @@ func (p *NodeClient) recvGetWriteNewSeriesAsync() (value *NodeWriteNewSeriesAsyn
if err != nil {
return
}
- if method != "getWriteNewSeriesAsync" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getWriteNewSeriesAsync failed: wrong method name")
+ if method != "repair" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "repair failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getWriteNewSeriesAsync failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "repair failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error77 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error78 error
- error78, err = error77.Read(iprot)
+ error65 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error66 error
+ error66, err = error65.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error78
+ err = error66
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getWriteNewSeriesAsync failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "repair failed: invalid message type")
return
}
- result := NodeGetWriteNewSeriesAsyncResult{}
+ result := NodeRepairResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14623,30 +15072,29 @@ func (p *NodeClient) recvGetWriteNewSeriesAsync() (value *NodeWriteNewSeriesAsyn
err = result.Err
return
}
- value = result.GetSuccess()
return
}
// Parameters:
// - Req
-func (p *NodeClient) SetWriteNewSeriesAsync(req *NodeSetWriteNewSeriesAsyncRequest) (r *NodeWriteNewSeriesAsyncResult_, err error) {
- if err = p.sendSetWriteNewSeriesAsync(req); err != nil {
+func (p *NodeClient) Truncate(req *TruncateRequest) (r *TruncateResult_, err error) {
+ if err = p.sendTruncate(req); err != nil {
return
}
- return p.recvSetWriteNewSeriesAsync()
+ return p.recvTruncate()
}
-func (p *NodeClient) sendSetWriteNewSeriesAsync(req *NodeSetWriteNewSeriesAsyncRequest) (err error) {
+func (p *NodeClient) sendTruncate(req *TruncateRequest) (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("setWriteNewSeriesAsync", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("truncate", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeSetWriteNewSeriesAsyncArgs{
+ args := NodeTruncateArgs{
Req: req,
}
if err = args.Write(oprot); err != nil {
@@ -14658,7 +15106,7 @@ func (p *NodeClient) sendSetWriteNewSeriesAsync(req *NodeSetWriteNewSeriesAsyncR
return oprot.Flush()
}
-func (p *NodeClient) recvSetWriteNewSeriesAsync() (value *NodeWriteNewSeriesAsyncResult_, err error) {
+func (p *NodeClient) recvTruncate() (value *TruncateResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14668,32 +15116,32 @@ func (p *NodeClient) recvSetWriteNewSeriesAsync() (value *NodeWriteNewSeriesAsyn
if err != nil {
return
}
- if method != "setWriteNewSeriesAsync" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "setWriteNewSeriesAsync failed: wrong method name")
+ if method != "truncate" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "truncate failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "setWriteNewSeriesAsync failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "truncate failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error79 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error80 error
- error80, err = error79.Read(iprot)
+ error67 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error68 error
+ error68, err = error67.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error80
+ err = error68
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "setWriteNewSeriesAsync failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "truncate failed: invalid message type")
return
}
- result := NodeSetWriteNewSeriesAsyncResult{}
+ result := NodeTruncateResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14708,24 +15156,24 @@ func (p *NodeClient) recvSetWriteNewSeriesAsync() (value *NodeWriteNewSeriesAsyn
return
}
-func (p *NodeClient) GetWriteNewSeriesBackoffDuration() (r *NodeWriteNewSeriesBackoffDurationResult_, err error) {
- if err = p.sendGetWriteNewSeriesBackoffDuration(); err != nil {
+func (p *NodeClient) Health() (r *NodeHealthResult_, err error) {
+ if err = p.sendHealth(); err != nil {
return
}
- return p.recvGetWriteNewSeriesBackoffDuration()
+ return p.recvHealth()
}
-func (p *NodeClient) sendGetWriteNewSeriesBackoffDuration() (err error) {
+func (p *NodeClient) sendHealth() (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("getWriteNewSeriesBackoffDuration", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("health", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeGetWriteNewSeriesBackoffDurationArgs{}
+ args := NodeHealthArgs{}
if err = args.Write(oprot); err != nil {
return
}
@@ -14735,7 +15183,7 @@ func (p *NodeClient) sendGetWriteNewSeriesBackoffDuration() (err error) {
return oprot.Flush()
}
-func (p *NodeClient) recvGetWriteNewSeriesBackoffDuration() (value *NodeWriteNewSeriesBackoffDurationResult_, err error) {
+func (p *NodeClient) recvHealth() (value *NodeHealthResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14745,32 +15193,32 @@ func (p *NodeClient) recvGetWriteNewSeriesBackoffDuration() (value *NodeWriteNew
if err != nil {
return
}
- if method != "getWriteNewSeriesBackoffDuration" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getWriteNewSeriesBackoffDuration failed: wrong method name")
+ if method != "health" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "health failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getWriteNewSeriesBackoffDuration failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "health failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error81 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error82 error
- error82, err = error81.Read(iprot)
+ error69 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error70 error
+ error70, err = error69.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error82
+ err = error70
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getWriteNewSeriesBackoffDuration failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "health failed: invalid message type")
return
}
- result := NodeGetWriteNewSeriesBackoffDurationResult{}
+ result := NodeHealthResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14785,28 +15233,24 @@ func (p *NodeClient) recvGetWriteNewSeriesBackoffDuration() (value *NodeWriteNew
return
}
-// Parameters:
-// - Req
-func (p *NodeClient) SetWriteNewSeriesBackoffDuration(req *NodeSetWriteNewSeriesBackoffDurationRequest) (r *NodeWriteNewSeriesBackoffDurationResult_, err error) {
- if err = p.sendSetWriteNewSeriesBackoffDuration(req); err != nil {
+func (p *NodeClient) Bootstrapped() (r *NodeBootstrappedResult_, err error) {
+ if err = p.sendBootstrapped(); err != nil {
return
}
- return p.recvSetWriteNewSeriesBackoffDuration()
+ return p.recvBootstrapped()
}
-func (p *NodeClient) sendSetWriteNewSeriesBackoffDuration(req *NodeSetWriteNewSeriesBackoffDurationRequest) (err error) {
+func (p *NodeClient) sendBootstrapped() (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("setWriteNewSeriesBackoffDuration", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("bootstrapped", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeSetWriteNewSeriesBackoffDurationArgs{
- Req: req,
- }
+ args := NodeBootstrappedArgs{}
if err = args.Write(oprot); err != nil {
return
}
@@ -14816,7 +15260,7 @@ func (p *NodeClient) sendSetWriteNewSeriesBackoffDuration(req *NodeSetWriteNewSe
return oprot.Flush()
}
-func (p *NodeClient) recvSetWriteNewSeriesBackoffDuration() (value *NodeWriteNewSeriesBackoffDurationResult_, err error) {
+func (p *NodeClient) recvBootstrapped() (value *NodeBootstrappedResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14826,32 +15270,32 @@ func (p *NodeClient) recvSetWriteNewSeriesBackoffDuration() (value *NodeWriteNew
if err != nil {
return
}
- if method != "setWriteNewSeriesBackoffDuration" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "setWriteNewSeriesBackoffDuration failed: wrong method name")
+ if method != "bootstrapped" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "bootstrapped failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "setWriteNewSeriesBackoffDuration failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "bootstrapped failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error83 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error84 error
- error84, err = error83.Read(iprot)
+ error71 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error72 error
+ error72, err = error71.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error84
+ err = error72
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "setWriteNewSeriesBackoffDuration failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "bootstrapped failed: invalid message type")
return
}
- result := NodeSetWriteNewSeriesBackoffDurationResult{}
+ result := NodeBootstrappedResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14866,24 +15310,24 @@ func (p *NodeClient) recvSetWriteNewSeriesBackoffDuration() (value *NodeWriteNew
return
}
-func (p *NodeClient) GetWriteNewSeriesLimitPerShardPerSecond() (r *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error) {
- if err = p.sendGetWriteNewSeriesLimitPerShardPerSecond(); err != nil {
+func (p *NodeClient) BootstrappedInPlacementOrNoPlacement() (r *NodeBootstrappedInPlacementOrNoPlacementResult_, err error) {
+ if err = p.sendBootstrappedInPlacementOrNoPlacement(); err != nil {
return
}
- return p.recvGetWriteNewSeriesLimitPerShardPerSecond()
+ return p.recvBootstrappedInPlacementOrNoPlacement()
}
-func (p *NodeClient) sendGetWriteNewSeriesLimitPerShardPerSecond() (err error) {
+func (p *NodeClient) sendBootstrappedInPlacementOrNoPlacement() (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("getWriteNewSeriesLimitPerShardPerSecond", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("bootstrappedInPlacementOrNoPlacement", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeGetWriteNewSeriesLimitPerShardPerSecondArgs{}
+ args := NodeBootstrappedInPlacementOrNoPlacementArgs{}
if err = args.Write(oprot); err != nil {
return
}
@@ -14893,7 +15337,7 @@ func (p *NodeClient) sendGetWriteNewSeriesLimitPerShardPerSecond() (err error) {
return oprot.Flush()
}
-func (p *NodeClient) recvGetWriteNewSeriesLimitPerShardPerSecond() (value *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error) {
+func (p *NodeClient) recvBootstrappedInPlacementOrNoPlacement() (value *NodeBootstrappedInPlacementOrNoPlacementResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14903,32 +15347,32 @@ func (p *NodeClient) recvGetWriteNewSeriesLimitPerShardPerSecond() (value *NodeW
if err != nil {
return
}
- if method != "getWriteNewSeriesLimitPerShardPerSecond" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getWriteNewSeriesLimitPerShardPerSecond failed: wrong method name")
+ if method != "bootstrappedInPlacementOrNoPlacement" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "bootstrappedInPlacementOrNoPlacement failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getWriteNewSeriesLimitPerShardPerSecond failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "bootstrappedInPlacementOrNoPlacement failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error85 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error86 error
- error86, err = error85.Read(iprot)
+ error73 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error74 error
+ error74, err = error73.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error86
+ err = error74
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getWriteNewSeriesLimitPerShardPerSecond failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "bootstrappedInPlacementOrNoPlacement failed: invalid message type")
return
}
- result := NodeGetWriteNewSeriesLimitPerShardPerSecondResult{}
+ result := NodeBootstrappedInPlacementOrNoPlacementResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -14943,28 +15387,24 @@ func (p *NodeClient) recvGetWriteNewSeriesLimitPerShardPerSecond() (value *NodeW
return
}
-// Parameters:
-// - Req
-func (p *NodeClient) SetWriteNewSeriesLimitPerShardPerSecond(req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) (r *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error) {
- if err = p.sendSetWriteNewSeriesLimitPerShardPerSecond(req); err != nil {
+func (p *NodeClient) GetPersistRateLimit() (r *NodePersistRateLimitResult_, err error) {
+ if err = p.sendGetPersistRateLimit(); err != nil {
return
}
- return p.recvSetWriteNewSeriesLimitPerShardPerSecond()
+ return p.recvGetPersistRateLimit()
}
-func (p *NodeClient) sendSetWriteNewSeriesLimitPerShardPerSecond(req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) (err error) {
+func (p *NodeClient) sendGetPersistRateLimit() (err error) {
oprot := p.OutputProtocol
if oprot == nil {
oprot = p.ProtocolFactory.GetProtocol(p.Transport)
p.OutputProtocol = oprot
}
p.SeqId++
- if err = oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.CALL, p.SeqId); err != nil {
+ if err = oprot.WriteMessageBegin("getPersistRateLimit", thrift.CALL, p.SeqId); err != nil {
return
}
- args := NodeSetWriteNewSeriesLimitPerShardPerSecondArgs{
- Req: req,
- }
+ args := NodeGetPersistRateLimitArgs{}
if err = args.Write(oprot); err != nil {
return
}
@@ -14974,7 +15414,7 @@ func (p *NodeClient) sendSetWriteNewSeriesLimitPerShardPerSecond(req *NodeSetWri
return oprot.Flush()
}
-func (p *NodeClient) recvSetWriteNewSeriesLimitPerShardPerSecond() (value *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error) {
+func (p *NodeClient) recvGetPersistRateLimit() (value *NodePersistRateLimitResult_, err error) {
iprot := p.InputProtocol
if iprot == nil {
iprot = p.ProtocolFactory.GetProtocol(p.Transport)
@@ -14984,32 +15424,32 @@ func (p *NodeClient) recvSetWriteNewSeriesLimitPerShardPerSecond() (value *NodeW
if err != nil {
return
}
- if method != "setWriteNewSeriesLimitPerShardPerSecond" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "setWriteNewSeriesLimitPerShardPerSecond failed: wrong method name")
+ if method != "getPersistRateLimit" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getPersistRateLimit failed: wrong method name")
return
}
if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "setWriteNewSeriesLimitPerShardPerSecond failed: out of sequence response")
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getPersistRateLimit failed: out of sequence response")
return
}
if mTypeId == thrift.EXCEPTION {
- error87 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error88 error
- error88, err = error87.Read(iprot)
+ error75 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error76 error
+ error76, err = error75.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error88
+ err = error76
return
}
if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "setWriteNewSeriesLimitPerShardPerSecond failed: invalid message type")
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getPersistRateLimit failed: invalid message type")
return
}
- result := NodeSetWriteNewSeriesLimitPerShardPerSecondResult{}
+ result := NodeGetPersistRateLimitResult{}
if err = result.Read(iprot); err != nil {
return
}
@@ -15024,914 +15464,888 @@ func (p *NodeClient) recvSetWriteNewSeriesLimitPerShardPerSecond() (value *NodeW
return
}
-type NodeProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler Node
-}
-
-func (p *NodeProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *NodeProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *NodeProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewNodeProcessor(handler Node) *NodeProcessor {
-
- self89 := &NodeProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
- self89.processorMap["query"] = &nodeProcessorQuery{handler: handler}
- self89.processorMap["aggregateRaw"] = &nodeProcessorAggregateRaw{handler: handler}
- self89.processorMap["aggregate"] = &nodeProcessorAggregate{handler: handler}
- self89.processorMap["fetch"] = &nodeProcessorFetch{handler: handler}
- self89.processorMap["fetchTagged"] = &nodeProcessorFetchTagged{handler: handler}
- self89.processorMap["write"] = &nodeProcessorWrite{handler: handler}
- self89.processorMap["writeTagged"] = &nodeProcessorWriteTagged{handler: handler}
- self89.processorMap["fetchBatchRaw"] = &nodeProcessorFetchBatchRaw{handler: handler}
- self89.processorMap["fetchBatchRawV2"] = &nodeProcessorFetchBatchRawV2{handler: handler}
- self89.processorMap["fetchBlocksRaw"] = &nodeProcessorFetchBlocksRaw{handler: handler}
- self89.processorMap["fetchBlocksMetadataRawV2"] = &nodeProcessorFetchBlocksMetadataRawV2{handler: handler}
- self89.processorMap["writeBatchRaw"] = &nodeProcessorWriteBatchRaw{handler: handler}
- self89.processorMap["writeBatchRawV2"] = &nodeProcessorWriteBatchRawV2{handler: handler}
- self89.processorMap["writeTaggedBatchRaw"] = &nodeProcessorWriteTaggedBatchRaw{handler: handler}
- self89.processorMap["writeTaggedBatchRawV2"] = &nodeProcessorWriteTaggedBatchRawV2{handler: handler}
- self89.processorMap["repair"] = &nodeProcessorRepair{handler: handler}
- self89.processorMap["truncate"] = &nodeProcessorTruncate{handler: handler}
- self89.processorMap["health"] = &nodeProcessorHealth{handler: handler}
- self89.processorMap["bootstrapped"] = &nodeProcessorBootstrapped{handler: handler}
- self89.processorMap["bootstrappedInPlacementOrNoPlacement"] = &nodeProcessorBootstrappedInPlacementOrNoPlacement{handler: handler}
- self89.processorMap["getPersistRateLimit"] = &nodeProcessorGetPersistRateLimit{handler: handler}
- self89.processorMap["setPersistRateLimit"] = &nodeProcessorSetPersistRateLimit{handler: handler}
- self89.processorMap["getWriteNewSeriesAsync"] = &nodeProcessorGetWriteNewSeriesAsync{handler: handler}
- self89.processorMap["setWriteNewSeriesAsync"] = &nodeProcessorSetWriteNewSeriesAsync{handler: handler}
- self89.processorMap["getWriteNewSeriesBackoffDuration"] = &nodeProcessorGetWriteNewSeriesBackoffDuration{handler: handler}
- self89.processorMap["setWriteNewSeriesBackoffDuration"] = &nodeProcessorSetWriteNewSeriesBackoffDuration{handler: handler}
- self89.processorMap["getWriteNewSeriesLimitPerShardPerSecond"] = &nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond{handler: handler}
- self89.processorMap["setWriteNewSeriesLimitPerShardPerSecond"] = &nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond{handler: handler}
- return self89
-}
-
-func (p *NodeProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return false, err
- }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(seqId, iprot, oprot)
+// Parameters:
+// - Req
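+// Each generated client endpoint follows the same shape as this one: a
+// public wrapper that delegates to a sendX method (write the CALL envelope
+// and argument struct, then flush) and a recvX method (validate and decode
+// the reply).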
+func (p *NodeClient) SetPersistRateLimit(req *NodeSetPersistRateLimitRequest) (r *NodePersistRateLimitResult_, err error) {
+ if err = p.sendSetPersistRateLimit(req); err != nil {
+ return
}
- iprot.Skip(thrift.STRUCT)
- iprot.ReadMessageEnd()
- x90 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
- oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
- x90.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, x90
-
-}
-
-type nodeProcessorQuery struct {
- handler Node
+ return p.recvSetPersistRateLimit()
}
-func (p *nodeProcessorQuery) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeQueryArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("query", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+func (p *NodeClient) sendSetPersistRateLimit(req *NodeSetPersistRateLimitRequest) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
}
-
- iprot.ReadMessageEnd()
- result := NodeQueryResult{}
- var retval *QueryResult_
- var err2 error
- if retval, err2 = p.handler.Query(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query: "+err2.Error())
- oprot.WriteMessageBegin("query", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
- } else {
- result.Success = retval
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("setPersistRateLimit", thrift.CALL, p.SeqId); err != nil {
+ return
}
- if err2 = oprot.WriteMessageBegin("query", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ args := NodeSetPersistRateLimitArgs{
+ Req: req,
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if err = args.Write(oprot); err != nil {
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ return oprot.Flush()
+}
+
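+// recvSetPersistRateLimit mirrors the receive sequence shared by every recv
+// method in this file: verify the reply's method name and sequence ID,
+// surface a transported TApplicationException, reject non-REPLY frames, and
+// propagate result.Err before returning the success value.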
+func (p *NodeClient) recvSetPersistRateLimit() (value *NodePersistRateLimitResult_, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
}
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return
}
- return true, err
-}
-
-type nodeProcessorAggregateRaw struct {
- handler Node
-}
-
-func (p *nodeProcessorAggregateRaw) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeAggregateRawArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("aggregateRaw", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+ if method != "setPersistRateLimit" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "setPersistRateLimit failed: wrong method name")
+ return
}
-
- iprot.ReadMessageEnd()
- result := NodeAggregateRawResult{}
- var retval *AggregateQueryRawResult_
- var err2 error
- if retval, err2 = p.handler.AggregateRaw(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing aggregateRaw: "+err2.Error())
- oprot.WriteMessageBegin("aggregateRaw", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
- } else {
- result.Success = retval
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "setPersistRateLimit failed: out of sequence response")
+ return
}
- if err2 = oprot.WriteMessageBegin("aggregateRaw", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ if mTypeId == thrift.EXCEPTION {
+ error77 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error78 error
+ error78, err = error77.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error78
+ return
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "setPersistRateLimit failed: invalid message type")
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ result := NodeSetPersistRateLimitResult{}
+ if err = result.Read(iprot); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
}
- if err != nil {
+ if result.Err != nil {
+ err = result.Err
return
}
- return true, err
-}
-
-type nodeProcessorAggregate struct {
- handler Node
+ value = result.GetSuccess()
+ return
}
-func (p *nodeProcessorAggregate) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeAggregateArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("aggregate", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+func (p *NodeClient) GetWriteNewSeriesAsync() (r *NodeWriteNewSeriesAsyncResult_, err error) {
+ if err = p.sendGetWriteNewSeriesAsync(); err != nil {
+ return
}
+ return p.recvGetWriteNewSeriesAsync()
+}
- iprot.ReadMessageEnd()
- result := NodeAggregateResult{}
- var retval *AggregateQueryResult_
- var err2 error
- if retval, err2 = p.handler.Aggregate(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing aggregate: "+err2.Error())
- oprot.WriteMessageBegin("aggregate", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
- } else {
- result.Success = retval
+func (p *NodeClient) sendGetWriteNewSeriesAsync() (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
}
- if err2 = oprot.WriteMessageBegin("aggregate", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("getWriteNewSeriesAsync", thrift.CALL, p.SeqId); err != nil {
+ return
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ args := NodeGetWriteNewSeriesAsyncArgs{}
+ if err = args.Write(oprot); err != nil {
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ return oprot.Flush()
+}
+
+func (p *NodeClient) recvGetWriteNewSeriesAsync() (value *NodeWriteNewSeriesAsyncResult_, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
}
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return
}
- return true, err
-}
-
-type nodeProcessorFetch struct {
- handler Node
-}
-
-func (p *nodeProcessorFetch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeFetchArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("fetch", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+ if method != "getWriteNewSeriesAsync" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getWriteNewSeriesAsync failed: wrong method name")
+ return
}
-
- iprot.ReadMessageEnd()
- result := NodeFetchResult{}
- var retval *FetchResult_
- var err2 error
- if retval, err2 = p.handler.Fetch(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetch: "+err2.Error())
- oprot.WriteMessageBegin("fetch", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
- } else {
- result.Success = retval
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getWriteNewSeriesAsync failed: out of sequence response")
+ return
}
- if err2 = oprot.WriteMessageBegin("fetch", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ if mTypeId == thrift.EXCEPTION {
+ error79 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error80 error
+ error80, err = error79.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error80
+ return
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getWriteNewSeriesAsync failed: invalid message type")
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ result := NodeGetWriteNewSeriesAsyncResult{}
+ if err = result.Read(iprot); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
}
- if err != nil {
+ if result.Err != nil {
+ err = result.Err
return
}
- return true, err
+ value = result.GetSuccess()
+ return
}
-type nodeProcessorFetchTagged struct {
- handler Node
+// Parameters:
+// - Req
+func (p *NodeClient) SetWriteNewSeriesAsync(req *NodeSetWriteNewSeriesAsyncRequest) (r *NodeWriteNewSeriesAsyncResult_, err error) {
+ if err = p.sendSetWriteNewSeriesAsync(req); err != nil {
+ return
+ }
+ return p.recvSetWriteNewSeriesAsync()
}
-func (p *nodeProcessorFetchTagged) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeFetchTaggedArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("fetchTagged", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+func (p *NodeClient) sendSetWriteNewSeriesAsync(req *NodeSetWriteNewSeriesAsyncRequest) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
}
-
- iprot.ReadMessageEnd()
- result := NodeFetchTaggedResult{}
- var retval *FetchTaggedResult_
- var err2 error
- if retval, err2 = p.handler.FetchTagged(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchTagged: "+err2.Error())
- oprot.WriteMessageBegin("fetchTagged", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
- } else {
- result.Success = retval
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("setWriteNewSeriesAsync", thrift.CALL, p.SeqId); err != nil {
+ return
}
- if err2 = oprot.WriteMessageBegin("fetchTagged", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ args := NodeSetWriteNewSeriesAsyncArgs{
+ Req: req,
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if err = args.Write(oprot); err != nil {
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ return oprot.Flush()
+}
+
+func (p *NodeClient) recvSetWriteNewSeriesAsync() (value *NodeWriteNewSeriesAsyncResult_, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
}
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return
}
- return true, err
-}
-
-type nodeProcessorWrite struct {
- handler Node
-}
-
-func (p *nodeProcessorWrite) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeWriteArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("write", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+ if method != "setWriteNewSeriesAsync" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "setWriteNewSeriesAsync failed: wrong method name")
+ return
}
-
- iprot.ReadMessageEnd()
- result := NodeWriteResult{}
- var err2 error
- if err2 = p.handler.Write(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing write: "+err2.Error())
- oprot.WriteMessageBegin("write", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "setWriteNewSeriesAsync failed: out of sequence response")
+ return
}
- if err2 = oprot.WriteMessageBegin("write", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ if mTypeId == thrift.EXCEPTION {
+ error81 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error82 error
+ error82, err = error81.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error82
+ return
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "setWriteNewSeriesAsync failed: invalid message type")
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ result := NodeSetWriteNewSeriesAsyncResult{}
+ if err = result.Read(iprot); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
}
- if err != nil {
+ if result.Err != nil {
+ err = result.Err
return
}
- return true, err
-}
-
-type nodeProcessorWriteTagged struct {
- handler Node
+ value = result.GetSuccess()
+ return
}
-func (p *nodeProcessorWriteTagged) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeWriteTaggedArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("writeTagged", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+func (p *NodeClient) GetWriteNewSeriesBackoffDuration() (r *NodeWriteNewSeriesBackoffDurationResult_, err error) {
+ if err = p.sendGetWriteNewSeriesBackoffDuration(); err != nil {
+ return
}
+ return p.recvGetWriteNewSeriesBackoffDuration()
+}
- iprot.ReadMessageEnd()
- result := NodeWriteTaggedResult{}
- var err2 error
- if err2 = p.handler.WriteTagged(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing writeTagged: "+err2.Error())
- oprot.WriteMessageBegin("writeTagged", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
+func (p *NodeClient) sendGetWriteNewSeriesBackoffDuration() (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
}
- if err2 = oprot.WriteMessageBegin("writeTagged", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("getWriteNewSeriesBackoffDuration", thrift.CALL, p.SeqId); err != nil {
+ return
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ args := NodeGetWriteNewSeriesBackoffDurationArgs{}
+ if err = args.Write(oprot); err != nil {
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ return oprot.Flush()
+}
+
+func (p *NodeClient) recvGetWriteNewSeriesBackoffDuration() (value *NodeWriteNewSeriesBackoffDurationResult_, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
}
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return
}
- return true, err
-}
-
-type nodeProcessorFetchBatchRaw struct {
- handler Node
-}
-
-func (p *nodeProcessorFetchBatchRaw) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeFetchBatchRawArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("fetchBatchRaw", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+ if method != "getWriteNewSeriesBackoffDuration" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getWriteNewSeriesBackoffDuration failed: wrong method name")
+ return
}
-
- iprot.ReadMessageEnd()
- result := NodeFetchBatchRawResult{}
- var retval *FetchBatchRawResult_
- var err2 error
- if retval, err2 = p.handler.FetchBatchRaw(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchBatchRaw: "+err2.Error())
- oprot.WriteMessageBegin("fetchBatchRaw", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
- } else {
- result.Success = retval
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getWriteNewSeriesBackoffDuration failed: out of sequence response")
+ return
}
- if err2 = oprot.WriteMessageBegin("fetchBatchRaw", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ if mTypeId == thrift.EXCEPTION {
+ error83 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error84 error
+ error84, err = error83.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error84
+ return
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getWriteNewSeriesBackoffDuration failed: invalid message type")
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ result := NodeGetWriteNewSeriesBackoffDurationResult{}
+ if err = result.Read(iprot); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
}
- if err != nil {
+ if result.Err != nil {
+ err = result.Err
return
}
- return true, err
+ value = result.GetSuccess()
+ return
}
-type nodeProcessorFetchBatchRawV2 struct {
- handler Node
+// Parameters:
+// - Req
+func (p *NodeClient) SetWriteNewSeriesBackoffDuration(req *NodeSetWriteNewSeriesBackoffDurationRequest) (r *NodeWriteNewSeriesBackoffDurationResult_, err error) {
+ if err = p.sendSetWriteNewSeriesBackoffDuration(req); err != nil {
+ return
+ }
+ return p.recvSetWriteNewSeriesBackoffDuration()
}
-func (p *nodeProcessorFetchBatchRawV2) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeFetchBatchRawV2Args{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("fetchBatchRawV2", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
- }
-
- iprot.ReadMessageEnd()
- result := NodeFetchBatchRawV2Result{}
- var retval *FetchBatchRawResult_
- var err2 error
- if retval, err2 = p.handler.FetchBatchRawV2(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchBatchRawV2: "+err2.Error())
- oprot.WriteMessageBegin("fetchBatchRawV2", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("fetchBatchRawV2", thrift.REPLY, seqId); err2 != nil {
- err = err2
+func (p *NodeClient) sendSetWriteNewSeriesBackoffDuration(req *NodeSetWriteNewSeriesBackoffDurationRequest) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("setWriteNewSeriesBackoffDuration", thrift.CALL, p.SeqId); err != nil {
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ args := NodeSetWriteNewSeriesBackoffDurationArgs{
+ Req: req,
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ if err = args.Write(oprot); err != nil {
+ return
}
- if err != nil {
+ if err = oprot.WriteMessageEnd(); err != nil {
return
}
- return true, err
-}
-
-type nodeProcessorFetchBlocksRaw struct {
- handler Node
+ return oprot.Flush()
}
-func (p *nodeProcessorFetchBlocksRaw) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeFetchBlocksRawArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("fetchBlocksRaw", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+func (p *NodeClient) recvSetWriteNewSeriesBackoffDuration() (value *NodeWriteNewSeriesBackoffDurationResult_, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
}
-
- iprot.ReadMessageEnd()
- result := NodeFetchBlocksRawResult{}
- var retval *FetchBlocksRawResult_
- var err2 error
- if retval, err2 = p.handler.FetchBlocksRaw(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchBlocksRaw: "+err2.Error())
- oprot.WriteMessageBegin("fetchBlocksRaw", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
- } else {
- result.Success = retval
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return
}
- if err2 = oprot.WriteMessageBegin("fetchBlocksRaw", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ if method != "setWriteNewSeriesBackoffDuration" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "setWriteNewSeriesBackoffDuration failed: wrong method name")
+ return
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "setWriteNewSeriesBackoffDuration failed: out of sequence response")
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if mTypeId == thrift.EXCEPTION {
+ error85 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error86 error
+ error86, err = error85.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error86
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "setWriteNewSeriesBackoffDuration failed: invalid message type")
+ return
}
- if err != nil {
+ result := NodeSetWriteNewSeriesBackoffDurationResult{}
+ if err = result.Read(iprot); err != nil {
return
}
- return true, err
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ if result.Err != nil {
+ err = result.Err
+ return
+ }
+ value = result.GetSuccess()
+ return
}
-type nodeProcessorFetchBlocksMetadataRawV2 struct {
- handler Node
+func (p *NodeClient) GetWriteNewSeriesLimitPerShardPerSecond() (r *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error) {
+ if err = p.sendGetWriteNewSeriesLimitPerShardPerSecond(); err != nil {
+ return
+ }
+ return p.recvGetWriteNewSeriesLimitPerShardPerSecond()
}
-func (p *nodeProcessorFetchBlocksMetadataRawV2) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeFetchBlocksMetadataRawV2Args{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("fetchBlocksMetadataRawV2", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+func (p *NodeClient) sendGetWriteNewSeriesLimitPerShardPerSecond() (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("getWriteNewSeriesLimitPerShardPerSecond", thrift.CALL, p.SeqId); err != nil {
+ return
+ }
+ args := NodeGetWriteNewSeriesLimitPerShardPerSecondArgs{}
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
}
+ return oprot.Flush()
+}
- iprot.ReadMessageEnd()
- result := NodeFetchBlocksMetadataRawV2Result{}
- var retval *FetchBlocksMetadataRawV2Result_
- var err2 error
- if retval, err2 = p.handler.FetchBlocksMetadataRawV2(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchBlocksMetadataRawV2: "+err2.Error())
- oprot.WriteMessageBegin("fetchBlocksMetadataRawV2", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
- } else {
- result.Success = retval
+func (p *NodeClient) recvGetWriteNewSeriesLimitPerShardPerSecond() (value *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
}
- if err2 = oprot.WriteMessageBegin("fetchBlocksMetadataRawV2", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if method != "getWriteNewSeriesLimitPerShardPerSecond" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getWriteNewSeriesLimitPerShardPerSecond failed: wrong method name")
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getWriteNewSeriesLimitPerShardPerSecond failed: out of sequence response")
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ if mTypeId == thrift.EXCEPTION {
+ error87 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error88 error
+ error88, err = error87.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error88
+ return
}
- if err != nil {
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getWriteNewSeriesLimitPerShardPerSecond failed: invalid message type")
return
}
- return true, err
+ result := NodeGetWriteNewSeriesLimitPerShardPerSecondResult{}
+ if err = result.Read(iprot); err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ if result.Err != nil {
+ err = result.Err
+ return
+ }
+ value = result.GetSuccess()
+ return
}
-type nodeProcessorWriteBatchRaw struct {
- handler Node
+// Parameters:
+// - Req
+func (p *NodeClient) SetWriteNewSeriesLimitPerShardPerSecond(req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) (r *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error) {
+ if err = p.sendSetWriteNewSeriesLimitPerShardPerSecond(req); err != nil {
+ return
+ }
+ return p.recvSetWriteNewSeriesLimitPerShardPerSecond()
}
-func (p *nodeProcessorWriteBatchRaw) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeWriteBatchRawArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("writeBatchRaw", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+func (p *NodeClient) sendSetWriteNewSeriesLimitPerShardPerSecond(req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
}
-
- iprot.ReadMessageEnd()
- result := NodeWriteBatchRawResult{}
- var err2 error
- if err2 = p.handler.WriteBatchRaw(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *WriteBatchRawErrors:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing writeBatchRaw: "+err2.Error())
- oprot.WriteMessageBegin("writeBatchRaw", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.CALL, p.SeqId); err != nil {
+ return
}
- if err2 = oprot.WriteMessageBegin("writeBatchRaw", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ args := NodeSetWriteNewSeriesLimitPerShardPerSecondArgs{
+ Req: req,
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if err = args.Write(oprot); err != nil {
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ return oprot.Flush()
+}
+
+func (p *NodeClient) recvSetWriteNewSeriesLimitPerShardPerSecond() (value *NodeWriteNewSeriesLimitPerShardPerSecondResult_, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
}
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return
}
- return true, err
-}
-
-type nodeProcessorWriteBatchRawV2 struct {
- handler Node
-}
-
-func (p *nodeProcessorWriteBatchRawV2) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeWriteBatchRawV2Args{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("writeBatchRawV2", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+ if method != "setWriteNewSeriesLimitPerShardPerSecond" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "setWriteNewSeriesLimitPerShardPerSecond failed: wrong method name")
+ return
}
-
- iprot.ReadMessageEnd()
- result := NodeWriteBatchRawV2Result{}
- var err2 error
- if err2 = p.handler.WriteBatchRawV2(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *WriteBatchRawErrors:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing writeBatchRawV2: "+err2.Error())
- oprot.WriteMessageBegin("writeBatchRawV2", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "setWriteNewSeriesLimitPerShardPerSecond failed: out of sequence response")
+ return
}
- if err2 = oprot.WriteMessageBegin("writeBatchRawV2", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ if mTypeId == thrift.EXCEPTION {
+ error89 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error90 error
+ error90, err = error89.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error90
+ return
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "setWriteNewSeriesLimitPerShardPerSecond failed: invalid message type")
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ result := NodeSetWriteNewSeriesLimitPerShardPerSecondResult{}
+ if err = result.Read(iprot); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
}
- if err != nil {
+ if result.Err != nil {
+ err = result.Err
return
}
- return true, err
+ value = result.GetSuccess()
+ return
}
-type nodeProcessorWriteTaggedBatchRaw struct {
- handler Node
+// Parameters:
+// - Req
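+// DebugProfileStart is one of the debug endpoints introduced in this change;
+// it issues the debugProfileStart RPC with the supplied request and decodes
+// the DebugProfileStartResult_ reply.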
+func (p *NodeClient) DebugProfileStart(req *DebugProfileStartRequest) (r *DebugProfileStartResult_, err error) {
+ if err = p.sendDebugProfileStart(req); err != nil {
+ return
+ }
+ return p.recvDebugProfileStart()
}
-func (p *nodeProcessorWriteTaggedBatchRaw) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeWriteTaggedBatchRawArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("writeTaggedBatchRaw", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+func (p *NodeClient) sendDebugProfileStart(req *DebugProfileStartRequest) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
}
-
- iprot.ReadMessageEnd()
- result := NodeWriteTaggedBatchRawResult{}
- var err2 error
- if err2 = p.handler.WriteTaggedBatchRaw(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *WriteBatchRawErrors:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing writeTaggedBatchRaw: "+err2.Error())
- oprot.WriteMessageBegin("writeTaggedBatchRaw", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("debugProfileStart", thrift.CALL, p.SeqId); err != nil {
+ return
}
- if err2 = oprot.WriteMessageBegin("writeTaggedBatchRaw", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ args := NodeDebugProfileStartArgs{
+ Req: req,
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if err = args.Write(oprot); err != nil {
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ return oprot.Flush()
+}
+
+func (p *NodeClient) recvDebugProfileStart() (value *DebugProfileStartResult_, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
}
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return
}
- return true, err
-}
-
-type nodeProcessorWriteTaggedBatchRawV2 struct {
- handler Node
-}
-
-func (p *nodeProcessorWriteTaggedBatchRawV2) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeWriteTaggedBatchRawV2Args{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("writeTaggedBatchRawV2", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+ if method != "debugProfileStart" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "debugProfileStart failed: wrong method name")
+ return
}
-
- iprot.ReadMessageEnd()
- result := NodeWriteTaggedBatchRawV2Result{}
- var err2 error
- if err2 = p.handler.WriteTaggedBatchRawV2(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *WriteBatchRawErrors:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing writeTaggedBatchRawV2: "+err2.Error())
- oprot.WriteMessageBegin("writeTaggedBatchRawV2", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "debugProfileStart failed: out of sequence response")
+ return
}
- if err2 = oprot.WriteMessageBegin("writeTaggedBatchRawV2", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ if mTypeId == thrift.EXCEPTION {
+ error91 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error92 error
+ error92, err = error91.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error92
+ return
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "debugProfileStart failed: invalid message type")
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ result := NodeDebugProfileStartResult{}
+ if err = result.Read(iprot); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
}
- if err != nil {
+ if result.Err != nil {
+ err = result.Err
return
}
- return true, err
+ value = result.GetSuccess()
+ return
}
-type nodeProcessorRepair struct {
- handler Node
+// Parameters:
+// - Req
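+// DebugProfileStop is the counterpart to DebugProfileStart: it issues the
+// debugProfileStop RPC and decodes the DebugProfileStopResult_ reply.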
+func (p *NodeClient) DebugProfileStop(req *DebugProfileStopRequest) (r *DebugProfileStopResult_, err error) {
+ if err = p.sendDebugProfileStop(req); err != nil {
+ return
+ }
+ return p.recvDebugProfileStop()
}
-func (p *nodeProcessorRepair) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeRepairArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("repair", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+func (p *NodeClient) sendDebugProfileStop(req *DebugProfileStopRequest) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
}
-
- iprot.ReadMessageEnd()
- result := NodeRepairResult{}
- var err2 error
- if err2 = p.handler.Repair(); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing repair: "+err2.Error())
- oprot.WriteMessageBegin("repair", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("debugProfileStop", thrift.CALL, p.SeqId); err != nil {
+ return
+ }
+ args := NodeDebugProfileStopArgs{
+ Req: req,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+func (p *NodeClient) recvDebugProfileStop() (value *DebugProfileStopResult_, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
+ }
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return
+ }
+ if method != "debugProfileStop" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "debugProfileStop failed: wrong method name")
+ return
+ }
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "debugProfileStop failed: out of sequence response")
+ return
+ }
+ if mTypeId == thrift.EXCEPTION {
+ error93 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error94 error
+ error94, err = error93.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
}
+ err = error94
+ return
}
- if err2 = oprot.WriteMessageBegin("repair", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "debugProfileStop failed: invalid message type")
+ return
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ result := NodeDebugProfileStopResult{}
+ if err = result.Read(iprot); err != nil {
+ return
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ if result.Err != nil {
+ err = result.Err
+ return
}
+ value = result.GetSuccess()
+ return
+}
+
+// Parameters:
+// - Req
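+// DebugIndexMemorySegments issues the debugIndexMemorySegments RPC and
+// decodes the DebugIndexMemorySegmentsResult_ reply.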
+func (p *NodeClient) DebugIndexMemorySegments(req *DebugIndexMemorySegmentsRequest) (r *DebugIndexMemorySegmentsResult_, err error) {
+ if err = p.sendDebugIndexMemorySegments(req); err != nil {
+ return
+ }
+ return p.recvDebugIndexMemorySegments()
+}
+
+func (p *NodeClient) sendDebugIndexMemorySegments(req *DebugIndexMemorySegmentsRequest) (err error) {
+ oprot := p.OutputProtocol
+ if oprot == nil {
+ oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.OutputProtocol = oprot
+ }
+ p.SeqId++
+ if err = oprot.WriteMessageBegin("debugIndexMemorySegments", thrift.CALL, p.SeqId); err != nil {
+ return
+ }
+ args := NodeDebugIndexMemorySegmentsArgs{
+ Req: req,
+ }
+ if err = args.Write(oprot); err != nil {
+ return
+ }
+ if err = oprot.WriteMessageEnd(); err != nil {
+ return
+ }
+ return oprot.Flush()
+}
+
+func (p *NodeClient) recvDebugIndexMemorySegments() (value *DebugIndexMemorySegmentsResult_, err error) {
+ iprot := p.InputProtocol
+ if iprot == nil {
+ iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+ p.InputProtocol = iprot
+ }
+ method, mTypeId, seqId, err := iprot.ReadMessageBegin()
if err != nil {
return
}
- return true, err
+ if method != "debugIndexMemorySegments" {
+ err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "debugIndexMemorySegments failed: wrong method name")
+ return
+ }
+ if p.SeqId != seqId {
+ err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "debugIndexMemorySegments failed: out of sequence response")
+ return
+ }
+ if mTypeId == thrift.EXCEPTION {
+ error95 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error96 error
+ error96, err = error95.Read(iprot)
+ if err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ err = error96
+ return
+ }
+ if mTypeId != thrift.REPLY {
+ err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "debugIndexMemorySegments failed: invalid message type")
+ return
+ }
+ result := NodeDebugIndexMemorySegmentsResult{}
+ if err = result.Read(iprot); err != nil {
+ return
+ }
+ if err = iprot.ReadMessageEnd(); err != nil {
+ return
+ }
+ if result.Err != nil {
+ err = result.Err
+ return
+ }
+ value = result.GetSuccess()
+ return
}
-type nodeProcessorTruncate struct {
+type NodeProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler Node
+}
+
+func (p *NodeProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *NodeProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *NodeProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewNodeProcessor(handler Node) *NodeProcessor {
+
+ self97 := &NodeProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self97.processorMap["query"] = &nodeProcessorQuery{handler: handler}
+ self97.processorMap["aggregate"] = &nodeProcessorAggregate{handler: handler}
+ self97.processorMap["fetch"] = &nodeProcessorFetch{handler: handler}
+ self97.processorMap["write"] = &nodeProcessorWrite{handler: handler}
+ self97.processorMap["writeTagged"] = &nodeProcessorWriteTagged{handler: handler}
+ self97.processorMap["fetchBatchRaw"] = &nodeProcessorFetchBatchRaw{handler: handler}
+ self97.processorMap["fetchBatchRawV2"] = &nodeProcessorFetchBatchRawV2{handler: handler}
+ self97.processorMap["fetchBlocksRaw"] = &nodeProcessorFetchBlocksRaw{handler: handler}
+ self97.processorMap["fetchTagged"] = &nodeProcessorFetchTagged{handler: handler}
+ self97.processorMap["aggregateRaw"] = &nodeProcessorAggregateRaw{handler: handler}
+ self97.processorMap["fetchBlocksMetadataRawV2"] = &nodeProcessorFetchBlocksMetadataRawV2{handler: handler}
+ self97.processorMap["writeBatchRaw"] = &nodeProcessorWriteBatchRaw{handler: handler}
+ self97.processorMap["writeBatchRawV2"] = &nodeProcessorWriteBatchRawV2{handler: handler}
+ self97.processorMap["writeTaggedBatchRaw"] = &nodeProcessorWriteTaggedBatchRaw{handler: handler}
+ self97.processorMap["writeTaggedBatchRawV2"] = &nodeProcessorWriteTaggedBatchRawV2{handler: handler}
+ self97.processorMap["repair"] = &nodeProcessorRepair{handler: handler}
+ self97.processorMap["truncate"] = &nodeProcessorTruncate{handler: handler}
+ self97.processorMap["health"] = &nodeProcessorHealth{handler: handler}
+ self97.processorMap["bootstrapped"] = &nodeProcessorBootstrapped{handler: handler}
+ self97.processorMap["bootstrappedInPlacementOrNoPlacement"] = &nodeProcessorBootstrappedInPlacementOrNoPlacement{handler: handler}
+ self97.processorMap["getPersistRateLimit"] = &nodeProcessorGetPersistRateLimit{handler: handler}
+ self97.processorMap["setPersistRateLimit"] = &nodeProcessorSetPersistRateLimit{handler: handler}
+ self97.processorMap["getWriteNewSeriesAsync"] = &nodeProcessorGetWriteNewSeriesAsync{handler: handler}
+ self97.processorMap["setWriteNewSeriesAsync"] = &nodeProcessorSetWriteNewSeriesAsync{handler: handler}
+ self97.processorMap["getWriteNewSeriesBackoffDuration"] = &nodeProcessorGetWriteNewSeriesBackoffDuration{handler: handler}
+ self97.processorMap["setWriteNewSeriesBackoffDuration"] = &nodeProcessorSetWriteNewSeriesBackoffDuration{handler: handler}
+ self97.processorMap["getWriteNewSeriesLimitPerShardPerSecond"] = &nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond{handler: handler}
+ self97.processorMap["setWriteNewSeriesLimitPerShardPerSecond"] = &nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond{handler: handler}
+ self97.processorMap["debugProfileStart"] = &nodeProcessorDebugProfileStart{handler: handler}
+ self97.processorMap["debugProfileStop"] = &nodeProcessorDebugProfileStop{handler: handler}
+ self97.processorMap["debugIndexMemorySegments"] = &nodeProcessorDebugIndexMemorySegments{handler: handler}
+ return self97
+}
+
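+// NB(editor): NewNodeProcessor registers one TProcessorFunction per RPC name;
+// Process below resolves the incoming method name against this map. Minimal
+// server sketch (listen address and factories are assumptions, not generated
+// code):
+//
+//	var handler Node // assumption: your implementation of the Node interface
+//	trans, _ := thrift.NewTServerSocket(":9000")
+//	server := thrift.NewTSimpleServer4(NewNodeProcessor(handler), trans,
+//		thrift.NewTTransportFactory(), thrift.NewTBinaryProtocolFactoryDefault())
+//	_ = server.Serve()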
+func (p *NodeProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return false, err
+ }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(seqId, iprot, oprot)
+ }
+ iprot.Skip(thrift.STRUCT)
+ iprot.ReadMessageEnd()
+ x98 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+ x98.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, x98
+
+}
+
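+// NB(editor): when the method name is not registered, Process skips the
+// request payload, replies with an UNKNOWN_METHOD TApplicationException, and
+// returns false so the serving loop can stop processing the connection
+// instead of reading further frames.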
+type nodeProcessorQuery struct {
handler Node
}
-func (p *nodeProcessorTruncate) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeTruncateArgs{}
+func (p *nodeProcessorQuery) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeQueryArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("truncate", thrift.EXCEPTION, seqId)
+ oprot.WriteMessageBegin("query", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -15939,16 +16353,16 @@ func (p *nodeProcessorTruncate) Process(seqId int32, iprot, oprot thrift.TProtoc
}
iprot.ReadMessageEnd()
- result := NodeTruncateResult{}
- var retval *TruncateResult_
+ result := NodeQueryResult{}
+ var retval *QueryResult_
var err2 error
- if retval, err2 = p.handler.Truncate(args.Req); err2 != nil {
+ if retval, err2 = p.handler.Query(args.Req); err2 != nil {
switch v := err2.(type) {
case *Error:
result.Err = v
default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing truncate: "+err2.Error())
- oprot.WriteMessageBegin("truncate", thrift.EXCEPTION, seqId)
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query: "+err2.Error())
+ oprot.WriteMessageBegin("query", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -15957,7 +16371,7 @@ func (p *nodeProcessorTruncate) Process(seqId int32, iprot, oprot thrift.TProtoc
} else {
result.Success = retval
}
- if err2 = oprot.WriteMessageBegin("truncate", thrift.REPLY, seqId); err2 != nil {
+ if err2 = oprot.WriteMessageBegin("query", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
@@ -15975,16 +16389,16 @@ func (p *nodeProcessorTruncate) Process(seqId int32, iprot, oprot thrift.TProtoc
return true, err
}
-type nodeProcessorHealth struct {
+type nodeProcessorAggregate struct {
handler Node
}
-func (p *nodeProcessorHealth) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeHealthArgs{}
+func (p *nodeProcessorAggregate) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeAggregateArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("health", thrift.EXCEPTION, seqId)
+ oprot.WriteMessageBegin("aggregate", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -15992,16 +16406,16 @@ func (p *nodeProcessorHealth) Process(seqId int32, iprot, oprot thrift.TProtocol
}
iprot.ReadMessageEnd()
- result := NodeHealthResult{}
- var retval *NodeHealthResult_
+ result := NodeAggregateResult{}
+ var retval *AggregateQueryResult_
var err2 error
- if retval, err2 = p.handler.Health(); err2 != nil {
+ if retval, err2 = p.handler.Aggregate(args.Req); err2 != nil {
switch v := err2.(type) {
case *Error:
result.Err = v
default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing health: "+err2.Error())
- oprot.WriteMessageBegin("health", thrift.EXCEPTION, seqId)
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing aggregate: "+err2.Error())
+ oprot.WriteMessageBegin("aggregate", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16010,7 +16424,7 @@ func (p *nodeProcessorHealth) Process(seqId int32, iprot, oprot thrift.TProtocol
} else {
result.Success = retval
}
- if err2 = oprot.WriteMessageBegin("health", thrift.REPLY, seqId); err2 != nil {
+ if err2 = oprot.WriteMessageBegin("aggregate", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
@@ -16028,16 +16442,16 @@ func (p *nodeProcessorHealth) Process(seqId int32, iprot, oprot thrift.TProtocol
return true, err
}
-type nodeProcessorBootstrapped struct {
+type nodeProcessorFetch struct {
handler Node
}
-func (p *nodeProcessorBootstrapped) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeBootstrappedArgs{}
+func (p *nodeProcessorFetch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeFetchArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("bootstrapped", thrift.EXCEPTION, seqId)
+ oprot.WriteMessageBegin("fetch", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16045,16 +16459,16 @@ func (p *nodeProcessorBootstrapped) Process(seqId int32, iprot, oprot thrift.TPr
}
iprot.ReadMessageEnd()
- result := NodeBootstrappedResult{}
- var retval *NodeBootstrappedResult_
+ result := NodeFetchResult{}
+ var retval *FetchResult_
var err2 error
- if retval, err2 = p.handler.Bootstrapped(); err2 != nil {
+ if retval, err2 = p.handler.Fetch(args.Req); err2 != nil {
switch v := err2.(type) {
case *Error:
result.Err = v
default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing bootstrapped: "+err2.Error())
- oprot.WriteMessageBegin("bootstrapped", thrift.EXCEPTION, seqId)
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetch: "+err2.Error())
+ oprot.WriteMessageBegin("fetch", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16063,7 +16477,7 @@ func (p *nodeProcessorBootstrapped) Process(seqId int32, iprot, oprot thrift.TPr
} else {
result.Success = retval
}
- if err2 = oprot.WriteMessageBegin("bootstrapped", thrift.REPLY, seqId); err2 != nil {
+ if err2 = oprot.WriteMessageBegin("fetch", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
@@ -16081,16 +16495,16 @@ func (p *nodeProcessorBootstrapped) Process(seqId int32, iprot, oprot thrift.TPr
return true, err
}
-type nodeProcessorBootstrappedInPlacementOrNoPlacement struct {
+type nodeProcessorWrite struct {
handler Node
}
-func (p *nodeProcessorBootstrappedInPlacementOrNoPlacement) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeBootstrappedInPlacementOrNoPlacementArgs{}
+func (p *nodeProcessorWrite) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeWriteArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("bootstrappedInPlacementOrNoPlacement", thrift.EXCEPTION, seqId)
+ oprot.WriteMessageBegin("write", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16098,25 +16512,22 @@ func (p *nodeProcessorBootstrappedInPlacementOrNoPlacement) Process(seqId int32,
}
iprot.ReadMessageEnd()
- result := NodeBootstrappedInPlacementOrNoPlacementResult{}
- var retval *NodeBootstrappedInPlacementOrNoPlacementResult_
+ result := NodeWriteResult{}
var err2 error
- if retval, err2 = p.handler.BootstrappedInPlacementOrNoPlacement(); err2 != nil {
+ if err2 = p.handler.Write(args.Req); err2 != nil {
switch v := err2.(type) {
case *Error:
result.Err = v
default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing bootstrappedInPlacementOrNoPlacement: "+err2.Error())
- oprot.WriteMessageBegin("bootstrappedInPlacementOrNoPlacement", thrift.EXCEPTION, seqId)
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing write: "+err2.Error())
+ oprot.WriteMessageBegin("write", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return true, err2
}
- } else {
- result.Success = retval
}
- if err2 = oprot.WriteMessageBegin("bootstrappedInPlacementOrNoPlacement", thrift.REPLY, seqId); err2 != nil {
+ if err2 = oprot.WriteMessageBegin("write", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
@@ -16134,16 +16545,16 @@ func (p *nodeProcessorBootstrappedInPlacementOrNoPlacement) Process(seqId int32,
return true, err
}
-type nodeProcessorGetPersistRateLimit struct {
+type nodeProcessorWriteTagged struct {
handler Node
}
-func (p *nodeProcessorGetPersistRateLimit) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeGetPersistRateLimitArgs{}
+func (p *nodeProcessorWriteTagged) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeWriteTaggedArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("getPersistRateLimit", thrift.EXCEPTION, seqId)
+ oprot.WriteMessageBegin("writeTagged", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16151,25 +16562,22 @@ func (p *nodeProcessorGetPersistRateLimit) Process(seqId int32, iprot, oprot thr
}
iprot.ReadMessageEnd()
- result := NodeGetPersistRateLimitResult{}
- var retval *NodePersistRateLimitResult_
+ result := NodeWriteTaggedResult{}
var err2 error
- if retval, err2 = p.handler.GetPersistRateLimit(); err2 != nil {
+ if err2 = p.handler.WriteTagged(args.Req); err2 != nil {
switch v := err2.(type) {
case *Error:
result.Err = v
default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getPersistRateLimit: "+err2.Error())
- oprot.WriteMessageBegin("getPersistRateLimit", thrift.EXCEPTION, seqId)
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing writeTagged: "+err2.Error())
+ oprot.WriteMessageBegin("writeTagged", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
return true, err2
}
- } else {
- result.Success = retval
}
- if err2 = oprot.WriteMessageBegin("getPersistRateLimit", thrift.REPLY, seqId); err2 != nil {
+ if err2 = oprot.WriteMessageBegin("writeTagged", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
@@ -16187,16 +16595,16 @@ func (p *nodeProcessorGetPersistRateLimit) Process(seqId int32, iprot, oprot thr
return true, err
}
-type nodeProcessorSetPersistRateLimit struct {
+type nodeProcessorFetchBatchRaw struct {
handler Node
}
-func (p *nodeProcessorSetPersistRateLimit) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeSetPersistRateLimitArgs{}
+func (p *nodeProcessorFetchBatchRaw) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeFetchBatchRawArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("setPersistRateLimit", thrift.EXCEPTION, seqId)
+ oprot.WriteMessageBegin("fetchBatchRaw", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16204,16 +16612,16 @@ func (p *nodeProcessorSetPersistRateLimit) Process(seqId int32, iprot, oprot thr
}
iprot.ReadMessageEnd()
- result := NodeSetPersistRateLimitResult{}
- var retval *NodePersistRateLimitResult_
+ result := NodeFetchBatchRawResult{}
+ var retval *FetchBatchRawResult_
var err2 error
- if retval, err2 = p.handler.SetPersistRateLimit(args.Req); err2 != nil {
+ if retval, err2 = p.handler.FetchBatchRaw(args.Req); err2 != nil {
switch v := err2.(type) {
case *Error:
result.Err = v
default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing setPersistRateLimit: "+err2.Error())
- oprot.WriteMessageBegin("setPersistRateLimit", thrift.EXCEPTION, seqId)
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchBatchRaw: "+err2.Error())
+ oprot.WriteMessageBegin("fetchBatchRaw", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16222,7 +16630,7 @@ func (p *nodeProcessorSetPersistRateLimit) Process(seqId int32, iprot, oprot thr
} else {
result.Success = retval
}
- if err2 = oprot.WriteMessageBegin("setPersistRateLimit", thrift.REPLY, seqId); err2 != nil {
+ if err2 = oprot.WriteMessageBegin("fetchBatchRaw", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
@@ -16240,16 +16648,16 @@ func (p *nodeProcessorSetPersistRateLimit) Process(seqId int32, iprot, oprot thr
return true, err
}
-type nodeProcessorGetWriteNewSeriesAsync struct {
+type nodeProcessorFetchBatchRawV2 struct {
handler Node
}
-func (p *nodeProcessorGetWriteNewSeriesAsync) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeGetWriteNewSeriesAsyncArgs{}
+func (p *nodeProcessorFetchBatchRawV2) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeFetchBatchRawV2Args{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("getWriteNewSeriesAsync", thrift.EXCEPTION, seqId)
+ oprot.WriteMessageBegin("fetchBatchRawV2", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16257,16 +16665,16 @@ func (p *nodeProcessorGetWriteNewSeriesAsync) Process(seqId int32, iprot, oprot
}
iprot.ReadMessageEnd()
- result := NodeGetWriteNewSeriesAsyncResult{}
- var retval *NodeWriteNewSeriesAsyncResult_
+ result := NodeFetchBatchRawV2Result{}
+ var retval *FetchBatchRawResult_
var err2 error
- if retval, err2 = p.handler.GetWriteNewSeriesAsync(); err2 != nil {
+ if retval, err2 = p.handler.FetchBatchRawV2(args.Req); err2 != nil {
switch v := err2.(type) {
case *Error:
result.Err = v
default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getWriteNewSeriesAsync: "+err2.Error())
- oprot.WriteMessageBegin("getWriteNewSeriesAsync", thrift.EXCEPTION, seqId)
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchBatchRawV2: "+err2.Error())
+ oprot.WriteMessageBegin("fetchBatchRawV2", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16275,7 +16683,7 @@ func (p *nodeProcessorGetWriteNewSeriesAsync) Process(seqId int32, iprot, oprot
} else {
result.Success = retval
}
- if err2 = oprot.WriteMessageBegin("getWriteNewSeriesAsync", thrift.REPLY, seqId); err2 != nil {
+ if err2 = oprot.WriteMessageBegin("fetchBatchRawV2", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
@@ -16293,16 +16701,16 @@ func (p *nodeProcessorGetWriteNewSeriesAsync) Process(seqId int32, iprot, oprot
return true, err
}
-type nodeProcessorSetWriteNewSeriesAsync struct {
+type nodeProcessorFetchBlocksRaw struct {
handler Node
}
-func (p *nodeProcessorSetWriteNewSeriesAsync) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeSetWriteNewSeriesAsyncArgs{}
+func (p *nodeProcessorFetchBlocksRaw) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeFetchBlocksRawArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("setWriteNewSeriesAsync", thrift.EXCEPTION, seqId)
+ oprot.WriteMessageBegin("fetchBlocksRaw", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16310,16 +16718,16 @@ func (p *nodeProcessorSetWriteNewSeriesAsync) Process(seqId int32, iprot, oprot
}
iprot.ReadMessageEnd()
- result := NodeSetWriteNewSeriesAsyncResult{}
- var retval *NodeWriteNewSeriesAsyncResult_
+ result := NodeFetchBlocksRawResult{}
+ var retval *FetchBlocksRawResult_
var err2 error
- if retval, err2 = p.handler.SetWriteNewSeriesAsync(args.Req); err2 != nil {
+ if retval, err2 = p.handler.FetchBlocksRaw(args.Req); err2 != nil {
switch v := err2.(type) {
case *Error:
result.Err = v
default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing setWriteNewSeriesAsync: "+err2.Error())
- oprot.WriteMessageBegin("setWriteNewSeriesAsync", thrift.EXCEPTION, seqId)
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchBlocksRaw: "+err2.Error())
+ oprot.WriteMessageBegin("fetchBlocksRaw", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16328,7 +16736,7 @@ func (p *nodeProcessorSetWriteNewSeriesAsync) Process(seqId int32, iprot, oprot
} else {
result.Success = retval
}
- if err2 = oprot.WriteMessageBegin("setWriteNewSeriesAsync", thrift.REPLY, seqId); err2 != nil {
+ if err2 = oprot.WriteMessageBegin("fetchBlocksRaw", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
@@ -16346,16 +16754,16 @@ func (p *nodeProcessorSetWriteNewSeriesAsync) Process(seqId int32, iprot, oprot
return true, err
}
-type nodeProcessorGetWriteNewSeriesBackoffDuration struct {
+type nodeProcessorFetchTagged struct {
handler Node
}
-func (p *nodeProcessorGetWriteNewSeriesBackoffDuration) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeGetWriteNewSeriesBackoffDurationArgs{}
+func (p *nodeProcessorFetchTagged) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeFetchTaggedArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("getWriteNewSeriesBackoffDuration", thrift.EXCEPTION, seqId)
+ oprot.WriteMessageBegin("fetchTagged", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16363,16 +16771,16 @@ func (p *nodeProcessorGetWriteNewSeriesBackoffDuration) Process(seqId int32, ipr
}
iprot.ReadMessageEnd()
- result := NodeGetWriteNewSeriesBackoffDurationResult{}
- var retval *NodeWriteNewSeriesBackoffDurationResult_
+ result := NodeFetchTaggedResult{}
+ var retval *FetchTaggedResult_
var err2 error
- if retval, err2 = p.handler.GetWriteNewSeriesBackoffDuration(); err2 != nil {
+ if retval, err2 = p.handler.FetchTagged(args.Req); err2 != nil {
switch v := err2.(type) {
case *Error:
result.Err = v
default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getWriteNewSeriesBackoffDuration: "+err2.Error())
- oprot.WriteMessageBegin("getWriteNewSeriesBackoffDuration", thrift.EXCEPTION, seqId)
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchTagged: "+err2.Error())
+ oprot.WriteMessageBegin("fetchTagged", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16381,7 +16789,7 @@ func (p *nodeProcessorGetWriteNewSeriesBackoffDuration) Process(seqId int32, ipr
} else {
result.Success = retval
}
- if err2 = oprot.WriteMessageBegin("getWriteNewSeriesBackoffDuration", thrift.REPLY, seqId); err2 != nil {
+ if err2 = oprot.WriteMessageBegin("fetchTagged", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
@@ -16399,16 +16807,16 @@ func (p *nodeProcessorGetWriteNewSeriesBackoffDuration) Process(seqId int32, ipr
return true, err
}
-type nodeProcessorSetWriteNewSeriesBackoffDuration struct {
+type nodeProcessorAggregateRaw struct {
handler Node
}
-func (p *nodeProcessorSetWriteNewSeriesBackoffDuration) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeSetWriteNewSeriesBackoffDurationArgs{}
+func (p *nodeProcessorAggregateRaw) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeAggregateRawArgs{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("setWriteNewSeriesBackoffDuration", thrift.EXCEPTION, seqId)
+ oprot.WriteMessageBegin("aggregateRaw", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16416,16 +16824,16 @@ func (p *nodeProcessorSetWriteNewSeriesBackoffDuration) Process(seqId int32, ipr
}
iprot.ReadMessageEnd()
- result := NodeSetWriteNewSeriesBackoffDurationResult{}
- var retval *NodeWriteNewSeriesBackoffDurationResult_
+ result := NodeAggregateRawResult{}
+ var retval *AggregateQueryRawResult_
var err2 error
- if retval, err2 = p.handler.SetWriteNewSeriesBackoffDuration(args.Req); err2 != nil {
+ if retval, err2 = p.handler.AggregateRaw(args.Req); err2 != nil {
switch v := err2.(type) {
case *Error:
result.Err = v
default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing setWriteNewSeriesBackoffDuration: "+err2.Error())
- oprot.WriteMessageBegin("setWriteNewSeriesBackoffDuration", thrift.EXCEPTION, seqId)
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing aggregateRaw: "+err2.Error())
+ oprot.WriteMessageBegin("aggregateRaw", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16434,7 +16842,7 @@ func (p *nodeProcessorSetWriteNewSeriesBackoffDuration) Process(seqId int32, ipr
} else {
result.Success = retval
}
- if err2 = oprot.WriteMessageBegin("setWriteNewSeriesBackoffDuration", thrift.REPLY, seqId); err2 != nil {
+ if err2 = oprot.WriteMessageBegin("aggregateRaw", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
@@ -16452,16 +16860,16 @@ func (p *nodeProcessorSetWriteNewSeriesBackoffDuration) Process(seqId int32, ipr
return true, err
}
-type nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond struct {
+type nodeProcessorFetchBlocksMetadataRawV2 struct {
handler Node
}
-func (p *nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeGetWriteNewSeriesLimitPerShardPerSecondArgs{}
+func (p *nodeProcessorFetchBlocksMetadataRawV2) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeFetchBlocksMetadataRawV2Args{}
if err = args.Read(iprot); err != nil {
iprot.ReadMessageEnd()
x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("getWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId)
+ oprot.WriteMessageBegin("fetchBlocksMetadataRawV2", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16469,16 +16877,16 @@ func (p *nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int
}
iprot.ReadMessageEnd()
- result := NodeGetWriteNewSeriesLimitPerShardPerSecondResult{}
- var retval *NodeWriteNewSeriesLimitPerShardPerSecondResult_
+ result := NodeFetchBlocksMetadataRawV2Result{}
+ var retval *FetchBlocksMetadataRawV2Result_
var err2 error
- if retval, err2 = p.handler.GetWriteNewSeriesLimitPerShardPerSecond(); err2 != nil {
+ if retval, err2 = p.handler.FetchBlocksMetadataRawV2(args.Req); err2 != nil {
switch v := err2.(type) {
case *Error:
result.Err = v
default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getWriteNewSeriesLimitPerShardPerSecond: "+err2.Error())
- oprot.WriteMessageBegin("getWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId)
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing fetchBlocksMetadataRawV2: "+err2.Error())
+ oprot.WriteMessageBegin("fetchBlocksMetadataRawV2", thrift.EXCEPTION, seqId)
x.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
@@ -16487,7 +16895,107 @@ func (p *nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int
} else {
result.Success = retval
}
- if err2 = oprot.WriteMessageBegin("getWriteNewSeriesLimitPerShardPerSecond", thrift.REPLY, seqId); err2 != nil {
+ if err2 = oprot.WriteMessageBegin("fetchBlocksMetadataRawV2", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
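+// NB(editor): every nodeProcessor* below follows the same generated template:
+// decode the args struct (PROTOCOL_ERROR exception on failure), invoke the
+// handler, map a typed *Error to result.Err and any other error to an
+// INTERNAL_ERROR TApplicationException, then write the REPLY envelope,
+// keeping only the first write-side error in err.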
+type nodeProcessorWriteBatchRaw struct {
+ handler Node
+}
+
+func (p *nodeProcessorWriteBatchRaw) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeWriteBatchRawArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("writeBatchRaw", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeWriteBatchRawResult{}
+ var err2 error
+ if err2 = p.handler.WriteBatchRaw(args.Req); err2 != nil {
+ switch v := err2.(type) {
+ case *WriteBatchRawErrors:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing writeBatchRaw: "+err2.Error())
+ oprot.WriteMessageBegin("writeBatchRaw", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ }
+ if err2 = oprot.WriteMessageBegin("writeBatchRaw", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
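+// NB(editor): the batch-write processors trap *WriteBatchRawErrors rather
+// than *Error, matching the exception type declared on these methods, so
+// per-element failures reach the client as structured data in result.Err.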
+type nodeProcessorWriteBatchRawV2 struct {
+ handler Node
+}
+
+func (p *nodeProcessorWriteBatchRawV2) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeWriteBatchRawV2Args{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("writeBatchRawV2", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeWriteBatchRawV2Result{}
+ var err2 error
+ if err2 = p.handler.WriteBatchRawV2(args.Req); err2 != nil {
+ switch v := err2.(type) {
+ case *WriteBatchRawErrors:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing writeBatchRawV2: "+err2.Error())
+ oprot.WriteMessageBegin("writeBatchRawV2", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ }
+ if err2 = oprot.WriteMessageBegin("writeBatchRawV2", thrift.REPLY, seqId); err2 != nil {
err = err2
}
if err2 = result.Write(oprot); err == nil && err2 != nil {
@@ -16499,90 +17007,1747 @@ func (p *nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int
if err2 = oprot.Flush(); err == nil && err2 != nil {
err = err2
}
- if err != nil {
- return
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorWriteTaggedBatchRaw struct {
+ handler Node
+}
+
+func (p *nodeProcessorWriteTaggedBatchRaw) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeWriteTaggedBatchRawArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("writeTaggedBatchRaw", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeWriteTaggedBatchRawResult{}
+ var err2 error
+ if err2 = p.handler.WriteTaggedBatchRaw(args.Req); err2 != nil {
+ switch v := err2.(type) {
+ case *WriteBatchRawErrors:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing writeTaggedBatchRaw: "+err2.Error())
+ oprot.WriteMessageBegin("writeTaggedBatchRaw", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ }
+ if err2 = oprot.WriteMessageBegin("writeTaggedBatchRaw", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorWriteTaggedBatchRawV2 struct {
+ handler Node
+}
+
+func (p *nodeProcessorWriteTaggedBatchRawV2) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeWriteTaggedBatchRawV2Args{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("writeTaggedBatchRawV2", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeWriteTaggedBatchRawV2Result{}
+ var err2 error
+ if err2 = p.handler.WriteTaggedBatchRawV2(args.Req); err2 != nil {
+ switch v := err2.(type) {
+ case *WriteBatchRawErrors:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing writeTaggedBatchRawV2: "+err2.Error())
+ oprot.WriteMessageBegin("writeTaggedBatchRawV2", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ }
+ if err2 = oprot.WriteMessageBegin("writeTaggedBatchRawV2", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorRepair struct {
+ handler Node
+}
+
+func (p *nodeProcessorRepair) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeRepairArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("repair", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeRepairResult{}
+ var err2 error
+ if err2 = p.handler.Repair(); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing repair: "+err2.Error())
+ oprot.WriteMessageBegin("repair", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ }
+ if err2 = oprot.WriteMessageBegin("repair", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorTruncate struct {
+ handler Node
+}
+
+func (p *nodeProcessorTruncate) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeTruncateArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("truncate", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeTruncateResult{}
+ var retval *TruncateResult_
+ var err2 error
+ if retval, err2 = p.handler.Truncate(args.Req); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing truncate: "+err2.Error())
+ oprot.WriteMessageBegin("truncate", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("truncate", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorHealth struct {
+ handler Node
+}
+
+func (p *nodeProcessorHealth) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeHealthArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("health", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeHealthResult{}
+ var retval *NodeHealthResult_
+ var err2 error
+ if retval, err2 = p.handler.Health(); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing health: "+err2.Error())
+ oprot.WriteMessageBegin("health", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("health", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorBootstrapped struct {
+ handler Node
+}
+
+func (p *nodeProcessorBootstrapped) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeBootstrappedArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("bootstrapped", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeBootstrappedResult{}
+ var retval *NodeBootstrappedResult_
+ var err2 error
+ if retval, err2 = p.handler.Bootstrapped(); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing bootstrapped: "+err2.Error())
+ oprot.WriteMessageBegin("bootstrapped", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("bootstrapped", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorBootstrappedInPlacementOrNoPlacement struct {
+ handler Node
+}
+
+func (p *nodeProcessorBootstrappedInPlacementOrNoPlacement) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeBootstrappedInPlacementOrNoPlacementArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("bootstrappedInPlacementOrNoPlacement", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeBootstrappedInPlacementOrNoPlacementResult{}
+ var retval *NodeBootstrappedInPlacementOrNoPlacementResult_
+ var err2 error
+ if retval, err2 = p.handler.BootstrappedInPlacementOrNoPlacement(); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing bootstrappedInPlacementOrNoPlacement: "+err2.Error())
+ oprot.WriteMessageBegin("bootstrappedInPlacementOrNoPlacement", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("bootstrappedInPlacementOrNoPlacement", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorGetPersistRateLimit struct {
+ handler Node
+}
+
+func (p *nodeProcessorGetPersistRateLimit) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeGetPersistRateLimitArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("getPersistRateLimit", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeGetPersistRateLimitResult{}
+ var retval *NodePersistRateLimitResult_
+ var err2 error
+ if retval, err2 = p.handler.GetPersistRateLimit(); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getPersistRateLimit: "+err2.Error())
+ oprot.WriteMessageBegin("getPersistRateLimit", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("getPersistRateLimit", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorSetPersistRateLimit struct {
+ handler Node
+}
+
+func (p *nodeProcessorSetPersistRateLimit) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeSetPersistRateLimitArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("setPersistRateLimit", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeSetPersistRateLimitResult{}
+ var retval *NodePersistRateLimitResult_
+ var err2 error
+ if retval, err2 = p.handler.SetPersistRateLimit(args.Req); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing setPersistRateLimit: "+err2.Error())
+ oprot.WriteMessageBegin("setPersistRateLimit", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("setPersistRateLimit", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorGetWriteNewSeriesAsync struct {
+ handler Node
+}
+
+func (p *nodeProcessorGetWriteNewSeriesAsync) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeGetWriteNewSeriesAsyncArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("getWriteNewSeriesAsync", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeGetWriteNewSeriesAsyncResult{}
+ var retval *NodeWriteNewSeriesAsyncResult_
+ var err2 error
+ if retval, err2 = p.handler.GetWriteNewSeriesAsync(); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getWriteNewSeriesAsync: "+err2.Error())
+ oprot.WriteMessageBegin("getWriteNewSeriesAsync", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("getWriteNewSeriesAsync", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorSetWriteNewSeriesAsync struct {
+ handler Node
+}
+
+func (p *nodeProcessorSetWriteNewSeriesAsync) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeSetWriteNewSeriesAsyncArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("setWriteNewSeriesAsync", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeSetWriteNewSeriesAsyncResult{}
+ var retval *NodeWriteNewSeriesAsyncResult_
+ var err2 error
+ if retval, err2 = p.handler.SetWriteNewSeriesAsync(args.Req); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing setWriteNewSeriesAsync: "+err2.Error())
+ oprot.WriteMessageBegin("setWriteNewSeriesAsync", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("setWriteNewSeriesAsync", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorGetWriteNewSeriesBackoffDuration struct {
+ handler Node
+}
+
+func (p *nodeProcessorGetWriteNewSeriesBackoffDuration) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeGetWriteNewSeriesBackoffDurationArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("getWriteNewSeriesBackoffDuration", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeGetWriteNewSeriesBackoffDurationResult{}
+ var retval *NodeWriteNewSeriesBackoffDurationResult_
+ var err2 error
+ if retval, err2 = p.handler.GetWriteNewSeriesBackoffDuration(); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getWriteNewSeriesBackoffDuration: "+err2.Error())
+ oprot.WriteMessageBegin("getWriteNewSeriesBackoffDuration", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("getWriteNewSeriesBackoffDuration", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorSetWriteNewSeriesBackoffDuration struct {
+ handler Node
+}
+
+func (p *nodeProcessorSetWriteNewSeriesBackoffDuration) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeSetWriteNewSeriesBackoffDurationArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("setWriteNewSeriesBackoffDuration", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeSetWriteNewSeriesBackoffDurationResult{}
+ var retval *NodeWriteNewSeriesBackoffDurationResult_
+ var err2 error
+ if retval, err2 = p.handler.SetWriteNewSeriesBackoffDuration(args.Req); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing setWriteNewSeriesBackoffDuration: "+err2.Error())
+ oprot.WriteMessageBegin("setWriteNewSeriesBackoffDuration", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("setWriteNewSeriesBackoffDuration", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond struct {
+ handler Node
+}
+
+func (p *nodeProcessorGetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeGetWriteNewSeriesLimitPerShardPerSecondArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("getWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeGetWriteNewSeriesLimitPerShardPerSecondResult{}
+ var retval *NodeWriteNewSeriesLimitPerShardPerSecondResult_
+ var err2 error
+ if retval, err2 = p.handler.GetWriteNewSeriesLimitPerShardPerSecond(); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getWriteNewSeriesLimitPerShardPerSecond: "+err2.Error())
+ oprot.WriteMessageBegin("getWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("getWriteNewSeriesLimitPerShardPerSecond", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond struct {
+ handler Node
+}
+
+func (p *nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeSetWriteNewSeriesLimitPerShardPerSecondArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeSetWriteNewSeriesLimitPerShardPerSecondResult{}
+ var retval *NodeWriteNewSeriesLimitPerShardPerSecondResult_
+ var err2 error
+ if retval, err2 = p.handler.SetWriteNewSeriesLimitPerShardPerSecond(args.Req); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing setWriteNewSeriesLimitPerShardPerSecond: "+err2.Error())
+ oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorDebugProfileStart struct {
+ handler Node
+}
+
+func (p *nodeProcessorDebugProfileStart) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeDebugProfileStartArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("debugProfileStart", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeDebugProfileStartResult{}
+ var retval *DebugProfileStartResult_
+ var err2 error
+ if retval, err2 = p.handler.DebugProfileStart(args.Req); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing debugProfileStart: "+err2.Error())
+ oprot.WriteMessageBegin("debugProfileStart", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("debugProfileStart", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorDebugProfileStop struct {
+ handler Node
+}
+
+func (p *nodeProcessorDebugProfileStop) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeDebugProfileStopArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("debugProfileStop", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeDebugProfileStopResult{}
+ var retval *DebugProfileStopResult_
+ var err2 error
+ if retval, err2 = p.handler.DebugProfileStop(args.Req); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing debugProfileStop: "+err2.Error())
+ oprot.WriteMessageBegin("debugProfileStop", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("debugProfileStop", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type nodeProcessorDebugIndexMemorySegments struct {
+ handler Node
+}
+
+func (p *nodeProcessorDebugIndexMemorySegments) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := NodeDebugIndexMemorySegmentsArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("debugIndexMemorySegments", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ result := NodeDebugIndexMemorySegmentsResult{}
+ var retval *DebugIndexMemorySegmentsResult_
+ var err2 error
+ if retval, err2 = p.handler.DebugIndexMemorySegments(args.Req); err2 != nil {
+ switch v := err2.(type) {
+ case *Error:
+ result.Err = v
+ default:
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing debugIndexMemorySegments: "+err2.Error())
+ oprot.WriteMessageBegin("debugIndexMemorySegments", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush()
+ return true, err2
+ }
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("debugIndexMemorySegments", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
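
These per-method processor structs are the targets of a name-based dispatch table built earlier in this generated file (outside this hunk). The wiring below is therefore a hedged reconstruction of the standard Thrift pattern rather than a quote from it; it leans on `thrift.TProcessorFunction` matching the `Process(seqId, iprot, oprot)` signature above, and on `thrift.TException` embedding `error` in this runtime version:

// process sketches the standard generated dispatch: read the message
// header, then hand the call to the matching per-method processor.
func process(processorMap map[string]thrift.TProcessorFunction,
	iprot, oprot thrift.TProtocol) (bool, thrift.TException) {
	name, _, seqId, err := iprot.ReadMessageBegin()
	if err != nil {
		return false, err // error satisfies TException in this runtime
	}
	if processor, ok := processorMap[name]; ok {
		return processor.Process(seqId, iprot, oprot)
	}
	// Unknown method: skip the payload and reply with UNKNOWN_METHOD.
	iprot.Skip(thrift.STRUCT)
	iprot.ReadMessageEnd()
	x := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
	oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
	x.Write(oprot)
	oprot.WriteMessageEnd()
	oprot.Flush()
	return false, x
}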
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Req
+type NodeQueryArgs struct {
+ Req *QueryRequest `thrift:"req,1" db:"req" json:"req"`
+}
+
+func NewNodeQueryArgs() *NodeQueryArgs {
+ return &NodeQueryArgs{}
+}
+
+var NodeQueryArgs_Req_DEFAULT *QueryRequest
+
+func (p *NodeQueryArgs) GetReq() *QueryRequest {
+ if !p.IsSetReq() {
+ return NodeQueryArgs_Req_DEFAULT
+ }
+ return p.Req
+}
+func (p *NodeQueryArgs) IsSetReq() bool {
+ return p.Req != nil
+}
+
+func (p *NodeQueryArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *NodeQueryArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &QueryRequest{
+ RangeType: 0,
+
+ ResultTimeType: 0,
+ }
+ if err := p.Req.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
+ }
+ return nil
+}
+
+func (p *NodeQueryArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("query_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if p != nil {
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *NodeQueryArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
+ }
+ if err := p.Req.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
+ }
+ return err
+}
+
+func (p *NodeQueryArgs) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("NodeQueryArgs(%+v)", *p)
+}
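
NodeQueryArgs and the Args/Result types that follow are plain Thrift struct codecs, so they can be exercised without a server. A minimal round-trip sketch under the same runtime (thrift.NewTMemoryBuffer and thrift.NewTBinaryProtocolTransport are library constructors; the helper and the empty QueryRequest literal are illustrative only):

// roundTripQueryArgs shows the codec in isolation (hypothetical helper).
func roundTripQueryArgs() (*NodeQueryArgs, error) {
	buf := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolTransport(buf)

	in := NewNodeQueryArgs()
	in.Req = &QueryRequest{} // fields elided; any populated request works
	if err := in.Write(proto); err != nil {
		return nil, err
	}

	out := NewNodeQueryArgs()
	if err := out.Read(proto); err != nil {
		return nil, err
	}
	// out.GetReq() now mirrors in.Req; GetReq is nil-safe and falls back to
	// NodeQueryArgs_Req_DEFAULT (nil) when the field was never set.
	return out, nil
}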
+
+// Attributes:
+// - Success
+// - Err
+type NodeQueryResult struct {
+ Success *QueryResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+}
+
+func NewNodeQueryResult() *NodeQueryResult {
+ return &NodeQueryResult{}
+}
+
+var NodeQueryResult_Success_DEFAULT *QueryResult_
+
+func (p *NodeQueryResult) GetSuccess() *QueryResult_ {
+ if !p.IsSetSuccess() {
+ return NodeQueryResult_Success_DEFAULT
+ }
+ return p.Success
+}
+
+var NodeQueryResult_Err_DEFAULT *Error
+
+func (p *NodeQueryResult) GetErr() *Error {
+ if !p.IsSetErr() {
+ return NodeQueryResult_Err_DEFAULT
+ }
+ return p.Err
+}
+func (p *NodeQueryResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *NodeQueryResult) IsSetErr() bool {
+ return p.Err != nil
+}
+
+func (p *NodeQueryResult) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if err := p.ReadField0(iprot); err != nil {
+ return err
+ }
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *NodeQueryResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &QueryResult_{}
+ if err := p.Success.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+ }
+ return nil
+}
+
+func (p *NodeQueryResult) ReadField1(iprot thrift.TProtocol) error {
+ p.Err = &Error{
+ Type: 0,
+ }
+ if err := p.Err.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err)
+ }
+ return nil
+}
+
+func (p *NodeQueryResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("query_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if p != nil {
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *NodeQueryResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *NodeQueryResult) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetErr() {
+ if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
+ }
+ if err := p.Err.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *NodeQueryResult) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("NodeQueryResult(%+v)", *p)
+}
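
NodeQueryResult follows Thrift's union-like result convention: field 0 carries the declared return value, field 1 the declared *Error exception, and at most one is expected to be set (`omitempty` keeps the unset one out of JSON). A hedged sketch of the caller-side branching, mirroring the MISSING_RESULT convention of generated clients (the helper is hypothetical, and it assumes *Error implements the error interface, as generated Thrift exceptions do):

// decodeQueryResult shows how a caller would branch on the decoded reply.
func decodeQueryResult(proto thrift.TProtocol) (*QueryResult_, error) {
	res := NewNodeQueryResult()
	if err := res.Read(proto); err != nil {
		return nil, err // protocol-level failure
	}
	switch {
	case res.IsSetErr():
		return nil, res.GetErr() // declared service exception (*Error)
	case res.IsSetSuccess():
		return res.GetSuccess(), nil
	default:
		// Neither optional field arrived: treat the reply as malformed.
		return nil, thrift.NewTApplicationException(
			thrift.MISSING_RESULT, "query failed: unknown result")
	}
}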
+
+// Attributes:
+// - Req
+type NodeAggregateArgs struct {
+ Req *AggregateQueryRequest `thrift:"req,1" db:"req" json:"req"`
+}
+
+func NewNodeAggregateArgs() *NodeAggregateArgs {
+ return &NodeAggregateArgs{}
+}
+
+var NodeAggregateArgs_Req_DEFAULT *AggregateQueryRequest
+
+func (p *NodeAggregateArgs) GetReq() *AggregateQueryRequest {
+ if !p.IsSetReq() {
+ return NodeAggregateArgs_Req_DEFAULT
+ }
+ return p.Req
+}
+func (p *NodeAggregateArgs) IsSetReq() bool {
+ return p.Req != nil
+}
+
+func (p *NodeAggregateArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *NodeAggregateArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &AggregateQueryRequest{
+ AggregateQueryType: 1,
+
+ RangeType: 0,
+ }
+ if err := p.Req.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
+ }
+ return nil
+}
+
+func (p *NodeAggregateArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("aggregate_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if p != nil {
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *NodeAggregateArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
+ }
+ if err := p.Req.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
+ }
+ return err
+}
+
+func (p *NodeAggregateArgs) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("NodeAggregateArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+// - Err
+type NodeAggregateResult struct {
+ Success *AggregateQueryResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+}
+
+func NewNodeAggregateResult() *NodeAggregateResult {
+ return &NodeAggregateResult{}
+}
+
+var NodeAggregateResult_Success_DEFAULT *AggregateQueryResult_
+
+func (p *NodeAggregateResult) GetSuccess() *AggregateQueryResult_ {
+ if !p.IsSetSuccess() {
+ return NodeAggregateResult_Success_DEFAULT
+ }
+ return p.Success
+}
+
+var NodeAggregateResult_Err_DEFAULT *Error
+
+func (p *NodeAggregateResult) GetErr() *Error {
+ if !p.IsSetErr() {
+ return NodeAggregateResult_Err_DEFAULT
+ }
+ return p.Err
+}
+func (p *NodeAggregateResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *NodeAggregateResult) IsSetErr() bool {
+ return p.Err != nil
+}
+
+func (p *NodeAggregateResult) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if err := p.ReadField0(iprot); err != nil {
+ return err
+ }
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *NodeAggregateResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &AggregateQueryResult_{}
+ if err := p.Success.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+ }
+ return nil
+}
+
+func (p *NodeAggregateResult) ReadField1(iprot thrift.TProtocol) error {
+ p.Err = &Error{
+ Type: 0,
+ }
+ if err := p.Err.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err)
+ }
+ return nil
+}
+
+func (p *NodeAggregateResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("aggregate_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if p != nil {
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *NodeAggregateResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *NodeAggregateResult) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetErr() {
+ if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
+ }
+ if err := p.Err.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *NodeAggregateResult) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("NodeAggregateResult(%+v)", *p)
+}
+
+// Attributes:
+// - Req
+type NodeFetchArgs struct {
+ Req *FetchRequest `thrift:"req,1" db:"req" json:"req"`
+}
+
+func NewNodeFetchArgs() *NodeFetchArgs {
+ return &NodeFetchArgs{}
+}
+
+var NodeFetchArgs_Req_DEFAULT *FetchRequest
+
+func (p *NodeFetchArgs) GetReq() *FetchRequest {
+ if !p.IsSetReq() {
+ return NodeFetchArgs_Req_DEFAULT
+ }
+ return p.Req
+}
+func (p *NodeFetchArgs) IsSetReq() bool {
+ return p.Req != nil
+}
+
+func (p *NodeFetchArgs) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *NodeFetchArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &FetchRequest{
+ RangeType: 0,
+
+ ResultTimeType: 0,
+ }
+ if err := p.Req.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
+ }
+ return nil
+}
+
+func (p *NodeFetchArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("fetch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+ }
+ if p != nil {
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
+ }
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
+ }
+ return nil
+}
+
+func (p *NodeFetchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
+ }
+ if err := p.Req.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
+ }
+ return err
+}
+
+func (p *NodeFetchArgs) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("NodeFetchArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+// - Err
+type NodeFetchResult struct {
+ Success *FetchResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+}
+
+func NewNodeFetchResult() *NodeFetchResult {
+ return &NodeFetchResult{}
+}
+
+var NodeFetchResult_Success_DEFAULT *FetchResult_
+
+func (p *NodeFetchResult) GetSuccess() *FetchResult_ {
+ if !p.IsSetSuccess() {
+ return NodeFetchResult_Success_DEFAULT
+ }
+ return p.Success
+}
+
+var NodeFetchResult_Err_DEFAULT *Error
+
+func (p *NodeFetchResult) GetErr() *Error {
+ if !p.IsSetErr() {
+ return NodeFetchResult_Err_DEFAULT
+ }
+ return p.Err
+}
+func (p *NodeFetchResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *NodeFetchResult) IsSetErr() bool {
+ return p.Err != nil
+}
+
+func (p *NodeFetchResult) Read(iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if err := p.ReadField0(iprot); err != nil {
+ return err
+ }
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
- return true, err
+ return nil
}
-type nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond struct {
- handler Node
+func (p *NodeFetchResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &FetchResult_{}
+ if err := p.Success.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+ }
+ return nil
}
-func (p *nodeProcessorSetWriteNewSeriesLimitPerShardPerSecond) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := NodeSetWriteNewSeriesLimitPerShardPerSecondArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
+func (p *NodeFetchResult) ReadField1(iprot thrift.TProtocol) error {
+ p.Err = &Error{
+ Type: 0,
}
+ if err := p.Err.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err)
+ }
+ return nil
+}
- iprot.ReadMessageEnd()
- result := NodeSetWriteNewSeriesLimitPerShardPerSecondResult{}
- var retval *NodeWriteNewSeriesLimitPerShardPerSecondResult_
- var err2 error
- if retval, err2 = p.handler.SetWriteNewSeriesLimitPerShardPerSecond(args.Req); err2 != nil {
- switch v := err2.(type) {
- case *Error:
- result.Err = v
- default:
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing setWriteNewSeriesLimitPerShardPerSecond: "+err2.Error())
- oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- }
- } else {
- result.Success = retval
+func (p *NodeFetchResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("fetch_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
- if err2 = oprot.WriteMessageBegin("setWriteNewSeriesLimitPerShardPerSecond", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ if p != nil {
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ if err := oprot.WriteFieldStop(); err != nil {
+ return thrift.PrependError("write field stop error: ", err)
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if err := oprot.WriteStructEnd(); err != nil {
+ return thrift.PrependError("write struct stop error: ", err)
}
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
+ return nil
+}
+
+func (p *NodeFetchResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
}
- if err != nil {
- return
+ return err
+}
+
+func (p *NodeFetchResult) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetErr() {
+ if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
+ }
+ if err := p.Err.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err)
+ }
}
- return true, err
+ return err
}
-// HELPER FUNCTIONS AND STRUCTURES
+func (p *NodeFetchResult) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("NodeFetchResult(%+v)", *p)
+}
// Attributes:
// - Req
-type NodeQueryArgs struct {
- Req *QueryRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeWriteArgs struct {
+ Req *WriteRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeQueryArgs() *NodeQueryArgs {
- return &NodeQueryArgs{}
+func NewNodeWriteArgs() *NodeWriteArgs {
+ return &NodeWriteArgs{}
}
-var NodeQueryArgs_Req_DEFAULT *QueryRequest
+var NodeWriteArgs_Req_DEFAULT *WriteRequest
-func (p *NodeQueryArgs) GetReq() *QueryRequest {
+func (p *NodeWriteArgs) GetReq() *WriteRequest {
if !p.IsSetReq() {
- return NodeQueryArgs_Req_DEFAULT
+ return NodeWriteArgs_Req_DEFAULT
}
return p.Req
}
-func (p *NodeQueryArgs) IsSetReq() bool {
+func (p *NodeWriteArgs) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeQueryArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeWriteArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -16615,20 +18780,16 @@ func (p *NodeQueryArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeQueryArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &QueryRequest{
- RangeType: 0,
-
- ResultTimeType: 0,
- }
+func (p *NodeWriteArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &WriteRequest{}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeQueryArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("query_args"); err != nil {
+func (p *NodeWriteArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("write_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -16645,7 +18806,7 @@ func (p *NodeQueryArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeQueryArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeWriteArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -16658,51 +18819,36 @@ func (p *NodeQueryArgs) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeQueryArgs) String() string {
+func (p *NodeWriteArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeQueryArgs(%+v)", *p)
+ return fmt.Sprintf("NodeWriteArgs(%+v)", *p)
}
// Attributes:
-// - Success
// - Err
-type NodeQueryResult struct {
- Success *QueryResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
-}
-
-func NewNodeQueryResult() *NodeQueryResult {
- return &NodeQueryResult{}
+type NodeWriteResult struct {
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-var NodeQueryResult_Success_DEFAULT *QueryResult_
-
-func (p *NodeQueryResult) GetSuccess() *QueryResult_ {
- if !p.IsSetSuccess() {
- return NodeQueryResult_Success_DEFAULT
- }
- return p.Success
+func NewNodeWriteResult() *NodeWriteResult {
+ return &NodeWriteResult{}
}
-var NodeQueryResult_Err_DEFAULT *Error
+var NodeWriteResult_Err_DEFAULT *Error
-func (p *NodeQueryResult) GetErr() *Error {
+func (p *NodeWriteResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeQueryResult_Err_DEFAULT
+ return NodeWriteResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeQueryResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *NodeQueryResult) IsSetErr() bool {
+func (p *NodeWriteResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeQueryResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeWriteResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -16716,10 +18862,6 @@ func (p *NodeQueryResult) Read(iprot thrift.TProtocol) error {
break
}
switch fieldId {
- case 0:
- if err := p.ReadField0(iprot); err != nil {
- return err
- }
case 1:
if err := p.ReadField1(iprot); err != nil {
return err
@@ -16739,15 +18881,7 @@ func (p *NodeQueryResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeQueryResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &QueryResult_{}
- if err := p.Success.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
- }
- return nil
-}
-
-func (p *NodeQueryResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeWriteResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -16757,14 +18891,11 @@ func (p *NodeQueryResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeQueryResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("query_result"); err != nil {
+func (p *NodeWriteResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("write_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
- if err := p.writeField0(oprot); err != nil {
- return err
- }
if err := p.writeField1(oprot); err != nil {
return err
}
@@ -16778,22 +18909,7 @@ func (p *NodeQueryResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeQueryResult) writeField0(oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
- }
- if err := p.Success.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
- }
- }
- return err
-}
-
-func (p *NodeQueryResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeWriteResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -16808,36 +18924,36 @@ func (p *NodeQueryResult) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeQueryResult) String() string {
+func (p *NodeWriteResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeQueryResult(%+v)", *p)
+ return fmt.Sprintf("NodeWriteResult(%+v)", *p)
}
// Attributes:
// - Req
-type NodeAggregateRawArgs struct {
- Req *AggregateQueryRawRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeWriteTaggedArgs struct {
+ Req *WriteTaggedRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeAggregateRawArgs() *NodeAggregateRawArgs {
- return &NodeAggregateRawArgs{}
+func NewNodeWriteTaggedArgs() *NodeWriteTaggedArgs {
+ return &NodeWriteTaggedArgs{}
}
-var NodeAggregateRawArgs_Req_DEFAULT *AggregateQueryRawRequest
+var NodeWriteTaggedArgs_Req_DEFAULT *WriteTaggedRequest
-func (p *NodeAggregateRawArgs) GetReq() *AggregateQueryRawRequest {
+func (p *NodeWriteTaggedArgs) GetReq() *WriteTaggedRequest {
if !p.IsSetReq() {
- return NodeAggregateRawArgs_Req_DEFAULT
+ return NodeWriteTaggedArgs_Req_DEFAULT
}
return p.Req
}
-func (p *NodeAggregateRawArgs) IsSetReq() bool {
+func (p *NodeWriteTaggedArgs) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeAggregateRawArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeWriteTaggedArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -16864,26 +18980,22 @@ func (p *NodeAggregateRawArgs) Read(iprot thrift.TProtocol) error {
return err
}
}
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *NodeAggregateRawArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &AggregateQueryRawRequest{
- AggregateQueryType: 1,
-
- RangeType: 0,
+ if err := iprot.ReadStructEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
+ return nil
+}
+
+func (p *NodeWriteTaggedArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &WriteTaggedRequest{}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeAggregateRawArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("aggregateRaw_args"); err != nil {
+func (p *NodeWriteTaggedArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("writeTagged_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -16900,7 +19012,7 @@ func (p *NodeAggregateRawArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeAggregateRawArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeWriteTaggedArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -16913,51 +19025,36 @@ func (p *NodeAggregateRawArgs) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeAggregateRawArgs) String() string {
+func (p *NodeWriteTaggedArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeAggregateRawArgs(%+v)", *p)
+ return fmt.Sprintf("NodeWriteTaggedArgs(%+v)", *p)
}
// Attributes:
-// - Success
// - Err
-type NodeAggregateRawResult struct {
- Success *AggregateQueryRawResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
-}
-
-func NewNodeAggregateRawResult() *NodeAggregateRawResult {
- return &NodeAggregateRawResult{}
+type NodeWriteTaggedResult struct {
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-var NodeAggregateRawResult_Success_DEFAULT *AggregateQueryRawResult_
-
-func (p *NodeAggregateRawResult) GetSuccess() *AggregateQueryRawResult_ {
- if !p.IsSetSuccess() {
- return NodeAggregateRawResult_Success_DEFAULT
- }
- return p.Success
+func NewNodeWriteTaggedResult() *NodeWriteTaggedResult {
+ return &NodeWriteTaggedResult{}
}
-var NodeAggregateRawResult_Err_DEFAULT *Error
+var NodeWriteTaggedResult_Err_DEFAULT *Error
-func (p *NodeAggregateRawResult) GetErr() *Error {
+func (p *NodeWriteTaggedResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeAggregateRawResult_Err_DEFAULT
+ return NodeWriteTaggedResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeAggregateRawResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *NodeAggregateRawResult) IsSetErr() bool {
+func (p *NodeWriteTaggedResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeAggregateRawResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeWriteTaggedResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -16971,10 +19068,6 @@ func (p *NodeAggregateRawResult) Read(iprot thrift.TProtocol) error {
break
}
switch fieldId {
- case 0:
- if err := p.ReadField0(iprot); err != nil {
- return err
- }
case 1:
if err := p.ReadField1(iprot); err != nil {
return err
@@ -16994,15 +19087,7 @@ func (p *NodeAggregateRawResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeAggregateRawResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &AggregateQueryRawResult_{}
- if err := p.Success.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
- }
- return nil
-}
-
-func (p *NodeAggregateRawResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeWriteTaggedResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -17012,14 +19097,11 @@ func (p *NodeAggregateRawResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeAggregateRawResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("aggregateRaw_result"); err != nil {
+func (p *NodeWriteTaggedResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("writeTagged_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
- if err := p.writeField0(oprot); err != nil {
- return err
- }
if err := p.writeField1(oprot); err != nil {
return err
}
@@ -17033,22 +19115,7 @@ func (p *NodeAggregateRawResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeAggregateRawResult) writeField0(oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
- }
- if err := p.Success.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
- }
- }
- return err
-}
-
-func (p *NodeAggregateRawResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeWriteTaggedResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -17063,36 +19130,36 @@ func (p *NodeAggregateRawResult) writeField1(oprot thrift.TProtocol) (err error)
return err
}
-func (p *NodeAggregateRawResult) String() string {
+func (p *NodeWriteTaggedResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeAggregateRawResult(%+v)", *p)
+ return fmt.Sprintf("NodeWriteTaggedResult(%+v)", *p)
}
// Attributes:
// - Req
-type NodeAggregateArgs struct {
- Req *AggregateQueryRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeFetchBatchRawArgs struct {
+ Req *FetchBatchRawRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeAggregateArgs() *NodeAggregateArgs {
- return &NodeAggregateArgs{}
+func NewNodeFetchBatchRawArgs() *NodeFetchBatchRawArgs {
+ return &NodeFetchBatchRawArgs{}
}
-var NodeAggregateArgs_Req_DEFAULT *AggregateQueryRequest
+var NodeFetchBatchRawArgs_Req_DEFAULT *FetchBatchRawRequest
-func (p *NodeAggregateArgs) GetReq() *AggregateQueryRequest {
+func (p *NodeFetchBatchRawArgs) GetReq() *FetchBatchRawRequest {
if !p.IsSetReq() {
- return NodeAggregateArgs_Req_DEFAULT
+ return NodeFetchBatchRawArgs_Req_DEFAULT
}
return p.Req
}
-func (p *NodeAggregateArgs) IsSetReq() bool {
+func (p *NodeFetchBatchRawArgs) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeAggregateArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeFetchBatchRawArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -17125,11 +19192,9 @@ func (p *NodeAggregateArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeAggregateArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &AggregateQueryRequest{
- AggregateQueryType: 1,
-
- RangeType: 0,
+func (p *NodeFetchBatchRawArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &FetchBatchRawRequest{
+ RangeTimeType: 0,
}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
@@ -17137,8 +19202,8 @@ func (p *NodeAggregateArgs) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeAggregateArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("aggregate_args"); err != nil {
+func (p *NodeFetchBatchRawArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("fetchBatchRaw_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -17155,7 +19220,7 @@ func (p *NodeAggregateArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeAggregateArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchBatchRawArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -17168,51 +19233,51 @@ func (p *NodeAggregateArgs) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeAggregateArgs) String() string {
+func (p *NodeFetchBatchRawArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeAggregateArgs(%+v)", *p)
+ return fmt.Sprintf("NodeFetchBatchRawArgs(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeAggregateResult struct {
- Success *AggregateQueryResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeFetchBatchRawResult struct {
+ Success *FetchBatchRawResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeAggregateResult() *NodeAggregateResult {
- return &NodeAggregateResult{}
+func NewNodeFetchBatchRawResult() *NodeFetchBatchRawResult {
+ return &NodeFetchBatchRawResult{}
}
-var NodeAggregateResult_Success_DEFAULT *AggregateQueryResult_
+var NodeFetchBatchRawResult_Success_DEFAULT *FetchBatchRawResult_
-func (p *NodeAggregateResult) GetSuccess() *AggregateQueryResult_ {
+func (p *NodeFetchBatchRawResult) GetSuccess() *FetchBatchRawResult_ {
if !p.IsSetSuccess() {
- return NodeAggregateResult_Success_DEFAULT
+ return NodeFetchBatchRawResult_Success_DEFAULT
}
return p.Success
}
-var NodeAggregateResult_Err_DEFAULT *Error
+var NodeFetchBatchRawResult_Err_DEFAULT *Error
-func (p *NodeAggregateResult) GetErr() *Error {
+func (p *NodeFetchBatchRawResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeAggregateResult_Err_DEFAULT
+ return NodeFetchBatchRawResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeAggregateResult) IsSetSuccess() bool {
+func (p *NodeFetchBatchRawResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeAggregateResult) IsSetErr() bool {
+func (p *NodeFetchBatchRawResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeAggregateResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeFetchBatchRawResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -17249,15 +19314,15 @@ func (p *NodeAggregateResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeAggregateResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &AggregateQueryResult_{}
+func (p *NodeFetchBatchRawResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &FetchBatchRawResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeAggregateResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeFetchBatchRawResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -17267,8 +19332,8 @@ func (p *NodeAggregateResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeAggregateResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("aggregate_result"); err != nil {
+func (p *NodeFetchBatchRawResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("fetchBatchRaw_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -17288,7 +19353,7 @@ func (p *NodeAggregateResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeAggregateResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchBatchRawResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -17303,7 +19368,7 @@ func (p *NodeAggregateResult) writeField0(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeAggregateResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchBatchRawResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -17318,36 +19383,36 @@ func (p *NodeAggregateResult) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeAggregateResult) String() string {
+func (p *NodeFetchBatchRawResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeAggregateResult(%+v)", *p)
+ return fmt.Sprintf("NodeFetchBatchRawResult(%+v)", *p)
}
// Attributes:
// - Req
-type NodeFetchArgs struct {
- Req *FetchRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeFetchBatchRawV2Args struct {
+ Req *FetchBatchRawV2Request `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeFetchArgs() *NodeFetchArgs {
- return &NodeFetchArgs{}
+func NewNodeFetchBatchRawV2Args() *NodeFetchBatchRawV2Args {
+ return &NodeFetchBatchRawV2Args{}
}
-var NodeFetchArgs_Req_DEFAULT *FetchRequest
+var NodeFetchBatchRawV2Args_Req_DEFAULT *FetchBatchRawV2Request
-func (p *NodeFetchArgs) GetReq() *FetchRequest {
+func (p *NodeFetchBatchRawV2Args) GetReq() *FetchBatchRawV2Request {
if !p.IsSetReq() {
- return NodeFetchArgs_Req_DEFAULT
+ return NodeFetchBatchRawV2Args_Req_DEFAULT
}
return p.Req
}
-func (p *NodeFetchArgs) IsSetReq() bool {
+func (p *NodeFetchBatchRawV2Args) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeFetchArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeFetchBatchRawV2Args) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -17380,20 +19445,16 @@ func (p *NodeFetchArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &FetchRequest{
- RangeType: 0,
-
- ResultTimeType: 0,
- }
+func (p *NodeFetchBatchRawV2Args) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &FetchBatchRawV2Request{}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeFetchArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("fetch_args"); err != nil {
+func (p *NodeFetchBatchRawV2Args) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("fetchBatchRawV2_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -17410,7 +19471,7 @@ func (p *NodeFetchArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchBatchRawV2Args) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -17423,51 +19484,51 @@ func (p *NodeFetchArgs) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeFetchArgs) String() string {
+func (p *NodeFetchBatchRawV2Args) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeFetchArgs(%+v)", *p)
+ return fmt.Sprintf("NodeFetchBatchRawV2Args(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeFetchResult struct {
- Success *FetchResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeFetchBatchRawV2Result struct {
+ Success *FetchBatchRawResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeFetchResult() *NodeFetchResult {
- return &NodeFetchResult{}
+func NewNodeFetchBatchRawV2Result() *NodeFetchBatchRawV2Result {
+ return &NodeFetchBatchRawV2Result{}
}
-var NodeFetchResult_Success_DEFAULT *FetchResult_
+var NodeFetchBatchRawV2Result_Success_DEFAULT *FetchBatchRawResult_
-func (p *NodeFetchResult) GetSuccess() *FetchResult_ {
+func (p *NodeFetchBatchRawV2Result) GetSuccess() *FetchBatchRawResult_ {
if !p.IsSetSuccess() {
- return NodeFetchResult_Success_DEFAULT
+ return NodeFetchBatchRawV2Result_Success_DEFAULT
}
return p.Success
}
-var NodeFetchResult_Err_DEFAULT *Error
+var NodeFetchBatchRawV2Result_Err_DEFAULT *Error
-func (p *NodeFetchResult) GetErr() *Error {
+func (p *NodeFetchBatchRawV2Result) GetErr() *Error {
if !p.IsSetErr() {
- return NodeFetchResult_Err_DEFAULT
+ return NodeFetchBatchRawV2Result_Err_DEFAULT
}
return p.Err
}
-func (p *NodeFetchResult) IsSetSuccess() bool {
+func (p *NodeFetchBatchRawV2Result) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeFetchResult) IsSetErr() bool {
+func (p *NodeFetchBatchRawV2Result) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeFetchResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeFetchBatchRawV2Result) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -17504,15 +19565,15 @@ func (p *NodeFetchResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &FetchResult_{}
+func (p *NodeFetchBatchRawV2Result) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &FetchBatchRawResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeFetchResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeFetchBatchRawV2Result) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -17522,8 +19583,8 @@ func (p *NodeFetchResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("fetch_result"); err != nil {
+func (p *NodeFetchBatchRawV2Result) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("fetchBatchRawV2_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -17543,7 +19604,7 @@ func (p *NodeFetchResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchBatchRawV2Result) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -17558,7 +19619,7 @@ func (p *NodeFetchResult) writeField0(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeFetchResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchBatchRawV2Result) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -17573,36 +19634,36 @@ func (p *NodeFetchResult) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeFetchResult) String() string {
+func (p *NodeFetchBatchRawV2Result) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeFetchResult(%+v)", *p)
+ return fmt.Sprintf("NodeFetchBatchRawV2Result(%+v)", *p)
}
// Attributes:
// - Req
-type NodeFetchTaggedArgs struct {
- Req *FetchTaggedRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeFetchBlocksRawArgs struct {
+ Req *FetchBlocksRawRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeFetchTaggedArgs() *NodeFetchTaggedArgs {
- return &NodeFetchTaggedArgs{}
+func NewNodeFetchBlocksRawArgs() *NodeFetchBlocksRawArgs {
+ return &NodeFetchBlocksRawArgs{}
}
-var NodeFetchTaggedArgs_Req_DEFAULT *FetchTaggedRequest
+var NodeFetchBlocksRawArgs_Req_DEFAULT *FetchBlocksRawRequest
-func (p *NodeFetchTaggedArgs) GetReq() *FetchTaggedRequest {
+func (p *NodeFetchBlocksRawArgs) GetReq() *FetchBlocksRawRequest {
if !p.IsSetReq() {
- return NodeFetchTaggedArgs_Req_DEFAULT
+ return NodeFetchBlocksRawArgs_Req_DEFAULT
}
return p.Req
}
-func (p *NodeFetchTaggedArgs) IsSetReq() bool {
+func (p *NodeFetchBlocksRawArgs) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeFetchTaggedArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeFetchBlocksRawArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -17635,18 +19696,16 @@ func (p *NodeFetchTaggedArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchTaggedArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &FetchTaggedRequest{
- RangeTimeType: 0,
- }
+func (p *NodeFetchBlocksRawArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &FetchBlocksRawRequest{}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeFetchTaggedArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("fetchTagged_args"); err != nil {
+func (p *NodeFetchBlocksRawArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("fetchBlocksRaw_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -17663,7 +19722,7 @@ func (p *NodeFetchTaggedArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchTaggedArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchBlocksRawArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -17676,51 +19735,51 @@ func (p *NodeFetchTaggedArgs) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeFetchTaggedArgs) String() string {
+func (p *NodeFetchBlocksRawArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeFetchTaggedArgs(%+v)", *p)
+ return fmt.Sprintf("NodeFetchBlocksRawArgs(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeFetchTaggedResult struct {
- Success *FetchTaggedResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeFetchBlocksRawResult struct {
+ Success *FetchBlocksRawResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeFetchTaggedResult() *NodeFetchTaggedResult {
- return &NodeFetchTaggedResult{}
+func NewNodeFetchBlocksRawResult() *NodeFetchBlocksRawResult {
+ return &NodeFetchBlocksRawResult{}
}
-var NodeFetchTaggedResult_Success_DEFAULT *FetchTaggedResult_
+var NodeFetchBlocksRawResult_Success_DEFAULT *FetchBlocksRawResult_
-func (p *NodeFetchTaggedResult) GetSuccess() *FetchTaggedResult_ {
+func (p *NodeFetchBlocksRawResult) GetSuccess() *FetchBlocksRawResult_ {
if !p.IsSetSuccess() {
- return NodeFetchTaggedResult_Success_DEFAULT
+ return NodeFetchBlocksRawResult_Success_DEFAULT
}
return p.Success
}
-var NodeFetchTaggedResult_Err_DEFAULT *Error
+var NodeFetchBlocksRawResult_Err_DEFAULT *Error
-func (p *NodeFetchTaggedResult) GetErr() *Error {
+func (p *NodeFetchBlocksRawResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeFetchTaggedResult_Err_DEFAULT
+ return NodeFetchBlocksRawResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeFetchTaggedResult) IsSetSuccess() bool {
+func (p *NodeFetchBlocksRawResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeFetchTaggedResult) IsSetErr() bool {
+func (p *NodeFetchBlocksRawResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeFetchTaggedResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeFetchBlocksRawResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -17757,15 +19816,15 @@ func (p *NodeFetchTaggedResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchTaggedResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &FetchTaggedResult_{}
+func (p *NodeFetchBlocksRawResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &FetchBlocksRawResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeFetchTaggedResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeFetchBlocksRawResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -17775,8 +19834,8 @@ func (p *NodeFetchTaggedResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchTaggedResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("fetchTagged_result"); err != nil {
+func (p *NodeFetchBlocksRawResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("fetchBlocksRaw_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -17796,7 +19855,7 @@ func (p *NodeFetchTaggedResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchTaggedResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchBlocksRawResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -17811,7 +19870,7 @@ func (p *NodeFetchTaggedResult) writeField0(oprot thrift.TProtocol) (err error)
return err
}
-func (p *NodeFetchTaggedResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchBlocksRawResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -17826,36 +19885,36 @@ func (p *NodeFetchTaggedResult) writeField1(oprot thrift.TProtocol) (err error)
return err
}
-func (p *NodeFetchTaggedResult) String() string {
+func (p *NodeFetchBlocksRawResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeFetchTaggedResult(%+v)", *p)
+ return fmt.Sprintf("NodeFetchBlocksRawResult(%+v)", *p)
}
// Attributes:
// - Req
-type NodeWriteArgs struct {
- Req *WriteRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeFetchTaggedArgs struct {
+ Req *FetchTaggedRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeWriteArgs() *NodeWriteArgs {
- return &NodeWriteArgs{}
+func NewNodeFetchTaggedArgs() *NodeFetchTaggedArgs {
+ return &NodeFetchTaggedArgs{}
}
-var NodeWriteArgs_Req_DEFAULT *WriteRequest
+var NodeFetchTaggedArgs_Req_DEFAULT *FetchTaggedRequest
-func (p *NodeWriteArgs) GetReq() *WriteRequest {
+func (p *NodeFetchTaggedArgs) GetReq() *FetchTaggedRequest {
if !p.IsSetReq() {
- return NodeWriteArgs_Req_DEFAULT
+ return NodeFetchTaggedArgs_Req_DEFAULT
}
return p.Req
}
-func (p *NodeWriteArgs) IsSetReq() bool {
+func (p *NodeFetchTaggedArgs) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeWriteArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeFetchTaggedArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -17888,16 +19947,18 @@ func (p *NodeWriteArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &WriteRequest{}
+func (p *NodeFetchTaggedArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &FetchTaggedRequest{
+ RangeTimeType: 0,
+ }
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeWriteArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("write_args"); err != nil {
+func (p *NodeFetchTaggedArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("fetchTagged_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -17914,7 +19975,7 @@ func (p *NodeWriteArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchTaggedArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -17927,36 +19988,51 @@ func (p *NodeWriteArgs) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeWriteArgs) String() string {
+func (p *NodeFetchTaggedArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeWriteArgs(%+v)", *p)
+ return fmt.Sprintf("NodeFetchTaggedArgs(%+v)", *p)
}
// Attributes:
+// - Success
// - Err
-type NodeWriteResult struct {
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeFetchTaggedResult struct {
+ Success *FetchTaggedResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeWriteResult() *NodeWriteResult {
- return &NodeWriteResult{}
+func NewNodeFetchTaggedResult() *NodeFetchTaggedResult {
+ return &NodeFetchTaggedResult{}
}
-var NodeWriteResult_Err_DEFAULT *Error
+var NodeFetchTaggedResult_Success_DEFAULT *FetchTaggedResult_
-func (p *NodeWriteResult) GetErr() *Error {
+func (p *NodeFetchTaggedResult) GetSuccess() *FetchTaggedResult_ {
+ if !p.IsSetSuccess() {
+ return NodeFetchTaggedResult_Success_DEFAULT
+ }
+ return p.Success
+}
+
+var NodeFetchTaggedResult_Err_DEFAULT *Error
+
+func (p *NodeFetchTaggedResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeWriteResult_Err_DEFAULT
+ return NodeFetchTaggedResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeWriteResult) IsSetErr() bool {
+func (p *NodeFetchTaggedResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *NodeFetchTaggedResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeWriteResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeFetchTaggedResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -17970,6 +20046,10 @@ func (p *NodeWriteResult) Read(iprot thrift.TProtocol) error {
break
}
switch fieldId {
+ case 0:
+ if err := p.ReadField0(iprot); err != nil {
+ return err
+ }
case 1:
if err := p.ReadField1(iprot); err != nil {
return err
@@ -17989,7 +20069,15 @@ func (p *NodeWriteResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeFetchTaggedResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &FetchTaggedResult_{}
+ if err := p.Success.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+ }
+ return nil
+}
+
+func (p *NodeFetchTaggedResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -17999,11 +20087,14 @@ func (p *NodeWriteResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("write_result"); err != nil {
+func (p *NodeFetchTaggedResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("fetchTagged_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
if err := p.writeField1(oprot); err != nil {
return err
}
@@ -18017,7 +20108,22 @@ func (p *NodeWriteResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchTaggedResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *NodeFetchTaggedResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -18032,36 +20138,36 @@ func (p *NodeWriteResult) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeWriteResult) String() string {
+func (p *NodeFetchTaggedResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeWriteResult(%+v)", *p)
+ return fmt.Sprintf("NodeFetchTaggedResult(%+v)", *p)
}
// Attributes:
// - Req
-type NodeWriteTaggedArgs struct {
- Req *WriteTaggedRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeAggregateRawArgs struct {
+ Req *AggregateQueryRawRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeWriteTaggedArgs() *NodeWriteTaggedArgs {
- return &NodeWriteTaggedArgs{}
+func NewNodeAggregateRawArgs() *NodeAggregateRawArgs {
+ return &NodeAggregateRawArgs{}
}
-var NodeWriteTaggedArgs_Req_DEFAULT *WriteTaggedRequest
+var NodeAggregateRawArgs_Req_DEFAULT *AggregateQueryRawRequest
-func (p *NodeWriteTaggedArgs) GetReq() *WriteTaggedRequest {
+func (p *NodeAggregateRawArgs) GetReq() *AggregateQueryRawRequest {
if !p.IsSetReq() {
- return NodeWriteTaggedArgs_Req_DEFAULT
+ return NodeAggregateRawArgs_Req_DEFAULT
}
return p.Req
}
-func (p *NodeWriteTaggedArgs) IsSetReq() bool {
+func (p *NodeAggregateRawArgs) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeWriteTaggedArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeAggregateRawArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -18094,16 +20200,20 @@ func (p *NodeWriteTaggedArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &WriteTaggedRequest{}
+func (p *NodeAggregateRawArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &AggregateQueryRawRequest{
+ AggregateQueryType: 1,
+
+ RangeType: 0,
+ }
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeWriteTaggedArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("writeTagged_args"); err != nil {
+func (p *NodeAggregateRawArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("aggregateRaw_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -18120,7 +20230,7 @@ func (p *NodeWriteTaggedArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeAggregateRawArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -18133,36 +20243,51 @@ func (p *NodeWriteTaggedArgs) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeWriteTaggedArgs) String() string {
+func (p *NodeAggregateRawArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeWriteTaggedArgs(%+v)", *p)
+ return fmt.Sprintf("NodeAggregateRawArgs(%+v)", *p)
}
// Attributes:
+// - Success
// - Err
-type NodeWriteTaggedResult struct {
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeAggregateRawResult struct {
+ Success *AggregateQueryRawResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeWriteTaggedResult() *NodeWriteTaggedResult {
- return &NodeWriteTaggedResult{}
+func NewNodeAggregateRawResult() *NodeAggregateRawResult {
+ return &NodeAggregateRawResult{}
}
-var NodeWriteTaggedResult_Err_DEFAULT *Error
+var NodeAggregateRawResult_Success_DEFAULT *AggregateQueryRawResult_
-func (p *NodeWriteTaggedResult) GetErr() *Error {
+func (p *NodeAggregateRawResult) GetSuccess() *AggregateQueryRawResult_ {
+ if !p.IsSetSuccess() {
+ return NodeAggregateRawResult_Success_DEFAULT
+ }
+ return p.Success
+}
+
+var NodeAggregateRawResult_Err_DEFAULT *Error
+
+func (p *NodeAggregateRawResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeWriteTaggedResult_Err_DEFAULT
+ return NodeAggregateRawResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeWriteTaggedResult) IsSetErr() bool {
+func (p *NodeAggregateRawResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *NodeAggregateRawResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeWriteTaggedResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeAggregateRawResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -18176,6 +20301,10 @@ func (p *NodeWriteTaggedResult) Read(iprot thrift.TProtocol) error {
break
}
switch fieldId {
+ case 0:
+ if err := p.ReadField0(iprot); err != nil {
+ return err
+ }
case 1:
if err := p.ReadField1(iprot); err != nil {
return err
@@ -18195,7 +20324,15 @@ func (p *NodeWriteTaggedResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeAggregateRawResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &AggregateQueryRawResult_{}
+ if err := p.Success.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+ }
+ return nil
+}
+
+func (p *NodeAggregateRawResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -18205,11 +20342,14 @@ func (p *NodeWriteTaggedResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("writeTagged_result"); err != nil {
+func (p *NodeAggregateRawResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("aggregateRaw_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
if err := p.writeField1(oprot); err != nil {
return err
}
@@ -18223,7 +20363,22 @@ func (p *NodeWriteTaggedResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeAggregateRawResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *NodeAggregateRawResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -18238,36 +20393,36 @@ func (p *NodeWriteTaggedResult) writeField1(oprot thrift.TProtocol) (err error)
return err
}
-func (p *NodeWriteTaggedResult) String() string {
+func (p *NodeAggregateRawResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeWriteTaggedResult(%+v)", *p)
+ return fmt.Sprintf("NodeAggregateRawResult(%+v)", *p)
}
// Attributes:
// - Req
-type NodeFetchBatchRawArgs struct {
- Req *FetchBatchRawRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeFetchBlocksMetadataRawV2Args struct {
+ Req *FetchBlocksMetadataRawV2Request `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeFetchBatchRawArgs() *NodeFetchBatchRawArgs {
- return &NodeFetchBatchRawArgs{}
+func NewNodeFetchBlocksMetadataRawV2Args() *NodeFetchBlocksMetadataRawV2Args {
+ return &NodeFetchBlocksMetadataRawV2Args{}
}
-var NodeFetchBatchRawArgs_Req_DEFAULT *FetchBatchRawRequest
+var NodeFetchBlocksMetadataRawV2Args_Req_DEFAULT *FetchBlocksMetadataRawV2Request
-func (p *NodeFetchBatchRawArgs) GetReq() *FetchBatchRawRequest {
+func (p *NodeFetchBlocksMetadataRawV2Args) GetReq() *FetchBlocksMetadataRawV2Request {
if !p.IsSetReq() {
- return NodeFetchBatchRawArgs_Req_DEFAULT
+ return NodeFetchBlocksMetadataRawV2Args_Req_DEFAULT
}
return p.Req
}
-func (p *NodeFetchBatchRawArgs) IsSetReq() bool {
+func (p *NodeFetchBlocksMetadataRawV2Args) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeFetchBatchRawArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeFetchBlocksMetadataRawV2Args) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -18300,18 +20455,16 @@ func (p *NodeFetchBatchRawArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBatchRawArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &FetchBatchRawRequest{
- RangeTimeType: 0,
- }
+func (p *NodeFetchBlocksMetadataRawV2Args) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &FetchBlocksMetadataRawV2Request{}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeFetchBatchRawArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("fetchBatchRaw_args"); err != nil {
+func (p *NodeFetchBlocksMetadataRawV2Args) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("fetchBlocksMetadataRawV2_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -18328,7 +20481,7 @@ func (p *NodeFetchBatchRawArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBatchRawArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchBlocksMetadataRawV2Args) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -18341,51 +20494,51 @@ func (p *NodeFetchBatchRawArgs) writeField1(oprot thrift.TProtocol) (err error)
return err
}
-func (p *NodeFetchBatchRawArgs) String() string {
+func (p *NodeFetchBlocksMetadataRawV2Args) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeFetchBatchRawArgs(%+v)", *p)
+ return fmt.Sprintf("NodeFetchBlocksMetadataRawV2Args(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeFetchBatchRawResult struct {
- Success *FetchBatchRawResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeFetchBlocksMetadataRawV2Result struct {
+ Success *FetchBlocksMetadataRawV2Result_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeFetchBatchRawResult() *NodeFetchBatchRawResult {
- return &NodeFetchBatchRawResult{}
+func NewNodeFetchBlocksMetadataRawV2Result() *NodeFetchBlocksMetadataRawV2Result {
+ return &NodeFetchBlocksMetadataRawV2Result{}
}
-var NodeFetchBatchRawResult_Success_DEFAULT *FetchBatchRawResult_
+var NodeFetchBlocksMetadataRawV2Result_Success_DEFAULT *FetchBlocksMetadataRawV2Result_
-func (p *NodeFetchBatchRawResult) GetSuccess() *FetchBatchRawResult_ {
+func (p *NodeFetchBlocksMetadataRawV2Result) GetSuccess() *FetchBlocksMetadataRawV2Result_ {
if !p.IsSetSuccess() {
- return NodeFetchBatchRawResult_Success_DEFAULT
+ return NodeFetchBlocksMetadataRawV2Result_Success_DEFAULT
}
return p.Success
}
-var NodeFetchBatchRawResult_Err_DEFAULT *Error
+var NodeFetchBlocksMetadataRawV2Result_Err_DEFAULT *Error
-func (p *NodeFetchBatchRawResult) GetErr() *Error {
+func (p *NodeFetchBlocksMetadataRawV2Result) GetErr() *Error {
if !p.IsSetErr() {
- return NodeFetchBatchRawResult_Err_DEFAULT
+ return NodeFetchBlocksMetadataRawV2Result_Err_DEFAULT
}
return p.Err
}
-func (p *NodeFetchBatchRawResult) IsSetSuccess() bool {
+func (p *NodeFetchBlocksMetadataRawV2Result) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeFetchBatchRawResult) IsSetErr() bool {
+func (p *NodeFetchBlocksMetadataRawV2Result) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeFetchBatchRawResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeFetchBlocksMetadataRawV2Result) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -18422,15 +20575,15 @@ func (p *NodeFetchBatchRawResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBatchRawResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &FetchBatchRawResult_{}
+func (p *NodeFetchBlocksMetadataRawV2Result) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &FetchBlocksMetadataRawV2Result_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeFetchBatchRawResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeFetchBlocksMetadataRawV2Result) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -18440,8 +20593,8 @@ func (p *NodeFetchBatchRawResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBatchRawResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("fetchBatchRaw_result"); err != nil {
+func (p *NodeFetchBlocksMetadataRawV2Result) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("fetchBlocksMetadataRawV2_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -18461,7 +20614,7 @@ func (p *NodeFetchBatchRawResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBatchRawResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchBlocksMetadataRawV2Result) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -18476,7 +20629,7 @@ func (p *NodeFetchBatchRawResult) writeField0(oprot thrift.TProtocol) (err error
return err
}
-func (p *NodeFetchBatchRawResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeFetchBlocksMetadataRawV2Result) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -18491,36 +20644,36 @@ func (p *NodeFetchBatchRawResult) writeField1(oprot thrift.TProtocol) (err error
return err
}
-func (p *NodeFetchBatchRawResult) String() string {
+func (p *NodeFetchBlocksMetadataRawV2Result) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeFetchBatchRawResult(%+v)", *p)
+ return fmt.Sprintf("NodeFetchBlocksMetadataRawV2Result(%+v)", *p)
}
// Attributes:
// - Req
-type NodeFetchBatchRawV2Args struct {
- Req *FetchBatchRawV2Request `thrift:"req,1" db:"req" json:"req"`
+type NodeWriteBatchRawArgs struct {
+ Req *WriteBatchRawRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeFetchBatchRawV2Args() *NodeFetchBatchRawV2Args {
- return &NodeFetchBatchRawV2Args{}
+func NewNodeWriteBatchRawArgs() *NodeWriteBatchRawArgs {
+ return &NodeWriteBatchRawArgs{}
}
-var NodeFetchBatchRawV2Args_Req_DEFAULT *FetchBatchRawV2Request
+var NodeWriteBatchRawArgs_Req_DEFAULT *WriteBatchRawRequest
-func (p *NodeFetchBatchRawV2Args) GetReq() *FetchBatchRawV2Request {
+func (p *NodeWriteBatchRawArgs) GetReq() *WriteBatchRawRequest {
if !p.IsSetReq() {
- return NodeFetchBatchRawV2Args_Req_DEFAULT
+ return NodeWriteBatchRawArgs_Req_DEFAULT
}
return p.Req
}
-func (p *NodeFetchBatchRawV2Args) IsSetReq() bool {
+func (p *NodeWriteBatchRawArgs) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeFetchBatchRawV2Args) Read(iprot thrift.TProtocol) error {
+func (p *NodeWriteBatchRawArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -18553,16 +20706,16 @@ func (p *NodeFetchBatchRawV2Args) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBatchRawV2Args) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &FetchBatchRawV2Request{}
+func (p *NodeWriteBatchRawArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &WriteBatchRawRequest{}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeFetchBatchRawV2Args) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("fetchBatchRawV2_args"); err != nil {
+func (p *NodeWriteBatchRawArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("writeBatchRaw_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -18579,7 +20732,7 @@ func (p *NodeFetchBatchRawV2Args) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBatchRawV2Args) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeWriteBatchRawArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -18592,51 +20745,36 @@ func (p *NodeFetchBatchRawV2Args) writeField1(oprot thrift.TProtocol) (err error
return err
}
-func (p *NodeFetchBatchRawV2Args) String() string {
+func (p *NodeWriteBatchRawArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeFetchBatchRawV2Args(%+v)", *p)
+ return fmt.Sprintf("NodeWriteBatchRawArgs(%+v)", *p)
}
// Attributes:
-// - Success
// - Err
-type NodeFetchBatchRawV2Result struct {
- Success *FetchBatchRawResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
-}
-
-func NewNodeFetchBatchRawV2Result() *NodeFetchBatchRawV2Result {
- return &NodeFetchBatchRawV2Result{}
+type NodeWriteBatchRawResult struct {
+ Err *WriteBatchRawErrors `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-var NodeFetchBatchRawV2Result_Success_DEFAULT *FetchBatchRawResult_
-
-func (p *NodeFetchBatchRawV2Result) GetSuccess() *FetchBatchRawResult_ {
- if !p.IsSetSuccess() {
- return NodeFetchBatchRawV2Result_Success_DEFAULT
- }
- return p.Success
+func NewNodeWriteBatchRawResult() *NodeWriteBatchRawResult {
+ return &NodeWriteBatchRawResult{}
}
-var NodeFetchBatchRawV2Result_Err_DEFAULT *Error
+var NodeWriteBatchRawResult_Err_DEFAULT *WriteBatchRawErrors
-func (p *NodeFetchBatchRawV2Result) GetErr() *Error {
+func (p *NodeWriteBatchRawResult) GetErr() *WriteBatchRawErrors {
if !p.IsSetErr() {
- return NodeFetchBatchRawV2Result_Err_DEFAULT
+ return NodeWriteBatchRawResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeFetchBatchRawV2Result) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *NodeFetchBatchRawV2Result) IsSetErr() bool {
+func (p *NodeWriteBatchRawResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeFetchBatchRawV2Result) Read(iprot thrift.TProtocol) error {
+func (p *NodeWriteBatchRawResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -18650,10 +20788,6 @@ func (p *NodeFetchBatchRawV2Result) Read(iprot thrift.TProtocol) error {
break
}
switch fieldId {
- case 0:
- if err := p.ReadField0(iprot); err != nil {
- return err
- }
case 1:
if err := p.ReadField1(iprot); err != nil {
return err
@@ -18673,32 +20807,19 @@ func (p *NodeFetchBatchRawV2Result) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBatchRawV2Result) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &FetchBatchRawResult_{}
- if err := p.Success.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
- }
- return nil
-}
-
-func (p *NodeFetchBatchRawV2Result) ReadField1(iprot thrift.TProtocol) error {
- p.Err = &Error{
- Type: 0,
- }
+func (p *NodeWriteBatchRawResult) ReadField1(iprot thrift.TProtocol) error {
+ p.Err = &WriteBatchRawErrors{}
if err := p.Err.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err)
}
return nil
}
-func (p *NodeFetchBatchRawV2Result) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("fetchBatchRawV2_result"); err != nil {
+func (p *NodeWriteBatchRawResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("writeBatchRaw_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
- if err := p.writeField0(oprot); err != nil {
- return err
- }
if err := p.writeField1(oprot); err != nil {
return err
}
@@ -18712,22 +20833,7 @@ func (p *NodeFetchBatchRawV2Result) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBatchRawV2Result) writeField0(oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
- }
- if err := p.Success.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
- }
- }
- return err
-}
-
-func (p *NodeFetchBatchRawV2Result) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeWriteBatchRawResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -18742,36 +20848,36 @@ func (p *NodeFetchBatchRawV2Result) writeField1(oprot thrift.TProtocol) (err err
return err
}
-func (p *NodeFetchBatchRawV2Result) String() string {
+func (p *NodeWriteBatchRawResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeFetchBatchRawV2Result(%+v)", *p)
+ return fmt.Sprintf("NodeWriteBatchRawResult(%+v)", *p)
}
// Attributes:
// - Req
-type NodeFetchBlocksRawArgs struct {
- Req *FetchBlocksRawRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeWriteBatchRawV2Args struct {
+ Req *WriteBatchRawV2Request `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeFetchBlocksRawArgs() *NodeFetchBlocksRawArgs {
- return &NodeFetchBlocksRawArgs{}
+func NewNodeWriteBatchRawV2Args() *NodeWriteBatchRawV2Args {
+ return &NodeWriteBatchRawV2Args{}
}
-var NodeFetchBlocksRawArgs_Req_DEFAULT *FetchBlocksRawRequest
+var NodeWriteBatchRawV2Args_Req_DEFAULT *WriteBatchRawV2Request
-func (p *NodeFetchBlocksRawArgs) GetReq() *FetchBlocksRawRequest {
+func (p *NodeWriteBatchRawV2Args) GetReq() *WriteBatchRawV2Request {
if !p.IsSetReq() {
- return NodeFetchBlocksRawArgs_Req_DEFAULT
+ return NodeWriteBatchRawV2Args_Req_DEFAULT
}
return p.Req
}
-func (p *NodeFetchBlocksRawArgs) IsSetReq() bool {
+func (p *NodeWriteBatchRawV2Args) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeFetchBlocksRawArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeWriteBatchRawV2Args) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -18804,16 +20910,16 @@ func (p *NodeFetchBlocksRawArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBlocksRawArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &FetchBlocksRawRequest{}
+func (p *NodeWriteBatchRawV2Args) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &WriteBatchRawV2Request{}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeFetchBlocksRawArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("fetchBlocksRaw_args"); err != nil {
+func (p *NodeWriteBatchRawV2Args) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("writeBatchRawV2_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -18830,7 +20936,7 @@ func (p *NodeFetchBlocksRawArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBlocksRawArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeWriteBatchRawV2Args) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -18843,51 +20949,36 @@ func (p *NodeFetchBlocksRawArgs) writeField1(oprot thrift.TProtocol) (err error)
return err
}
-func (p *NodeFetchBlocksRawArgs) String() string {
+func (p *NodeWriteBatchRawV2Args) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeFetchBlocksRawArgs(%+v)", *p)
+ return fmt.Sprintf("NodeWriteBatchRawV2Args(%+v)", *p)
}
// Attributes:
-// - Success
// - Err
-type NodeFetchBlocksRawResult struct {
- Success *FetchBlocksRawResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
-}
-
-func NewNodeFetchBlocksRawResult() *NodeFetchBlocksRawResult {
- return &NodeFetchBlocksRawResult{}
+type NodeWriteBatchRawV2Result struct {
+ Err *WriteBatchRawErrors `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-var NodeFetchBlocksRawResult_Success_DEFAULT *FetchBlocksRawResult_
-
-func (p *NodeFetchBlocksRawResult) GetSuccess() *FetchBlocksRawResult_ {
- if !p.IsSetSuccess() {
- return NodeFetchBlocksRawResult_Success_DEFAULT
- }
- return p.Success
+func NewNodeWriteBatchRawV2Result() *NodeWriteBatchRawV2Result {
+ return &NodeWriteBatchRawV2Result{}
}
-var NodeFetchBlocksRawResult_Err_DEFAULT *Error
+var NodeWriteBatchRawV2Result_Err_DEFAULT *WriteBatchRawErrors
-func (p *NodeFetchBlocksRawResult) GetErr() *Error {
+func (p *NodeWriteBatchRawV2Result) GetErr() *WriteBatchRawErrors {
if !p.IsSetErr() {
- return NodeFetchBlocksRawResult_Err_DEFAULT
+ return NodeWriteBatchRawV2Result_Err_DEFAULT
}
return p.Err
}
-func (p *NodeFetchBlocksRawResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *NodeFetchBlocksRawResult) IsSetErr() bool {
+func (p *NodeWriteBatchRawV2Result) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeFetchBlocksRawResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeWriteBatchRawV2Result) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -18901,10 +20992,6 @@ func (p *NodeFetchBlocksRawResult) Read(iprot thrift.TProtocol) error {
break
}
switch fieldId {
- case 0:
- if err := p.ReadField0(iprot); err != nil {
- return err
- }
case 1:
if err := p.ReadField1(iprot); err != nil {
return err
@@ -18924,32 +21011,19 @@ func (p *NodeFetchBlocksRawResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBlocksRawResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &FetchBlocksRawResult_{}
- if err := p.Success.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
- }
- return nil
-}
-
-func (p *NodeFetchBlocksRawResult) ReadField1(iprot thrift.TProtocol) error {
- p.Err = &Error{
- Type: 0,
- }
+func (p *NodeWriteBatchRawV2Result) ReadField1(iprot thrift.TProtocol) error {
+ p.Err = &WriteBatchRawErrors{}
if err := p.Err.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err)
}
return nil
}
-func (p *NodeFetchBlocksRawResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("fetchBlocksRaw_result"); err != nil {
+func (p *NodeWriteBatchRawV2Result) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("writeBatchRawV2_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
- if err := p.writeField0(oprot); err != nil {
- return err
- }
if err := p.writeField1(oprot); err != nil {
return err
}
@@ -18963,22 +21037,7 @@ func (p *NodeFetchBlocksRawResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBlocksRawResult) writeField0(oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
- }
- if err := p.Success.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
- }
- }
- return err
-}
-
-func (p *NodeFetchBlocksRawResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeWriteBatchRawV2Result) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -18993,36 +21052,36 @@ func (p *NodeFetchBlocksRawResult) writeField1(oprot thrift.TProtocol) (err erro
return err
}
-func (p *NodeFetchBlocksRawResult) String() string {
+func (p *NodeWriteBatchRawV2Result) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeFetchBlocksRawResult(%+v)", *p)
+ return fmt.Sprintf("NodeWriteBatchRawV2Result(%+v)", *p)
}
// Attributes:
// - Req
-type NodeFetchBlocksMetadataRawV2Args struct {
- Req *FetchBlocksMetadataRawV2Request `thrift:"req,1" db:"req" json:"req"`
+type NodeWriteTaggedBatchRawArgs struct {
+ Req *WriteTaggedBatchRawRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeFetchBlocksMetadataRawV2Args() *NodeFetchBlocksMetadataRawV2Args {
- return &NodeFetchBlocksMetadataRawV2Args{}
+func NewNodeWriteTaggedBatchRawArgs() *NodeWriteTaggedBatchRawArgs {
+ return &NodeWriteTaggedBatchRawArgs{}
}
-var NodeFetchBlocksMetadataRawV2Args_Req_DEFAULT *FetchBlocksMetadataRawV2Request
+var NodeWriteTaggedBatchRawArgs_Req_DEFAULT *WriteTaggedBatchRawRequest
-func (p *NodeFetchBlocksMetadataRawV2Args) GetReq() *FetchBlocksMetadataRawV2Request {
+func (p *NodeWriteTaggedBatchRawArgs) GetReq() *WriteTaggedBatchRawRequest {
if !p.IsSetReq() {
- return NodeFetchBlocksMetadataRawV2Args_Req_DEFAULT
+ return NodeWriteTaggedBatchRawArgs_Req_DEFAULT
}
return p.Req
}
-func (p *NodeFetchBlocksMetadataRawV2Args) IsSetReq() bool {
+func (p *NodeWriteTaggedBatchRawArgs) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeFetchBlocksMetadataRawV2Args) Read(iprot thrift.TProtocol) error {
+func (p *NodeWriteTaggedBatchRawArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -19055,16 +21114,16 @@ func (p *NodeFetchBlocksMetadataRawV2Args) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBlocksMetadataRawV2Args) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &FetchBlocksMetadataRawV2Request{}
+func (p *NodeWriteTaggedBatchRawArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &WriteTaggedBatchRawRequest{}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeFetchBlocksMetadataRawV2Args) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("fetchBlocksMetadataRawV2_args"); err != nil {
+func (p *NodeWriteTaggedBatchRawArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("writeTaggedBatchRaw_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -19081,7 +21140,7 @@ func (p *NodeFetchBlocksMetadataRawV2Args) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeFetchBlocksMetadataRawV2Args) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeWriteTaggedBatchRawArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -19094,51 +21153,36 @@ func (p *NodeFetchBlocksMetadataRawV2Args) writeField1(oprot thrift.TProtocol) (
return err
}
-func (p *NodeFetchBlocksMetadataRawV2Args) String() string {
+func (p *NodeWriteTaggedBatchRawArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeFetchBlocksMetadataRawV2Args(%+v)", *p)
+ return fmt.Sprintf("NodeWriteTaggedBatchRawArgs(%+v)", *p)
}
// Attributes:
-// - Success
// - Err
-type NodeFetchBlocksMetadataRawV2Result struct {
- Success *FetchBlocksMetadataRawV2Result_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
-}
-
-func NewNodeFetchBlocksMetadataRawV2Result() *NodeFetchBlocksMetadataRawV2Result {
- return &NodeFetchBlocksMetadataRawV2Result{}
+type NodeWriteTaggedBatchRawResult struct {
+ Err *WriteBatchRawErrors `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-var NodeFetchBlocksMetadataRawV2Result_Success_DEFAULT *FetchBlocksMetadataRawV2Result_
-
-func (p *NodeFetchBlocksMetadataRawV2Result) GetSuccess() *FetchBlocksMetadataRawV2Result_ {
- if !p.IsSetSuccess() {
- return NodeFetchBlocksMetadataRawV2Result_Success_DEFAULT
- }
- return p.Success
+func NewNodeWriteTaggedBatchRawResult() *NodeWriteTaggedBatchRawResult {
+ return &NodeWriteTaggedBatchRawResult{}
}
-var NodeFetchBlocksMetadataRawV2Result_Err_DEFAULT *Error
+var NodeWriteTaggedBatchRawResult_Err_DEFAULT *WriteBatchRawErrors
-func (p *NodeFetchBlocksMetadataRawV2Result) GetErr() *Error {
+func (p *NodeWriteTaggedBatchRawResult) GetErr() *WriteBatchRawErrors {
if !p.IsSetErr() {
- return NodeFetchBlocksMetadataRawV2Result_Err_DEFAULT
+ return NodeWriteTaggedBatchRawResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeFetchBlocksMetadataRawV2Result) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *NodeFetchBlocksMetadataRawV2Result) IsSetErr() bool {
+func (p *NodeWriteTaggedBatchRawResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeFetchBlocksMetadataRawV2Result) Read(iprot thrift.TProtocol) error {
+func (p *NodeWriteTaggedBatchRawResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -19152,10 +21196,6 @@ func (p *NodeFetchBlocksMetadataRawV2Result) Read(iprot thrift.TProtocol) error
break
}
switch fieldId {
- case 0:
- if err := p.ReadField0(iprot); err != nil {
- return err
- }
case 1:
if err := p.ReadField1(iprot); err != nil {
return err
@@ -19175,32 +21215,19 @@ func (p *NodeFetchBlocksMetadataRawV2Result) Read(iprot thrift.TProtocol) error
return nil
}
-func (p *NodeFetchBlocksMetadataRawV2Result) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &FetchBlocksMetadataRawV2Result_{}
- if err := p.Success.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
- }
- return nil
-}
-
-func (p *NodeFetchBlocksMetadataRawV2Result) ReadField1(iprot thrift.TProtocol) error {
- p.Err = &Error{
- Type: 0,
- }
+func (p *NodeWriteTaggedBatchRawResult) ReadField1(iprot thrift.TProtocol) error {
+ p.Err = &WriteBatchRawErrors{}
if err := p.Err.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err)
}
return nil
}
-func (p *NodeFetchBlocksMetadataRawV2Result) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("fetchBlocksMetadataRawV2_result"); err != nil {
+func (p *NodeWriteTaggedBatchRawResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("writeTaggedBatchRaw_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
- if err := p.writeField0(oprot); err != nil {
- return err
- }
if err := p.writeField1(oprot); err != nil {
return err
}
@@ -19214,22 +21241,7 @@ func (p *NodeFetchBlocksMetadataRawV2Result) Write(oprot thrift.TProtocol) error
return nil
}
-func (p *NodeFetchBlocksMetadataRawV2Result) writeField0(oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
- }
- if err := p.Success.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
- }
- }
- return err
-}
-
-func (p *NodeFetchBlocksMetadataRawV2Result) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeWriteTaggedBatchRawResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -19244,36 +21256,36 @@ func (p *NodeFetchBlocksMetadataRawV2Result) writeField1(oprot thrift.TProtocol)
return err
}
-func (p *NodeFetchBlocksMetadataRawV2Result) String() string {
+func (p *NodeWriteTaggedBatchRawResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeFetchBlocksMetadataRawV2Result(%+v)", *p)
+ return fmt.Sprintf("NodeWriteTaggedBatchRawResult(%+v)", *p)
}
// Attributes:
// - Req
-type NodeWriteBatchRawArgs struct {
- Req *WriteBatchRawRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeWriteTaggedBatchRawV2Args struct {
+ Req *WriteTaggedBatchRawV2Request `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeWriteBatchRawArgs() *NodeWriteBatchRawArgs {
- return &NodeWriteBatchRawArgs{}
+func NewNodeWriteTaggedBatchRawV2Args() *NodeWriteTaggedBatchRawV2Args {
+ return &NodeWriteTaggedBatchRawV2Args{}
}
-var NodeWriteBatchRawArgs_Req_DEFAULT *WriteBatchRawRequest
+var NodeWriteTaggedBatchRawV2Args_Req_DEFAULT *WriteTaggedBatchRawV2Request
-func (p *NodeWriteBatchRawArgs) GetReq() *WriteBatchRawRequest {
+func (p *NodeWriteTaggedBatchRawV2Args) GetReq() *WriteTaggedBatchRawV2Request {
if !p.IsSetReq() {
- return NodeWriteBatchRawArgs_Req_DEFAULT
+ return NodeWriteTaggedBatchRawV2Args_Req_DEFAULT
}
return p.Req
}
-func (p *NodeWriteBatchRawArgs) IsSetReq() bool {
+func (p *NodeWriteTaggedBatchRawV2Args) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeWriteBatchRawArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeWriteTaggedBatchRawV2Args) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -19306,16 +21318,16 @@ func (p *NodeWriteBatchRawArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteBatchRawArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &WriteBatchRawRequest{}
+func (p *NodeWriteTaggedBatchRawV2Args) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &WriteTaggedBatchRawV2Request{}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeWriteBatchRawArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("writeBatchRaw_args"); err != nil {
+func (p *NodeWriteTaggedBatchRawV2Args) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("writeTaggedBatchRawV2_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -19332,7 +21344,7 @@ func (p *NodeWriteBatchRawArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteBatchRawArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeWriteTaggedBatchRawV2Args) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -19345,36 +21357,36 @@ func (p *NodeWriteBatchRawArgs) writeField1(oprot thrift.TProtocol) (err error)
return err
}
-func (p *NodeWriteBatchRawArgs) String() string {
+func (p *NodeWriteTaggedBatchRawV2Args) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeWriteBatchRawArgs(%+v)", *p)
+ return fmt.Sprintf("NodeWriteTaggedBatchRawV2Args(%+v)", *p)
}
// Attributes:
// - Err
-type NodeWriteBatchRawResult struct {
+type NodeWriteTaggedBatchRawV2Result struct {
Err *WriteBatchRawErrors `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeWriteBatchRawResult() *NodeWriteBatchRawResult {
- return &NodeWriteBatchRawResult{}
+func NewNodeWriteTaggedBatchRawV2Result() *NodeWriteTaggedBatchRawV2Result {
+ return &NodeWriteTaggedBatchRawV2Result{}
}
-var NodeWriteBatchRawResult_Err_DEFAULT *WriteBatchRawErrors
+var NodeWriteTaggedBatchRawV2Result_Err_DEFAULT *WriteBatchRawErrors
-func (p *NodeWriteBatchRawResult) GetErr() *WriteBatchRawErrors {
+func (p *NodeWriteTaggedBatchRawV2Result) GetErr() *WriteBatchRawErrors {
if !p.IsSetErr() {
- return NodeWriteBatchRawResult_Err_DEFAULT
+ return NodeWriteTaggedBatchRawV2Result_Err_DEFAULT
}
return p.Err
}
-func (p *NodeWriteBatchRawResult) IsSetErr() bool {
+func (p *NodeWriteTaggedBatchRawV2Result) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeWriteBatchRawResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeWriteTaggedBatchRawV2Result) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -19407,7 +21419,7 @@ func (p *NodeWriteBatchRawResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteBatchRawResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeWriteTaggedBatchRawV2Result) ReadField1(iprot thrift.TProtocol) error {
p.Err = &WriteBatchRawErrors{}
if err := p.Err.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err)
@@ -19415,8 +21427,8 @@ func (p *NodeWriteBatchRawResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteBatchRawResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("writeBatchRaw_result"); err != nil {
+func (p *NodeWriteTaggedBatchRawV2Result) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("writeTaggedBatchRawV2_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -19433,7 +21445,7 @@ func (p *NodeWriteBatchRawResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteBatchRawResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeWriteTaggedBatchRawV2Result) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -19448,36 +21460,21 @@ func (p *NodeWriteBatchRawResult) writeField1(oprot thrift.TProtocol) (err error
return err
}
-func (p *NodeWriteBatchRawResult) String() string {
+func (p *NodeWriteTaggedBatchRawV2Result) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeWriteBatchRawResult(%+v)", *p)
-}
-
-// Attributes:
-// - Req
-type NodeWriteBatchRawV2Args struct {
- Req *WriteBatchRawV2Request `thrift:"req,1" db:"req" json:"req"`
+ return fmt.Sprintf("NodeWriteTaggedBatchRawV2Result(%+v)", *p)
}
-func NewNodeWriteBatchRawV2Args() *NodeWriteBatchRawV2Args {
- return &NodeWriteBatchRawV2Args{}
+type NodeRepairArgs struct {
}
-var NodeWriteBatchRawV2Args_Req_DEFAULT *WriteBatchRawV2Request
-
-func (p *NodeWriteBatchRawV2Args) GetReq() *WriteBatchRawV2Request {
- if !p.IsSetReq() {
- return NodeWriteBatchRawV2Args_Req_DEFAULT
- }
- return p.Req
-}
-func (p *NodeWriteBatchRawV2Args) IsSetReq() bool {
- return p.Req != nil
+func NewNodeRepairArgs() *NodeRepairArgs {
+ return &NodeRepairArgs{}
}
-func (p *NodeWriteBatchRawV2Args) Read(iprot thrift.TProtocol) error {
+func (p *NodeRepairArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -19490,15 +21487,8 @@ func (p *NodeWriteBatchRawV2Args) Read(iprot thrift.TProtocol) error {
if fieldTypeId == thrift.STOP {
break
}
- switch fieldId {
- case 1:
- if err := p.ReadField1(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
@@ -19510,22 +21500,11 @@ func (p *NodeWriteBatchRawV2Args) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteBatchRawV2Args) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &WriteBatchRawV2Request{}
- if err := p.Req.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
- }
- return nil
-}
-
-func (p *NodeWriteBatchRawV2Args) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("writeBatchRawV2_args"); err != nil {
+func (p *NodeRepairArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("repair_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
- if err := p.writeField1(oprot); err != nil {
- return err
- }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -19536,49 +21515,36 @@ func (p *NodeWriteBatchRawV2Args) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteBatchRawV2Args) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
- }
- if err := p.Req.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
- }
- return err
-}
-
-func (p *NodeWriteBatchRawV2Args) String() string {
+func (p *NodeRepairArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeWriteBatchRawV2Args(%+v)", *p)
+ return fmt.Sprintf("NodeRepairArgs(%+v)", *p)
}
// Attributes:
// - Err
-type NodeWriteBatchRawV2Result struct {
- Err *WriteBatchRawErrors `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeRepairResult struct {
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeWriteBatchRawV2Result() *NodeWriteBatchRawV2Result {
- return &NodeWriteBatchRawV2Result{}
+func NewNodeRepairResult() *NodeRepairResult {
+ return &NodeRepairResult{}
}
-var NodeWriteBatchRawV2Result_Err_DEFAULT *WriteBatchRawErrors
+var NodeRepairResult_Err_DEFAULT *Error
-func (p *NodeWriteBatchRawV2Result) GetErr() *WriteBatchRawErrors {
+func (p *NodeRepairResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeWriteBatchRawV2Result_Err_DEFAULT
+ return NodeRepairResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeWriteBatchRawV2Result) IsSetErr() bool {
+func (p *NodeRepairResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeWriteBatchRawV2Result) Read(iprot thrift.TProtocol) error {
+func (p *NodeRepairResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -19611,16 +21577,18 @@ func (p *NodeWriteBatchRawV2Result) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteBatchRawV2Result) ReadField1(iprot thrift.TProtocol) error {
- p.Err = &WriteBatchRawErrors{}
+func (p *NodeRepairResult) ReadField1(iprot thrift.TProtocol) error {
+ p.Err = &Error{
+ Type: 0,
+ }
if err := p.Err.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err)
}
return nil
}
-func (p *NodeWriteBatchRawV2Result) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("writeBatchRawV2_result"); err != nil {
+func (p *NodeRepairResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("repair_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -19637,7 +21605,7 @@ func (p *NodeWriteBatchRawV2Result) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteBatchRawV2Result) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeRepairResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -19652,36 +21620,36 @@ func (p *NodeWriteBatchRawV2Result) writeField1(oprot thrift.TProtocol) (err err
return err
}
-func (p *NodeWriteBatchRawV2Result) String() string {
+func (p *NodeRepairResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeWriteBatchRawV2Result(%+v)", *p)
+ return fmt.Sprintf("NodeRepairResult(%+v)", *p)
}
// Attributes:
// - Req
-type NodeWriteTaggedBatchRawArgs struct {
- Req *WriteTaggedBatchRawRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeTruncateArgs struct {
+ Req *TruncateRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeWriteTaggedBatchRawArgs() *NodeWriteTaggedBatchRawArgs {
- return &NodeWriteTaggedBatchRawArgs{}
+func NewNodeTruncateArgs() *NodeTruncateArgs {
+ return &NodeTruncateArgs{}
}
-var NodeWriteTaggedBatchRawArgs_Req_DEFAULT *WriteTaggedBatchRawRequest
+var NodeTruncateArgs_Req_DEFAULT *TruncateRequest
-func (p *NodeWriteTaggedBatchRawArgs) GetReq() *WriteTaggedBatchRawRequest {
+func (p *NodeTruncateArgs) GetReq() *TruncateRequest {
if !p.IsSetReq() {
- return NodeWriteTaggedBatchRawArgs_Req_DEFAULT
+ return NodeTruncateArgs_Req_DEFAULT
}
return p.Req
}
-func (p *NodeWriteTaggedBatchRawArgs) IsSetReq() bool {
+func (p *NodeTruncateArgs) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeWriteTaggedBatchRawArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeTruncateArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -19714,16 +21682,16 @@ func (p *NodeWriteTaggedBatchRawArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedBatchRawArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &WriteTaggedBatchRawRequest{}
+func (p *NodeTruncateArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &TruncateRequest{}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeWriteTaggedBatchRawArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("writeTaggedBatchRaw_args"); err != nil {
+func (p *NodeTruncateArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("truncate_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -19740,7 +21708,7 @@ func (p *NodeWriteTaggedBatchRawArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedBatchRawArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeTruncateArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -19753,36 +21721,51 @@ func (p *NodeWriteTaggedBatchRawArgs) writeField1(oprot thrift.TProtocol) (err e
return err
}
-func (p *NodeWriteTaggedBatchRawArgs) String() string {
+func (p *NodeTruncateArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeWriteTaggedBatchRawArgs(%+v)", *p)
+ return fmt.Sprintf("NodeTruncateArgs(%+v)", *p)
}
// Attributes:
+// - Success
// - Err
-type NodeWriteTaggedBatchRawResult struct {
- Err *WriteBatchRawErrors `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeTruncateResult struct {
+ Success *TruncateResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeWriteTaggedBatchRawResult() *NodeWriteTaggedBatchRawResult {
- return &NodeWriteTaggedBatchRawResult{}
+func NewNodeTruncateResult() *NodeTruncateResult {
+ return &NodeTruncateResult{}
}
-var NodeWriteTaggedBatchRawResult_Err_DEFAULT *WriteBatchRawErrors
+var NodeTruncateResult_Success_DEFAULT *TruncateResult_
-func (p *NodeWriteTaggedBatchRawResult) GetErr() *WriteBatchRawErrors {
+func (p *NodeTruncateResult) GetSuccess() *TruncateResult_ {
+ if !p.IsSetSuccess() {
+ return NodeTruncateResult_Success_DEFAULT
+ }
+ return p.Success
+}
+
+var NodeTruncateResult_Err_DEFAULT *Error
+
+func (p *NodeTruncateResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeWriteTaggedBatchRawResult_Err_DEFAULT
+ return NodeTruncateResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeWriteTaggedBatchRawResult) IsSetErr() bool {
+func (p *NodeTruncateResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *NodeTruncateResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeWriteTaggedBatchRawResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeTruncateResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -19796,6 +21779,10 @@ func (p *NodeWriteTaggedBatchRawResult) Read(iprot thrift.TProtocol) error {
break
}
switch fieldId {
+ case 0:
+ if err := p.ReadField0(iprot); err != nil {
+ return err
+ }
case 1:
if err := p.ReadField1(iprot); err != nil {
return err
@@ -19815,19 +21802,32 @@ func (p *NodeWriteTaggedBatchRawResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedBatchRawResult) ReadField1(iprot thrift.TProtocol) error {
- p.Err = &WriteBatchRawErrors{}
+func (p *NodeTruncateResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &TruncateResult_{}
+ if err := p.Success.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+ }
+ return nil
+}
+
+func (p *NodeTruncateResult) ReadField1(iprot thrift.TProtocol) error {
+ p.Err = &Error{
+ Type: 0,
+ }
if err := p.Err.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err)
}
return nil
}
-func (p *NodeWriteTaggedBatchRawResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("writeTaggedBatchRaw_result"); err != nil {
+func (p *NodeTruncateResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("truncate_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
if err := p.writeField1(oprot); err != nil {
return err
}
@@ -19841,7 +21841,22 @@ func (p *NodeWriteTaggedBatchRawResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedBatchRawResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeTruncateResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *NodeTruncateResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -19856,57 +21871,35 @@ func (p *NodeWriteTaggedBatchRawResult) writeField1(oprot thrift.TProtocol) (err
return err
}
-func (p *NodeWriteTaggedBatchRawResult) String() string {
+func (p *NodeTruncateResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeWriteTaggedBatchRawResult(%+v)", *p)
-}
-
-// Attributes:
-// - Req
-type NodeWriteTaggedBatchRawV2Args struct {
- Req *WriteTaggedBatchRawV2Request `thrift:"req,1" db:"req" json:"req"`
+ return fmt.Sprintf("NodeTruncateResult(%+v)", *p)
}
-func NewNodeWriteTaggedBatchRawV2Args() *NodeWriteTaggedBatchRawV2Args {
- return &NodeWriteTaggedBatchRawV2Args{}
+type NodeHealthArgs struct {
}
-var NodeWriteTaggedBatchRawV2Args_Req_DEFAULT *WriteTaggedBatchRawV2Request
-
-func (p *NodeWriteTaggedBatchRawV2Args) GetReq() *WriteTaggedBatchRawV2Request {
- if !p.IsSetReq() {
- return NodeWriteTaggedBatchRawV2Args_Req_DEFAULT
- }
- return p.Req
-}
-func (p *NodeWriteTaggedBatchRawV2Args) IsSetReq() bool {
- return p.Req != nil
+func NewNodeHealthArgs() *NodeHealthArgs {
+ return &NodeHealthArgs{}
}
-func (p *NodeWriteTaggedBatchRawV2Args) Read(iprot thrift.TProtocol) error {
+func (p *NodeHealthArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.ReadField1(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
@@ -19918,22 +21911,11 @@ func (p *NodeWriteTaggedBatchRawV2Args) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedBatchRawV2Args) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &WriteTaggedBatchRawV2Request{}
- if err := p.Req.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
- }
- return nil
-}
-
-func (p *NodeWriteTaggedBatchRawV2Args) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("writeTaggedBatchRawV2_args"); err != nil {
+func (p *NodeHealthArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("health_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
- if err := p.writeField1(oprot); err != nil {
- return err
- }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -19944,49 +21926,51 @@ func (p *NodeWriteTaggedBatchRawV2Args) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedBatchRawV2Args) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
- }
- if err := p.Req.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
- }
- return err
-}
-
-func (p *NodeWriteTaggedBatchRawV2Args) String() string {
+func (p *NodeHealthArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeWriteTaggedBatchRawV2Args(%+v)", *p)
+ return fmt.Sprintf("NodeHealthArgs(%+v)", *p)
}
// Attributes:
+// - Success
// - Err
-type NodeWriteTaggedBatchRawV2Result struct {
- Err *WriteBatchRawErrors `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeHealthResult struct {
+ Success *NodeHealthResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeWriteTaggedBatchRawV2Result() *NodeWriteTaggedBatchRawV2Result {
- return &NodeWriteTaggedBatchRawV2Result{}
+func NewNodeHealthResult() *NodeHealthResult {
+ return &NodeHealthResult{}
}
-var NodeWriteTaggedBatchRawV2Result_Err_DEFAULT *WriteBatchRawErrors
+var NodeHealthResult_Success_DEFAULT *NodeHealthResult_
-func (p *NodeWriteTaggedBatchRawV2Result) GetErr() *WriteBatchRawErrors {
+func (p *NodeHealthResult) GetSuccess() *NodeHealthResult_ {
+ if !p.IsSetSuccess() {
+ return NodeHealthResult_Success_DEFAULT
+ }
+ return p.Success
+}
+
+var NodeHealthResult_Err_DEFAULT *Error
+
+func (p *NodeHealthResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeWriteTaggedBatchRawV2Result_Err_DEFAULT
+ return NodeHealthResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeWriteTaggedBatchRawV2Result) IsSetErr() bool {
+func (p *NodeHealthResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *NodeHealthResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeWriteTaggedBatchRawV2Result) Read(iprot thrift.TProtocol) error {
+func (p *NodeHealthResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -20000,6 +21984,10 @@ func (p *NodeWriteTaggedBatchRawV2Result) Read(iprot thrift.TProtocol) error {
break
}
switch fieldId {
+ case 0:
+ if err := p.ReadField0(iprot); err != nil {
+ return err
+ }
case 1:
if err := p.ReadField1(iprot); err != nil {
return err
@@ -20019,19 +22007,32 @@ func (p *NodeWriteTaggedBatchRawV2Result) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedBatchRawV2Result) ReadField1(iprot thrift.TProtocol) error {
- p.Err = &WriteBatchRawErrors{}
+func (p *NodeHealthResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &NodeHealthResult_{}
+ if err := p.Success.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+ }
+ return nil
+}
+
+func (p *NodeHealthResult) ReadField1(iprot thrift.TProtocol) error {
+ p.Err = &Error{
+ Type: 0,
+ }
if err := p.Err.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err)
}
return nil
}
-func (p *NodeWriteTaggedBatchRawV2Result) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("writeTaggedBatchRawV2_result"); err != nil {
+func (p *NodeHealthResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("health_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
if err := p.writeField1(oprot); err != nil {
return err
}
@@ -20045,7 +22046,22 @@ func (p *NodeWriteTaggedBatchRawV2Result) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeWriteTaggedBatchRawV2Result) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeHealthResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *NodeHealthResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -20060,21 +22076,21 @@ func (p *NodeWriteTaggedBatchRawV2Result) writeField1(oprot thrift.TProtocol) (e
return err
}
-func (p *NodeWriteTaggedBatchRawV2Result) String() string {
+func (p *NodeHealthResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeWriteTaggedBatchRawV2Result(%+v)", *p)
+ return fmt.Sprintf("NodeHealthResult(%+v)", *p)
}
-type NodeRepairArgs struct {
+type NodeBootstrappedArgs struct {
}
-func NewNodeRepairArgs() *NodeRepairArgs {
- return &NodeRepairArgs{}
+func NewNodeBootstrappedArgs() *NodeBootstrappedArgs {
+ return &NodeBootstrappedArgs{}
}
-func (p *NodeRepairArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeBootstrappedArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -20100,8 +22116,8 @@ func (p *NodeRepairArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeRepairArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("repair_args"); err != nil {
+func (p *NodeBootstrappedArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("bootstrapped_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -20115,36 +22131,51 @@ func (p *NodeRepairArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeRepairArgs) String() string {
+func (p *NodeBootstrappedArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeRepairArgs(%+v)", *p)
+ return fmt.Sprintf("NodeBootstrappedArgs(%+v)", *p)
}
// Attributes:
+// - Success
// - Err
-type NodeRepairResult struct {
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeBootstrappedResult struct {
+ Success *NodeBootstrappedResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeRepairResult() *NodeRepairResult {
- return &NodeRepairResult{}
+func NewNodeBootstrappedResult() *NodeBootstrappedResult {
+ return &NodeBootstrappedResult{}
}
-var NodeRepairResult_Err_DEFAULT *Error
+var NodeBootstrappedResult_Success_DEFAULT *NodeBootstrappedResult_
-func (p *NodeRepairResult) GetErr() *Error {
+func (p *NodeBootstrappedResult) GetSuccess() *NodeBootstrappedResult_ {
+ if !p.IsSetSuccess() {
+ return NodeBootstrappedResult_Success_DEFAULT
+ }
+ return p.Success
+}
+
+var NodeBootstrappedResult_Err_DEFAULT *Error
+
+func (p *NodeBootstrappedResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeRepairResult_Err_DEFAULT
+ return NodeBootstrappedResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeRepairResult) IsSetErr() bool {
+func (p *NodeBootstrappedResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *NodeBootstrappedResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeRepairResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeBootstrappedResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -20158,6 +22189,10 @@ func (p *NodeRepairResult) Read(iprot thrift.TProtocol) error {
break
}
switch fieldId {
+ case 0:
+ if err := p.ReadField0(iprot); err != nil {
+ return err
+ }
case 1:
if err := p.ReadField1(iprot); err != nil {
return err
@@ -20177,7 +22212,15 @@ func (p *NodeRepairResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeRepairResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeBootstrappedResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &NodeBootstrappedResult_{}
+ if err := p.Success.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+ }
+ return nil
+}
+
+func (p *NodeBootstrappedResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -20187,11 +22230,14 @@ func (p *NodeRepairResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeRepairResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("repair_result"); err != nil {
+func (p *NodeBootstrappedResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("bootstrapped_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
+ if err := p.writeField0(oprot); err != nil {
+ return err
+ }
if err := p.writeField1(oprot); err != nil {
return err
}
@@ -20205,7 +22251,22 @@ func (p *NodeRepairResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeRepairResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeBootstrappedResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+ }
+ }
+ return err
+}
+
+func (p *NodeBootstrappedResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -20220,36 +22281,21 @@ func (p *NodeRepairResult) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeRepairResult) String() string {
+func (p *NodeBootstrappedResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeRepairResult(%+v)", *p)
-}
-
-// Attributes:
-// - Req
-type NodeTruncateArgs struct {
- Req *TruncateRequest `thrift:"req,1" db:"req" json:"req"`
+ return fmt.Sprintf("NodeBootstrappedResult(%+v)", *p)
}
-func NewNodeTruncateArgs() *NodeTruncateArgs {
- return &NodeTruncateArgs{}
+type NodeBootstrappedInPlacementOrNoPlacementArgs struct {
}
-var NodeTruncateArgs_Req_DEFAULT *TruncateRequest
-
-func (p *NodeTruncateArgs) GetReq() *TruncateRequest {
- if !p.IsSetReq() {
- return NodeTruncateArgs_Req_DEFAULT
- }
- return p.Req
-}
-func (p *NodeTruncateArgs) IsSetReq() bool {
- return p.Req != nil
+func NewNodeBootstrappedInPlacementOrNoPlacementArgs() *NodeBootstrappedInPlacementOrNoPlacementArgs {
+ return &NodeBootstrappedInPlacementOrNoPlacementArgs{}
}
-func (p *NodeTruncateArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeBootstrappedInPlacementOrNoPlacementArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -20262,15 +22308,8 @@ func (p *NodeTruncateArgs) Read(iprot thrift.TProtocol) error {
if fieldTypeId == thrift.STOP {
break
}
- switch fieldId {
- case 1:
- if err := p.ReadField1(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
@@ -20282,22 +22321,11 @@ func (p *NodeTruncateArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeTruncateArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &TruncateRequest{}
- if err := p.Req.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
- }
- return nil
-}
-
-func (p *NodeTruncateArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("truncate_args"); err != nil {
+func (p *NodeBootstrappedInPlacementOrNoPlacementArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("bootstrappedInPlacementOrNoPlacement_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
- if err := p.writeField1(oprot); err != nil {
- return err
- }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -20308,64 +22336,51 @@ func (p *NodeTruncateArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeTruncateArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
- }
- if err := p.Req.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
- }
- return err
-}
-
-func (p *NodeTruncateArgs) String() string {
+func (p *NodeBootstrappedInPlacementOrNoPlacementArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeTruncateArgs(%+v)", *p)
+ return fmt.Sprintf("NodeBootstrappedInPlacementOrNoPlacementArgs(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeTruncateResult struct {
- Success *TruncateResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeBootstrappedInPlacementOrNoPlacementResult struct {
+ Success *NodeBootstrappedInPlacementOrNoPlacementResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeTruncateResult() *NodeTruncateResult {
- return &NodeTruncateResult{}
+func NewNodeBootstrappedInPlacementOrNoPlacementResult() *NodeBootstrappedInPlacementOrNoPlacementResult {
+ return &NodeBootstrappedInPlacementOrNoPlacementResult{}
}
-var NodeTruncateResult_Success_DEFAULT *TruncateResult_
+var NodeBootstrappedInPlacementOrNoPlacementResult_Success_DEFAULT *NodeBootstrappedInPlacementOrNoPlacementResult_
-func (p *NodeTruncateResult) GetSuccess() *TruncateResult_ {
+func (p *NodeBootstrappedInPlacementOrNoPlacementResult) GetSuccess() *NodeBootstrappedInPlacementOrNoPlacementResult_ {
if !p.IsSetSuccess() {
- return NodeTruncateResult_Success_DEFAULT
+ return NodeBootstrappedInPlacementOrNoPlacementResult_Success_DEFAULT
}
return p.Success
}
-var NodeTruncateResult_Err_DEFAULT *Error
+var NodeBootstrappedInPlacementOrNoPlacementResult_Err_DEFAULT *Error
-func (p *NodeTruncateResult) GetErr() *Error {
+func (p *NodeBootstrappedInPlacementOrNoPlacementResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeTruncateResult_Err_DEFAULT
+ return NodeBootstrappedInPlacementOrNoPlacementResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeTruncateResult) IsSetSuccess() bool {
+func (p *NodeBootstrappedInPlacementOrNoPlacementResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeTruncateResult) IsSetErr() bool {
+func (p *NodeBootstrappedInPlacementOrNoPlacementResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeTruncateResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeBootstrappedInPlacementOrNoPlacementResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -20402,15 +22417,15 @@ func (p *NodeTruncateResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeTruncateResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &TruncateResult_{}
+func (p *NodeBootstrappedInPlacementOrNoPlacementResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &NodeBootstrappedInPlacementOrNoPlacementResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeTruncateResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeBootstrappedInPlacementOrNoPlacementResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -20420,8 +22435,8 @@ func (p *NodeTruncateResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeTruncateResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("truncate_result"); err != nil {
+func (p *NodeBootstrappedInPlacementOrNoPlacementResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("bootstrappedInPlacementOrNoPlacement_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -20441,7 +22456,7 @@ func (p *NodeTruncateResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeTruncateResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeBootstrappedInPlacementOrNoPlacementResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -20456,7 +22471,7 @@ func (p *NodeTruncateResult) writeField0(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeTruncateResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeBootstrappedInPlacementOrNoPlacementResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -20471,21 +22486,21 @@ func (p *NodeTruncateResult) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeTruncateResult) String() string {
+func (p *NodeBootstrappedInPlacementOrNoPlacementResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeTruncateResult(%+v)", *p)
+ return fmt.Sprintf("NodeBootstrappedInPlacementOrNoPlacementResult(%+v)", *p)
}
-type NodeHealthArgs struct {
+type NodeGetPersistRateLimitArgs struct {
}
-func NewNodeHealthArgs() *NodeHealthArgs {
- return &NodeHealthArgs{}
+func NewNodeGetPersistRateLimitArgs() *NodeGetPersistRateLimitArgs {
+ return &NodeGetPersistRateLimitArgs{}
}
-func (p *NodeHealthArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeGetPersistRateLimitArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -20511,8 +22526,8 @@ func (p *NodeHealthArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeHealthArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("health_args"); err != nil {
+func (p *NodeGetPersistRateLimitArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getPersistRateLimit_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -20526,51 +22541,51 @@ func (p *NodeHealthArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeHealthArgs) String() string {
+func (p *NodeGetPersistRateLimitArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeHealthArgs(%+v)", *p)
+ return fmt.Sprintf("NodeGetPersistRateLimitArgs(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeHealthResult struct {
- Success *NodeHealthResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeGetPersistRateLimitResult struct {
+ Success *NodePersistRateLimitResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeHealthResult() *NodeHealthResult {
- return &NodeHealthResult{}
+func NewNodeGetPersistRateLimitResult() *NodeGetPersistRateLimitResult {
+ return &NodeGetPersistRateLimitResult{}
}
-var NodeHealthResult_Success_DEFAULT *NodeHealthResult_
+var NodeGetPersistRateLimitResult_Success_DEFAULT *NodePersistRateLimitResult_
-func (p *NodeHealthResult) GetSuccess() *NodeHealthResult_ {
+func (p *NodeGetPersistRateLimitResult) GetSuccess() *NodePersistRateLimitResult_ {
if !p.IsSetSuccess() {
- return NodeHealthResult_Success_DEFAULT
+ return NodeGetPersistRateLimitResult_Success_DEFAULT
}
return p.Success
}
-var NodeHealthResult_Err_DEFAULT *Error
+var NodeGetPersistRateLimitResult_Err_DEFAULT *Error
-func (p *NodeHealthResult) GetErr() *Error {
+func (p *NodeGetPersistRateLimitResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeHealthResult_Err_DEFAULT
+ return NodeGetPersistRateLimitResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeHealthResult) IsSetSuccess() bool {
+func (p *NodeGetPersistRateLimitResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeHealthResult) IsSetErr() bool {
+func (p *NodeGetPersistRateLimitResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeHealthResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeGetPersistRateLimitResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -20607,15 +22622,15 @@ func (p *NodeHealthResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeHealthResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &NodeHealthResult_{}
+func (p *NodeGetPersistRateLimitResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &NodePersistRateLimitResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeHealthResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeGetPersistRateLimitResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -20625,8 +22640,8 @@ func (p *NodeHealthResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeHealthResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("health_result"); err != nil {
+func (p *NodeGetPersistRateLimitResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getPersistRateLimit_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -20646,7 +22661,7 @@ func (p *NodeHealthResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeHealthResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeGetPersistRateLimitResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -20661,7 +22676,7 @@ func (p *NodeHealthResult) writeField0(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeHealthResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeGetPersistRateLimitResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -20676,21 +22691,36 @@ func (p *NodeHealthResult) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
-func (p *NodeHealthResult) String() string {
+func (p *NodeGetPersistRateLimitResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeHealthResult(%+v)", *p)
+ return fmt.Sprintf("NodeGetPersistRateLimitResult(%+v)", *p)
}
-type NodeBootstrappedArgs struct {
+// Attributes:
+// - Req
+type NodeSetPersistRateLimitArgs struct {
+ Req *NodeSetPersistRateLimitRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeBootstrappedArgs() *NodeBootstrappedArgs {
- return &NodeBootstrappedArgs{}
+func NewNodeSetPersistRateLimitArgs() *NodeSetPersistRateLimitArgs {
+ return &NodeSetPersistRateLimitArgs{}
}
-func (p *NodeBootstrappedArgs) Read(iprot thrift.TProtocol) error {
+var NodeSetPersistRateLimitArgs_Req_DEFAULT *NodeSetPersistRateLimitRequest
+
+func (p *NodeSetPersistRateLimitArgs) GetReq() *NodeSetPersistRateLimitRequest {
+ if !p.IsSetReq() {
+ return NodeSetPersistRateLimitArgs_Req_DEFAULT
+ }
+ return p.Req
+}
+func (p *NodeSetPersistRateLimitArgs) IsSetReq() bool {
+ return p.Req != nil
+}
+
+func (p *NodeSetPersistRateLimitArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -20703,8 +22733,15 @@ func (p *NodeBootstrappedArgs) Read(iprot thrift.TProtocol) error {
if fieldTypeId == thrift.STOP {
break
}
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
+ switch fieldId {
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
@@ -20716,11 +22753,22 @@ func (p *NodeBootstrappedArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeBootstrappedArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("bootstrapped_args"); err != nil {
+func (p *NodeSetPersistRateLimitArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &NodeSetPersistRateLimitRequest{}
+ if err := p.Req.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
+ }
+ return nil
+}
+
+func (p *NodeSetPersistRateLimitArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("setPersistRateLimit_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -20731,51 +22779,64 @@ func (p *NodeBootstrappedArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeBootstrappedArgs) String() string {
+func (p *NodeSetPersistRateLimitArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
+ }
+ if err := p.Req.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
+ }
+ return err
+}
+
+func (p *NodeSetPersistRateLimitArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeBootstrappedArgs(%+v)", *p)
+ return fmt.Sprintf("NodeSetPersistRateLimitArgs(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeBootstrappedResult struct {
- Success *NodeBootstrappedResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeSetPersistRateLimitResult struct {
+ Success *NodePersistRateLimitResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeBootstrappedResult() *NodeBootstrappedResult {
- return &NodeBootstrappedResult{}
+func NewNodeSetPersistRateLimitResult() *NodeSetPersistRateLimitResult {
+ return &NodeSetPersistRateLimitResult{}
}
-var NodeBootstrappedResult_Success_DEFAULT *NodeBootstrappedResult_
+var NodeSetPersistRateLimitResult_Success_DEFAULT *NodePersistRateLimitResult_
-func (p *NodeBootstrappedResult) GetSuccess() *NodeBootstrappedResult_ {
+func (p *NodeSetPersistRateLimitResult) GetSuccess() *NodePersistRateLimitResult_ {
if !p.IsSetSuccess() {
- return NodeBootstrappedResult_Success_DEFAULT
+ return NodeSetPersistRateLimitResult_Success_DEFAULT
}
return p.Success
}
-var NodeBootstrappedResult_Err_DEFAULT *Error
+var NodeSetPersistRateLimitResult_Err_DEFAULT *Error
-func (p *NodeBootstrappedResult) GetErr() *Error {
+func (p *NodeSetPersistRateLimitResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeBootstrappedResult_Err_DEFAULT
+ return NodeSetPersistRateLimitResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeBootstrappedResult) IsSetSuccess() bool {
+func (p *NodeSetPersistRateLimitResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeBootstrappedResult) IsSetErr() bool {
+func (p *NodeSetPersistRateLimitResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeBootstrappedResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeSetPersistRateLimitResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -20812,15 +22873,15 @@ func (p *NodeBootstrappedResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeBootstrappedResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &NodeBootstrappedResult_{}
+func (p *NodeSetPersistRateLimitResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &NodePersistRateLimitResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeBootstrappedResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeSetPersistRateLimitResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -20830,8 +22891,8 @@ func (p *NodeBootstrappedResult) ReadField1(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeBootstrappedResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("bootstrapped_result"); err != nil {
+func (p *NodeSetPersistRateLimitResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("setPersistRateLimit_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -20851,7 +22912,7 @@ func (p *NodeBootstrappedResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeBootstrappedResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeSetPersistRateLimitResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -20866,7 +22927,7 @@ func (p *NodeBootstrappedResult) writeField0(oprot thrift.TProtocol) (err error)
return err
}
-func (p *NodeBootstrappedResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeSetPersistRateLimitResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -20881,21 +22942,21 @@ func (p *NodeBootstrappedResult) writeField1(oprot thrift.TProtocol) (err error)
return err
}
-func (p *NodeBootstrappedResult) String() string {
+func (p *NodeSetPersistRateLimitResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeBootstrappedResult(%+v)", *p)
+ return fmt.Sprintf("NodeSetPersistRateLimitResult(%+v)", *p)
}
-type NodeBootstrappedInPlacementOrNoPlacementArgs struct {
+type NodeGetWriteNewSeriesAsyncArgs struct {
}
-func NewNodeBootstrappedInPlacementOrNoPlacementArgs() *NodeBootstrappedInPlacementOrNoPlacementArgs {
- return &NodeBootstrappedInPlacementOrNoPlacementArgs{}
+func NewNodeGetWriteNewSeriesAsyncArgs() *NodeGetWriteNewSeriesAsyncArgs {
+ return &NodeGetWriteNewSeriesAsyncArgs{}
}
-func (p *NodeBootstrappedInPlacementOrNoPlacementArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeGetWriteNewSeriesAsyncArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -20921,8 +22982,8 @@ func (p *NodeBootstrappedInPlacementOrNoPlacementArgs) Read(iprot thrift.TProtoc
return nil
}
-func (p *NodeBootstrappedInPlacementOrNoPlacementArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("bootstrappedInPlacementOrNoPlacement_args"); err != nil {
+func (p *NodeGetWriteNewSeriesAsyncArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getWriteNewSeriesAsync_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -20936,51 +22997,51 @@ func (p *NodeBootstrappedInPlacementOrNoPlacementArgs) Write(oprot thrift.TProto
return nil
}
-func (p *NodeBootstrappedInPlacementOrNoPlacementArgs) String() string {
+func (p *NodeGetWriteNewSeriesAsyncArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeBootstrappedInPlacementOrNoPlacementArgs(%+v)", *p)
+ return fmt.Sprintf("NodeGetWriteNewSeriesAsyncArgs(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeBootstrappedInPlacementOrNoPlacementResult struct {
- Success *NodeBootstrappedInPlacementOrNoPlacementResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeGetWriteNewSeriesAsyncResult struct {
+ Success *NodeWriteNewSeriesAsyncResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeBootstrappedInPlacementOrNoPlacementResult() *NodeBootstrappedInPlacementOrNoPlacementResult {
- return &NodeBootstrappedInPlacementOrNoPlacementResult{}
+func NewNodeGetWriteNewSeriesAsyncResult() *NodeGetWriteNewSeriesAsyncResult {
+ return &NodeGetWriteNewSeriesAsyncResult{}
}
-var NodeBootstrappedInPlacementOrNoPlacementResult_Success_DEFAULT *NodeBootstrappedInPlacementOrNoPlacementResult_
+var NodeGetWriteNewSeriesAsyncResult_Success_DEFAULT *NodeWriteNewSeriesAsyncResult_
-func (p *NodeBootstrappedInPlacementOrNoPlacementResult) GetSuccess() *NodeBootstrappedInPlacementOrNoPlacementResult_ {
+func (p *NodeGetWriteNewSeriesAsyncResult) GetSuccess() *NodeWriteNewSeriesAsyncResult_ {
if !p.IsSetSuccess() {
- return NodeBootstrappedInPlacementOrNoPlacementResult_Success_DEFAULT
+ return NodeGetWriteNewSeriesAsyncResult_Success_DEFAULT
}
return p.Success
}
-var NodeBootstrappedInPlacementOrNoPlacementResult_Err_DEFAULT *Error
+var NodeGetWriteNewSeriesAsyncResult_Err_DEFAULT *Error
-func (p *NodeBootstrappedInPlacementOrNoPlacementResult) GetErr() *Error {
+func (p *NodeGetWriteNewSeriesAsyncResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeBootstrappedInPlacementOrNoPlacementResult_Err_DEFAULT
+ return NodeGetWriteNewSeriesAsyncResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeBootstrappedInPlacementOrNoPlacementResult) IsSetSuccess() bool {
+func (p *NodeGetWriteNewSeriesAsyncResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeBootstrappedInPlacementOrNoPlacementResult) IsSetErr() bool {
+func (p *NodeGetWriteNewSeriesAsyncResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeBootstrappedInPlacementOrNoPlacementResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeGetWriteNewSeriesAsyncResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -21017,15 +23078,15 @@ func (p *NodeBootstrappedInPlacementOrNoPlacementResult) Read(iprot thrift.TProt
return nil
}
-func (p *NodeBootstrappedInPlacementOrNoPlacementResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &NodeBootstrappedInPlacementOrNoPlacementResult_{}
+func (p *NodeGetWriteNewSeriesAsyncResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &NodeWriteNewSeriesAsyncResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeBootstrappedInPlacementOrNoPlacementResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeGetWriteNewSeriesAsyncResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -21035,8 +23096,8 @@ func (p *NodeBootstrappedInPlacementOrNoPlacementResult) ReadField1(iprot thrift
return nil
}
-func (p *NodeBootstrappedInPlacementOrNoPlacementResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("bootstrappedInPlacementOrNoPlacement_result"); err != nil {
+func (p *NodeGetWriteNewSeriesAsyncResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getWriteNewSeriesAsync_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -21056,7 +23117,7 @@ func (p *NodeBootstrappedInPlacementOrNoPlacementResult) Write(oprot thrift.TPro
return nil
}
-func (p *NodeBootstrappedInPlacementOrNoPlacementResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeGetWriteNewSeriesAsyncResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -21071,7 +23132,7 @@ func (p *NodeBootstrappedInPlacementOrNoPlacementResult) writeField0(oprot thrif
return err
}
-func (p *NodeBootstrappedInPlacementOrNoPlacementResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeGetWriteNewSeriesAsyncResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -21086,21 +23147,36 @@ func (p *NodeBootstrappedInPlacementOrNoPlacementResult) writeField1(oprot thrif
return err
}
-func (p *NodeBootstrappedInPlacementOrNoPlacementResult) String() string {
+func (p *NodeGetWriteNewSeriesAsyncResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeBootstrappedInPlacementOrNoPlacementResult(%+v)", *p)
+ return fmt.Sprintf("NodeGetWriteNewSeriesAsyncResult(%+v)", *p)
}
-type NodeGetPersistRateLimitArgs struct {
+// Attributes:
+// - Req
+type NodeSetWriteNewSeriesAsyncArgs struct {
+ Req *NodeSetWriteNewSeriesAsyncRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeGetPersistRateLimitArgs() *NodeGetPersistRateLimitArgs {
- return &NodeGetPersistRateLimitArgs{}
+func NewNodeSetWriteNewSeriesAsyncArgs() *NodeSetWriteNewSeriesAsyncArgs {
+ return &NodeSetWriteNewSeriesAsyncArgs{}
}
-func (p *NodeGetPersistRateLimitArgs) Read(iprot thrift.TProtocol) error {
+var NodeSetWriteNewSeriesAsyncArgs_Req_DEFAULT *NodeSetWriteNewSeriesAsyncRequest
+
+func (p *NodeSetWriteNewSeriesAsyncArgs) GetReq() *NodeSetWriteNewSeriesAsyncRequest {
+ if !p.IsSetReq() {
+ return NodeSetWriteNewSeriesAsyncArgs_Req_DEFAULT
+ }
+ return p.Req
+}
+func (p *NodeSetWriteNewSeriesAsyncArgs) IsSetReq() bool {
+ return p.Req != nil
+}
+
+func (p *NodeSetWriteNewSeriesAsyncArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -21113,8 +23189,15 @@ func (p *NodeGetPersistRateLimitArgs) Read(iprot thrift.TProtocol) error {
if fieldTypeId == thrift.STOP {
break
}
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
+ switch fieldId {
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
@@ -21126,11 +23209,22 @@ func (p *NodeGetPersistRateLimitArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeGetPersistRateLimitArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("getPersistRateLimit_args"); err != nil {
+func (p *NodeSetWriteNewSeriesAsyncArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &NodeSetWriteNewSeriesAsyncRequest{}
+ if err := p.Req.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
+ }
+ return nil
+}
+
+func (p *NodeSetWriteNewSeriesAsyncArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("setWriteNewSeriesAsync_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -21141,51 +23235,64 @@ func (p *NodeGetPersistRateLimitArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeGetPersistRateLimitArgs) String() string {
+func (p *NodeSetWriteNewSeriesAsyncArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
+ }
+ if err := p.Req.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
+ }
+ return err
+}
+
+func (p *NodeSetWriteNewSeriesAsyncArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeGetPersistRateLimitArgs(%+v)", *p)
+ return fmt.Sprintf("NodeSetWriteNewSeriesAsyncArgs(%+v)", *p)
}
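
Aside on the generated envelope pattern above: every service method gets a *_Args struct that wraps its request and a *_Result struct that wraps the success/err pair, each with symmetric Read/Write methods. A minimal round-trip sketch, assuming the pre-context Apache Thrift Go runtime these signatures imply and M3's generated package path (github.com/m3db/m3/src/dbnode/generated/thrift/rpc); the request's fields are left at zero values for brevity:

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"

	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
)

func main() {
	// An in-memory transport stands in for a real connection.
	trans := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolTransport(trans)

	// Client side: wrap the request in the generated args envelope.
	args := rpc.NewNodeSetWriteNewSeriesAsyncArgs()
	args.Req = &rpc.NodeSetWriteNewSeriesAsyncRequest{} // fields elided here
	if err := args.Write(proto); err != nil {
		panic(err)
	}

	// Server side: decode the same envelope back out.
	decoded := rpc.NewNodeSetWriteNewSeriesAsyncArgs()
	if err := decoded.Read(proto); err != nil {
		panic(err)
	}
	fmt.Println(decoded.IsSetReq()) // true
}
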
// Attributes:
// - Success
// - Err
-type NodeGetPersistRateLimitResult struct {
- Success *NodePersistRateLimitResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeSetWriteNewSeriesAsyncResult struct {
+ Success *NodeWriteNewSeriesAsyncResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeGetPersistRateLimitResult() *NodeGetPersistRateLimitResult {
- return &NodeGetPersistRateLimitResult{}
+func NewNodeSetWriteNewSeriesAsyncResult() *NodeSetWriteNewSeriesAsyncResult {
+ return &NodeSetWriteNewSeriesAsyncResult{}
}
-var NodeGetPersistRateLimitResult_Success_DEFAULT *NodePersistRateLimitResult_
+var NodeSetWriteNewSeriesAsyncResult_Success_DEFAULT *NodeWriteNewSeriesAsyncResult_
-func (p *NodeGetPersistRateLimitResult) GetSuccess() *NodePersistRateLimitResult_ {
+func (p *NodeSetWriteNewSeriesAsyncResult) GetSuccess() *NodeWriteNewSeriesAsyncResult_ {
if !p.IsSetSuccess() {
- return NodeGetPersistRateLimitResult_Success_DEFAULT
+ return NodeSetWriteNewSeriesAsyncResult_Success_DEFAULT
}
return p.Success
}
-var NodeGetPersistRateLimitResult_Err_DEFAULT *Error
+var NodeSetWriteNewSeriesAsyncResult_Err_DEFAULT *Error
-func (p *NodeGetPersistRateLimitResult) GetErr() *Error {
+func (p *NodeSetWriteNewSeriesAsyncResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeGetPersistRateLimitResult_Err_DEFAULT
+ return NodeSetWriteNewSeriesAsyncResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeGetPersistRateLimitResult) IsSetSuccess() bool {
+func (p *NodeSetWriteNewSeriesAsyncResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeGetPersistRateLimitResult) IsSetErr() bool {
+func (p *NodeSetWriteNewSeriesAsyncResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeGetPersistRateLimitResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeSetWriteNewSeriesAsyncResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -21222,15 +23329,15 @@ func (p *NodeGetPersistRateLimitResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeGetPersistRateLimitResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &NodePersistRateLimitResult_{}
+func (p *NodeSetWriteNewSeriesAsyncResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &NodeWriteNewSeriesAsyncResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeGetPersistRateLimitResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeSetWriteNewSeriesAsyncResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -21240,8 +23347,8 @@ func (p *NodeGetPersistRateLimitResult) ReadField1(iprot thrift.TProtocol) error
return nil
}
-func (p *NodeGetPersistRateLimitResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("getPersistRateLimit_result"); err != nil {
+func (p *NodeSetWriteNewSeriesAsyncResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("setWriteNewSeriesAsync_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -21261,7 +23368,7 @@ func (p *NodeGetPersistRateLimitResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeGetPersistRateLimitResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeSetWriteNewSeriesAsyncResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -21276,7 +23383,7 @@ func (p *NodeGetPersistRateLimitResult) writeField0(oprot thrift.TProtocol) (err
return err
}
-func (p *NodeGetPersistRateLimitResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeSetWriteNewSeriesAsyncResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -21291,36 +23398,21 @@ func (p *NodeGetPersistRateLimitResult) writeField1(oprot thrift.TProtocol) (err
return err
}
-func (p *NodeGetPersistRateLimitResult) String() string {
+func (p *NodeSetWriteNewSeriesAsyncResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeGetPersistRateLimitResult(%+v)", *p)
-}
-
-// Attributes:
-// - Req
-type NodeSetPersistRateLimitArgs struct {
- Req *NodeSetPersistRateLimitRequest `thrift:"req,1" db:"req" json:"req"`
+ return fmt.Sprintf("NodeSetWriteNewSeriesAsyncResult(%+v)", *p)
}
-func NewNodeSetPersistRateLimitArgs() *NodeSetPersistRateLimitArgs {
- return &NodeSetPersistRateLimitArgs{}
+type NodeGetWriteNewSeriesBackoffDurationArgs struct {
}
-var NodeSetPersistRateLimitArgs_Req_DEFAULT *NodeSetPersistRateLimitRequest
-
-func (p *NodeSetPersistRateLimitArgs) GetReq() *NodeSetPersistRateLimitRequest {
- if !p.IsSetReq() {
- return NodeSetPersistRateLimitArgs_Req_DEFAULT
- }
- return p.Req
-}
-func (p *NodeSetPersistRateLimitArgs) IsSetReq() bool {
- return p.Req != nil
+func NewNodeGetWriteNewSeriesBackoffDurationArgs() *NodeGetWriteNewSeriesBackoffDurationArgs {
+ return &NodeGetWriteNewSeriesBackoffDurationArgs{}
}
-func (p *NodeSetPersistRateLimitArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeGetWriteNewSeriesBackoffDurationArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -21333,15 +23425,8 @@ func (p *NodeSetPersistRateLimitArgs) Read(iprot thrift.TProtocol) error {
if fieldTypeId == thrift.STOP {
break
}
- switch fieldId {
- case 1:
- if err := p.ReadField1(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
@@ -21353,22 +23438,11 @@ func (p *NodeSetPersistRateLimitArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeSetPersistRateLimitArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &NodeSetPersistRateLimitRequest{}
- if err := p.Req.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
- }
- return nil
-}
-
-func (p *NodeSetPersistRateLimitArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("setPersistRateLimit_args"); err != nil {
+func (p *NodeGetWriteNewSeriesBackoffDurationArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getWriteNewSeriesBackoffDuration_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
- if err := p.writeField1(oprot); err != nil {
- return err
- }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -21379,64 +23453,51 @@ func (p *NodeSetPersistRateLimitArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeSetPersistRateLimitArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
- }
- if err := p.Req.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
- }
- return err
-}
-
-func (p *NodeSetPersistRateLimitArgs) String() string {
+func (p *NodeGetWriteNewSeriesBackoffDurationArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeSetPersistRateLimitArgs(%+v)", *p)
+ return fmt.Sprintf("NodeGetWriteNewSeriesBackoffDurationArgs(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeSetPersistRateLimitResult struct {
- Success *NodePersistRateLimitResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeGetWriteNewSeriesBackoffDurationResult struct {
+ Success *NodeWriteNewSeriesBackoffDurationResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeSetPersistRateLimitResult() *NodeSetPersistRateLimitResult {
- return &NodeSetPersistRateLimitResult{}
+func NewNodeGetWriteNewSeriesBackoffDurationResult() *NodeGetWriteNewSeriesBackoffDurationResult {
+ return &NodeGetWriteNewSeriesBackoffDurationResult{}
}
-var NodeSetPersistRateLimitResult_Success_DEFAULT *NodePersistRateLimitResult_
+var NodeGetWriteNewSeriesBackoffDurationResult_Success_DEFAULT *NodeWriteNewSeriesBackoffDurationResult_
-func (p *NodeSetPersistRateLimitResult) GetSuccess() *NodePersistRateLimitResult_ {
+func (p *NodeGetWriteNewSeriesBackoffDurationResult) GetSuccess() *NodeWriteNewSeriesBackoffDurationResult_ {
if !p.IsSetSuccess() {
- return NodeSetPersistRateLimitResult_Success_DEFAULT
+ return NodeGetWriteNewSeriesBackoffDurationResult_Success_DEFAULT
}
return p.Success
}
-var NodeSetPersistRateLimitResult_Err_DEFAULT *Error
+var NodeGetWriteNewSeriesBackoffDurationResult_Err_DEFAULT *Error
-func (p *NodeSetPersistRateLimitResult) GetErr() *Error {
+func (p *NodeGetWriteNewSeriesBackoffDurationResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeSetPersistRateLimitResult_Err_DEFAULT
+ return NodeGetWriteNewSeriesBackoffDurationResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeSetPersistRateLimitResult) IsSetSuccess() bool {
+func (p *NodeGetWriteNewSeriesBackoffDurationResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeSetPersistRateLimitResult) IsSetErr() bool {
+func (p *NodeGetWriteNewSeriesBackoffDurationResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeSetPersistRateLimitResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeGetWriteNewSeriesBackoffDurationResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -21473,15 +23534,15 @@ func (p *NodeSetPersistRateLimitResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeSetPersistRateLimitResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &NodePersistRateLimitResult_{}
+func (p *NodeGetWriteNewSeriesBackoffDurationResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &NodeWriteNewSeriesBackoffDurationResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeSetPersistRateLimitResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeGetWriteNewSeriesBackoffDurationResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -21491,8 +23552,8 @@ func (p *NodeSetPersistRateLimitResult) ReadField1(iprot thrift.TProtocol) error
return nil
}
-func (p *NodeSetPersistRateLimitResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("setPersistRateLimit_result"); err != nil {
+func (p *NodeGetWriteNewSeriesBackoffDurationResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getWriteNewSeriesBackoffDuration_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -21512,7 +23573,7 @@ func (p *NodeSetPersistRateLimitResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeSetPersistRateLimitResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeGetWriteNewSeriesBackoffDurationResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -21527,7 +23588,7 @@ func (p *NodeSetPersistRateLimitResult) writeField0(oprot thrift.TProtocol) (err
return err
}
-func (p *NodeSetPersistRateLimitResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeGetWriteNewSeriesBackoffDurationResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -21542,21 +23603,36 @@ func (p *NodeSetPersistRateLimitResult) writeField1(oprot thrift.TProtocol) (err
return err
}
-func (p *NodeSetPersistRateLimitResult) String() string {
+func (p *NodeGetWriteNewSeriesBackoffDurationResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeSetPersistRateLimitResult(%+v)", *p)
+ return fmt.Sprintf("NodeGetWriteNewSeriesBackoffDurationResult(%+v)", *p)
}
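
Worth noting about the getters generated above: GetSuccess and GetErr are nil-safe, falling back to the package-level *_DEFAULT variables (nil pointers) whenever the field was never decoded, so callers branch on IsSet* rather than testing raw fields. A small sketch of the intended call pattern, assuming M3's generated rpc package path:

package main

import (
	"fmt"

	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
)

func main() {
	res := rpc.NewNodeGetWriteNewSeriesBackoffDurationResult()

	// Nothing was decoded into res, so both IsSet* checks are false and
	// the getters return the _DEFAULT values, which are nil pointers.
	switch {
	case res.IsSetErr():
		fmt.Println("rpc error:", res.GetErr())
	case res.IsSetSuccess():
		fmt.Println("result:", res.GetSuccess())
	default:
		fmt.Println("neither success nor err was set on the wire")
	}
}
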
-type NodeGetWriteNewSeriesAsyncArgs struct {
+// Attributes:
+// - Req
+type NodeSetWriteNewSeriesBackoffDurationArgs struct {
+ Req *NodeSetWriteNewSeriesBackoffDurationRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeGetWriteNewSeriesAsyncArgs() *NodeGetWriteNewSeriesAsyncArgs {
- return &NodeGetWriteNewSeriesAsyncArgs{}
+func NewNodeSetWriteNewSeriesBackoffDurationArgs() *NodeSetWriteNewSeriesBackoffDurationArgs {
+ return &NodeSetWriteNewSeriesBackoffDurationArgs{}
}
-func (p *NodeGetWriteNewSeriesAsyncArgs) Read(iprot thrift.TProtocol) error {
+var NodeSetWriteNewSeriesBackoffDurationArgs_Req_DEFAULT *NodeSetWriteNewSeriesBackoffDurationRequest
+
+func (p *NodeSetWriteNewSeriesBackoffDurationArgs) GetReq() *NodeSetWriteNewSeriesBackoffDurationRequest {
+ if !p.IsSetReq() {
+ return NodeSetWriteNewSeriesBackoffDurationArgs_Req_DEFAULT
+ }
+ return p.Req
+}
+func (p *NodeSetWriteNewSeriesBackoffDurationArgs) IsSetReq() bool {
+ return p.Req != nil
+}
+
+func (p *NodeSetWriteNewSeriesBackoffDurationArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -21569,8 +23645,15 @@ func (p *NodeGetWriteNewSeriesAsyncArgs) Read(iprot thrift.TProtocol) error {
if fieldTypeId == thrift.STOP {
break
}
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
+ switch fieldId {
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
@@ -21582,11 +23665,24 @@ func (p *NodeGetWriteNewSeriesAsyncArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeGetWriteNewSeriesAsyncArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("getWriteNewSeriesAsync_args"); err != nil {
+func (p *NodeSetWriteNewSeriesBackoffDurationArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &NodeSetWriteNewSeriesBackoffDurationRequest{
+ DurationType: 2,
+ }
+ if err := p.Req.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
+ }
+ return nil
+}
+
+func (p *NodeSetWriteNewSeriesBackoffDurationArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("setWriteNewSeriesBackoffDuration_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -21597,51 +23693,64 @@ func (p *NodeGetWriteNewSeriesAsyncArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeGetWriteNewSeriesAsyncArgs) String() string {
+func (p *NodeSetWriteNewSeriesBackoffDurationArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
+ }
+ if err := p.Req.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
+ }
+ return err
+}
+
+func (p *NodeSetWriteNewSeriesBackoffDurationArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeGetWriteNewSeriesAsyncArgs(%+v)", *p)
+ return fmt.Sprintf("NodeSetWriteNewSeriesBackoffDurationArgs(%+v)", *p)
}
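
One subtlety in ReadField1 above: the nested request is seeded with DurationType: 2 before any bytes are decoded, so a payload that omits the durationType field keeps that IDL-declared default rather than Go's zero value (which enum name 2 maps to is defined in the IDL, not in this file). Mirroring that seeding by hand:

package main

import (
	"fmt"

	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
)

func main() {
	// Same construction the generated decoder uses before calling
	// Req.Read: fields absent from the wire leave this default untouched.
	req := &rpc.NodeSetWriteNewSeriesBackoffDurationRequest{
		DurationType: 2,
	}
	fmt.Println("defaulted durationType:", req.DurationType)
}
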
// Attributes:
// - Success
// - Err
-type NodeGetWriteNewSeriesAsyncResult struct {
- Success *NodeWriteNewSeriesAsyncResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeSetWriteNewSeriesBackoffDurationResult struct {
+ Success *NodeWriteNewSeriesBackoffDurationResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeGetWriteNewSeriesAsyncResult() *NodeGetWriteNewSeriesAsyncResult {
- return &NodeGetWriteNewSeriesAsyncResult{}
+func NewNodeSetWriteNewSeriesBackoffDurationResult() *NodeSetWriteNewSeriesBackoffDurationResult {
+ return &NodeSetWriteNewSeriesBackoffDurationResult{}
}
-var NodeGetWriteNewSeriesAsyncResult_Success_DEFAULT *NodeWriteNewSeriesAsyncResult_
+var NodeSetWriteNewSeriesBackoffDurationResult_Success_DEFAULT *NodeWriteNewSeriesBackoffDurationResult_
-func (p *NodeGetWriteNewSeriesAsyncResult) GetSuccess() *NodeWriteNewSeriesAsyncResult_ {
+func (p *NodeSetWriteNewSeriesBackoffDurationResult) GetSuccess() *NodeWriteNewSeriesBackoffDurationResult_ {
if !p.IsSetSuccess() {
- return NodeGetWriteNewSeriesAsyncResult_Success_DEFAULT
+ return NodeSetWriteNewSeriesBackoffDurationResult_Success_DEFAULT
}
return p.Success
}
-var NodeGetWriteNewSeriesAsyncResult_Err_DEFAULT *Error
+var NodeSetWriteNewSeriesBackoffDurationResult_Err_DEFAULT *Error
-func (p *NodeGetWriteNewSeriesAsyncResult) GetErr() *Error {
+func (p *NodeSetWriteNewSeriesBackoffDurationResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeGetWriteNewSeriesAsyncResult_Err_DEFAULT
+ return NodeSetWriteNewSeriesBackoffDurationResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeGetWriteNewSeriesAsyncResult) IsSetSuccess() bool {
+func (p *NodeSetWriteNewSeriesBackoffDurationResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeGetWriteNewSeriesAsyncResult) IsSetErr() bool {
+func (p *NodeSetWriteNewSeriesBackoffDurationResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeGetWriteNewSeriesAsyncResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeSetWriteNewSeriesBackoffDurationResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -21678,15 +23787,15 @@ func (p *NodeGetWriteNewSeriesAsyncResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeGetWriteNewSeriesAsyncResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &NodeWriteNewSeriesAsyncResult_{}
+func (p *NodeSetWriteNewSeriesBackoffDurationResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &NodeWriteNewSeriesBackoffDurationResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeGetWriteNewSeriesAsyncResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeSetWriteNewSeriesBackoffDurationResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -21696,8 +23805,8 @@ func (p *NodeGetWriteNewSeriesAsyncResult) ReadField1(iprot thrift.TProtocol) er
return nil
}
-func (p *NodeGetWriteNewSeriesAsyncResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("getWriteNewSeriesAsync_result"); err != nil {
+func (p *NodeSetWriteNewSeriesBackoffDurationResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("setWriteNewSeriesBackoffDuration_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -21717,7 +23826,7 @@ func (p *NodeGetWriteNewSeriesAsyncResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeGetWriteNewSeriesAsyncResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeSetWriteNewSeriesBackoffDurationResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -21732,7 +23841,7 @@ func (p *NodeGetWriteNewSeriesAsyncResult) writeField0(oprot thrift.TProtocol) (
return err
}
-func (p *NodeGetWriteNewSeriesAsyncResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeSetWriteNewSeriesBackoffDurationResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -21747,36 +23856,21 @@ func (p *NodeGetWriteNewSeriesAsyncResult) writeField1(oprot thrift.TProtocol) (
return err
}
-func (p *NodeGetWriteNewSeriesAsyncResult) String() string {
+func (p *NodeSetWriteNewSeriesBackoffDurationResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeGetWriteNewSeriesAsyncResult(%+v)", *p)
-}
-
-// Attributes:
-// - Req
-type NodeSetWriteNewSeriesAsyncArgs struct {
- Req *NodeSetWriteNewSeriesAsyncRequest `thrift:"req,1" db:"req" json:"req"`
+ return fmt.Sprintf("NodeSetWriteNewSeriesBackoffDurationResult(%+v)", *p)
}
-func NewNodeSetWriteNewSeriesAsyncArgs() *NodeSetWriteNewSeriesAsyncArgs {
- return &NodeSetWriteNewSeriesAsyncArgs{}
+type NodeGetWriteNewSeriesLimitPerShardPerSecondArgs struct {
}
-var NodeSetWriteNewSeriesAsyncArgs_Req_DEFAULT *NodeSetWriteNewSeriesAsyncRequest
-
-func (p *NodeSetWriteNewSeriesAsyncArgs) GetReq() *NodeSetWriteNewSeriesAsyncRequest {
- if !p.IsSetReq() {
- return NodeSetWriteNewSeriesAsyncArgs_Req_DEFAULT
- }
- return p.Req
-}
-func (p *NodeSetWriteNewSeriesAsyncArgs) IsSetReq() bool {
- return p.Req != nil
+func NewNodeGetWriteNewSeriesLimitPerShardPerSecondArgs() *NodeGetWriteNewSeriesLimitPerShardPerSecondArgs {
+ return &NodeGetWriteNewSeriesLimitPerShardPerSecondArgs{}
}
-func (p *NodeSetWriteNewSeriesAsyncArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -21789,15 +23883,8 @@ func (p *NodeSetWriteNewSeriesAsyncArgs) Read(iprot thrift.TProtocol) error {
if fieldTypeId == thrift.STOP {
break
}
- switch fieldId {
- case 1:
- if err := p.ReadField1(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
@@ -21809,22 +23896,11 @@ func (p *NodeSetWriteNewSeriesAsyncArgs) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeSetWriteNewSeriesAsyncArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &NodeSetWriteNewSeriesAsyncRequest{}
- if err := p.Req.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
- }
- return nil
-}
-
-func (p *NodeSetWriteNewSeriesAsyncArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("setWriteNewSeriesAsync_args"); err != nil {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getWriteNewSeriesLimitPerShardPerSecond_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
- if err := p.writeField1(oprot); err != nil {
- return err
- }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -21835,64 +23911,51 @@ func (p *NodeSetWriteNewSeriesAsyncArgs) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeSetWriteNewSeriesAsyncArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
- }
- if err := p.Req.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
- }
- return err
-}
-
-func (p *NodeSetWriteNewSeriesAsyncArgs) String() string {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeSetWriteNewSeriesAsyncArgs(%+v)", *p)
+ return fmt.Sprintf("NodeGetWriteNewSeriesLimitPerShardPerSecondArgs(%+v)", *p)
}
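
The get-style endpoints above take no parameters, so their args structs are empty; the generated Read still drains every field with iprot.Skip until STOP, which is what lets an older binary tolerate args emitted from a newer IDL. A sketch of the (trivial) wire round trip, under the same runtime assumptions as the earlier examples:

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"

	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
)

func main() {
	trans := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolTransport(trans)

	// Even an empty envelope is a well-formed struct on the wire:
	// struct begin, field stop, struct end.
	if err := rpc.NewNodeGetWriteNewSeriesLimitPerShardPerSecondArgs().Write(proto); err != nil {
		panic(err)
	}
	if err := rpc.NewNodeGetWriteNewSeriesLimitPerShardPerSecondArgs().Read(proto); err != nil {
		panic(err)
	}
	fmt.Println("empty get args round-tripped")
}
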
// Attributes:
// - Success
// - Err
-type NodeSetWriteNewSeriesAsyncResult struct {
- Success *NodeWriteNewSeriesAsyncResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeGetWriteNewSeriesLimitPerShardPerSecondResult struct {
+ Success *NodeWriteNewSeriesLimitPerShardPerSecondResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeSetWriteNewSeriesAsyncResult() *NodeSetWriteNewSeriesAsyncResult {
- return &NodeSetWriteNewSeriesAsyncResult{}
+func NewNodeGetWriteNewSeriesLimitPerShardPerSecondResult() *NodeGetWriteNewSeriesLimitPerShardPerSecondResult {
+ return &NodeGetWriteNewSeriesLimitPerShardPerSecondResult{}
}
-var NodeSetWriteNewSeriesAsyncResult_Success_DEFAULT *NodeWriteNewSeriesAsyncResult_
+var NodeGetWriteNewSeriesLimitPerShardPerSecondResult_Success_DEFAULT *NodeWriteNewSeriesLimitPerShardPerSecondResult_
-func (p *NodeSetWriteNewSeriesAsyncResult) GetSuccess() *NodeWriteNewSeriesAsyncResult_ {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) GetSuccess() *NodeWriteNewSeriesLimitPerShardPerSecondResult_ {
if !p.IsSetSuccess() {
- return NodeSetWriteNewSeriesAsyncResult_Success_DEFAULT
+ return NodeGetWriteNewSeriesLimitPerShardPerSecondResult_Success_DEFAULT
}
return p.Success
}
-var NodeSetWriteNewSeriesAsyncResult_Err_DEFAULT *Error
+var NodeGetWriteNewSeriesLimitPerShardPerSecondResult_Err_DEFAULT *Error
-func (p *NodeSetWriteNewSeriesAsyncResult) GetErr() *Error {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeSetWriteNewSeriesAsyncResult_Err_DEFAULT
+ return NodeGetWriteNewSeriesLimitPerShardPerSecondResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeSetWriteNewSeriesAsyncResult) IsSetSuccess() bool {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeSetWriteNewSeriesAsyncResult) IsSetErr() bool {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeSetWriteNewSeriesAsyncResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -21929,15 +23992,15 @@ func (p *NodeSetWriteNewSeriesAsyncResult) Read(iprot thrift.TProtocol) error {
return nil
}
-func (p *NodeSetWriteNewSeriesAsyncResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &NodeWriteNewSeriesAsyncResult_{}
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &NodeWriteNewSeriesLimitPerShardPerSecondResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeSetWriteNewSeriesAsyncResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -21947,8 +24010,8 @@ func (p *NodeSetWriteNewSeriesAsyncResult) ReadField1(iprot thrift.TProtocol) er
return nil
}
-func (p *NodeSetWriteNewSeriesAsyncResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("setWriteNewSeriesAsync_result"); err != nil {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("getWriteNewSeriesLimitPerShardPerSecond_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -21968,7 +24031,7 @@ func (p *NodeSetWriteNewSeriesAsyncResult) Write(oprot thrift.TProtocol) error {
return nil
}
-func (p *NodeSetWriteNewSeriesAsyncResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -21983,7 +24046,7 @@ func (p *NodeSetWriteNewSeriesAsyncResult) writeField0(oprot thrift.TProtocol) (
return err
}
-func (p *NodeSetWriteNewSeriesAsyncResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -21998,21 +24061,36 @@ func (p *NodeSetWriteNewSeriesAsyncResult) writeField1(oprot thrift.TProtocol) (
return err
}
-func (p *NodeSetWriteNewSeriesAsyncResult) String() string {
+func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeSetWriteNewSeriesAsyncResult(%+v)", *p)
+ return fmt.Sprintf("NodeGetWriteNewSeriesLimitPerShardPerSecondResult(%+v)", *p)
}
-type NodeGetWriteNewSeriesBackoffDurationArgs struct {
+// Attributes:
+// - Req
+type NodeSetWriteNewSeriesLimitPerShardPerSecondArgs struct {
+ Req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest `thrift:"req,1" db:"req" json:"req"`
+}
+
+func NewNodeSetWriteNewSeriesLimitPerShardPerSecondArgs() *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs {
+ return &NodeSetWriteNewSeriesLimitPerShardPerSecondArgs{}
+}
+
+var NodeSetWriteNewSeriesLimitPerShardPerSecondArgs_Req_DEFAULT *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest
+
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) GetReq() *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest {
+ if !p.IsSetReq() {
+ return NodeSetWriteNewSeriesLimitPerShardPerSecondArgs_Req_DEFAULT
+ }
+ return p.Req
}
-
-func NewNodeGetWriteNewSeriesBackoffDurationArgs() *NodeGetWriteNewSeriesBackoffDurationArgs {
- return &NodeGetWriteNewSeriesBackoffDurationArgs{}
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) IsSetReq() bool {
+ return p.Req != nil
}
-func (p *NodeGetWriteNewSeriesBackoffDurationArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -22025,8 +24103,15 @@ func (p *NodeGetWriteNewSeriesBackoffDurationArgs) Read(iprot thrift.TProtocol)
if fieldTypeId == thrift.STOP {
break
}
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
+ switch fieldId {
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
@@ -22038,11 +24123,22 @@ func (p *NodeGetWriteNewSeriesBackoffDurationArgs) Read(iprot thrift.TProtocol)
return nil
}
-func (p *NodeGetWriteNewSeriesBackoffDurationArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("getWriteNewSeriesBackoffDuration_args"); err != nil {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &NodeSetWriteNewSeriesLimitPerShardPerSecondRequest{}
+ if err := p.Req.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
+ }
+ return nil
+}
+
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("setWriteNewSeriesLimitPerShardPerSecond_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -22053,51 +24149,64 @@ func (p *NodeGetWriteNewSeriesBackoffDurationArgs) Write(oprot thrift.TProtocol)
return nil
}
-func (p *NodeGetWriteNewSeriesBackoffDurationArgs) String() string {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
+ }
+ if err := p.Req.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
+ }
+ return err
+}
+
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeGetWriteNewSeriesBackoffDurationArgs(%+v)", *p)
+ return fmt.Sprintf("NodeSetWriteNewSeriesLimitPerShardPerSecondArgs(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeGetWriteNewSeriesBackoffDurationResult struct {
- Success *NodeWriteNewSeriesBackoffDurationResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeSetWriteNewSeriesLimitPerShardPerSecondResult struct {
+ Success *NodeWriteNewSeriesLimitPerShardPerSecondResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeGetWriteNewSeriesBackoffDurationResult() *NodeGetWriteNewSeriesBackoffDurationResult {
- return &NodeGetWriteNewSeriesBackoffDurationResult{}
+func NewNodeSetWriteNewSeriesLimitPerShardPerSecondResult() *NodeSetWriteNewSeriesLimitPerShardPerSecondResult {
+ return &NodeSetWriteNewSeriesLimitPerShardPerSecondResult{}
}
-var NodeGetWriteNewSeriesBackoffDurationResult_Success_DEFAULT *NodeWriteNewSeriesBackoffDurationResult_
+var NodeSetWriteNewSeriesLimitPerShardPerSecondResult_Success_DEFAULT *NodeWriteNewSeriesLimitPerShardPerSecondResult_
-func (p *NodeGetWriteNewSeriesBackoffDurationResult) GetSuccess() *NodeWriteNewSeriesBackoffDurationResult_ {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) GetSuccess() *NodeWriteNewSeriesLimitPerShardPerSecondResult_ {
if !p.IsSetSuccess() {
- return NodeGetWriteNewSeriesBackoffDurationResult_Success_DEFAULT
+ return NodeSetWriteNewSeriesLimitPerShardPerSecondResult_Success_DEFAULT
}
return p.Success
}
-var NodeGetWriteNewSeriesBackoffDurationResult_Err_DEFAULT *Error
+var NodeSetWriteNewSeriesLimitPerShardPerSecondResult_Err_DEFAULT *Error
-func (p *NodeGetWriteNewSeriesBackoffDurationResult) GetErr() *Error {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeGetWriteNewSeriesBackoffDurationResult_Err_DEFAULT
+ return NodeSetWriteNewSeriesLimitPerShardPerSecondResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeGetWriteNewSeriesBackoffDurationResult) IsSetSuccess() bool {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeGetWriteNewSeriesBackoffDurationResult) IsSetErr() bool {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeGetWriteNewSeriesBackoffDurationResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -22134,15 +24243,15 @@ func (p *NodeGetWriteNewSeriesBackoffDurationResult) Read(iprot thrift.TProtocol
return nil
}
-func (p *NodeGetWriteNewSeriesBackoffDurationResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &NodeWriteNewSeriesBackoffDurationResult_{}
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &NodeWriteNewSeriesLimitPerShardPerSecondResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeGetWriteNewSeriesBackoffDurationResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -22152,8 +24261,8 @@ func (p *NodeGetWriteNewSeriesBackoffDurationResult) ReadField1(iprot thrift.TPr
return nil
}
-func (p *NodeGetWriteNewSeriesBackoffDurationResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("getWriteNewSeriesBackoffDuration_result"); err != nil {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("setWriteNewSeriesLimitPerShardPerSecond_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -22173,7 +24282,7 @@ func (p *NodeGetWriteNewSeriesBackoffDurationResult) Write(oprot thrift.TProtoco
return nil
}
-func (p *NodeGetWriteNewSeriesBackoffDurationResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -22188,7 +24297,7 @@ func (p *NodeGetWriteNewSeriesBackoffDurationResult) writeField0(oprot thrift.TP
return err
}
-func (p *NodeGetWriteNewSeriesBackoffDurationResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -22203,36 +24312,36 @@ func (p *NodeGetWriteNewSeriesBackoffDurationResult) writeField1(oprot thrift.TP
return err
}
-func (p *NodeGetWriteNewSeriesBackoffDurationResult) String() string {
+func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeGetWriteNewSeriesBackoffDurationResult(%+v)", *p)
+ return fmt.Sprintf("NodeSetWriteNewSeriesLimitPerShardPerSecondResult(%+v)", *p)
}
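
Because writeField0 and writeField1 above are gated on IsSetSuccess/IsSetErr, a result struct behaves like a union on the wire: only the populated branch is encoded. A sketch of the error branch, again under the earlier runtime assumptions (the empty rpc.Error here is purely illustrative):

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"

	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
)

func main() {
	trans := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolTransport(trans)

	out := rpc.NewNodeSetWriteNewSeriesLimitPerShardPerSecondResult()
	out.Err = &rpc.Error{} // illustrative: only the err branch is set
	if err := out.Write(proto); err != nil {
		panic(err)
	}

	in := rpc.NewNodeSetWriteNewSeriesLimitPerShardPerSecondResult()
	if err := in.Read(proto); err != nil {
		panic(err)
	}
	fmt.Println("err set:", in.IsSetErr(), "success set:", in.IsSetSuccess()) // true false
}
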
// Attributes:
// - Req
-type NodeSetWriteNewSeriesBackoffDurationArgs struct {
- Req *NodeSetWriteNewSeriesBackoffDurationRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeDebugProfileStartArgs struct {
+ Req *DebugProfileStartRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeSetWriteNewSeriesBackoffDurationArgs() *NodeSetWriteNewSeriesBackoffDurationArgs {
- return &NodeSetWriteNewSeriesBackoffDurationArgs{}
+func NewNodeDebugProfileStartArgs() *NodeDebugProfileStartArgs {
+ return &NodeDebugProfileStartArgs{}
}
-var NodeSetWriteNewSeriesBackoffDurationArgs_Req_DEFAULT *NodeSetWriteNewSeriesBackoffDurationRequest
+var NodeDebugProfileStartArgs_Req_DEFAULT *DebugProfileStartRequest
-func (p *NodeSetWriteNewSeriesBackoffDurationArgs) GetReq() *NodeSetWriteNewSeriesBackoffDurationRequest {
+func (p *NodeDebugProfileStartArgs) GetReq() *DebugProfileStartRequest {
if !p.IsSetReq() {
- return NodeSetWriteNewSeriesBackoffDurationArgs_Req_DEFAULT
+ return NodeDebugProfileStartArgs_Req_DEFAULT
}
return p.Req
}
-func (p *NodeSetWriteNewSeriesBackoffDurationArgs) IsSetReq() bool {
+func (p *NodeDebugProfileStartArgs) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeSetWriteNewSeriesBackoffDurationArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeDebugProfileStartArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -22265,18 +24374,16 @@ func (p *NodeSetWriteNewSeriesBackoffDurationArgs) Read(iprot thrift.TProtocol)
return nil
}
-func (p *NodeSetWriteNewSeriesBackoffDurationArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &NodeSetWriteNewSeriesBackoffDurationRequest{
- DurationType: 2,
- }
+func (p *NodeDebugProfileStartArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &DebugProfileStartRequest{}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeSetWriteNewSeriesBackoffDurationArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("setWriteNewSeriesBackoffDuration_args"); err != nil {
+func (p *NodeDebugProfileStartArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("debugProfileStart_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -22293,7 +24400,7 @@ func (p *NodeSetWriteNewSeriesBackoffDurationArgs) Write(oprot thrift.TProtocol)
return nil
}
-func (p *NodeSetWriteNewSeriesBackoffDurationArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeDebugProfileStartArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -22306,51 +24413,51 @@ func (p *NodeSetWriteNewSeriesBackoffDurationArgs) writeField1(oprot thrift.TPro
return err
}
-func (p *NodeSetWriteNewSeriesBackoffDurationArgs) String() string {
+func (p *NodeDebugProfileStartArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeSetWriteNewSeriesBackoffDurationArgs(%+v)", *p)
+ return fmt.Sprintf("NodeDebugProfileStartArgs(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeSetWriteNewSeriesBackoffDurationResult struct {
- Success *NodeWriteNewSeriesBackoffDurationResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeDebugProfileStartResult struct {
+ Success *DebugProfileStartResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeSetWriteNewSeriesBackoffDurationResult() *NodeSetWriteNewSeriesBackoffDurationResult {
- return &NodeSetWriteNewSeriesBackoffDurationResult{}
+func NewNodeDebugProfileStartResult() *NodeDebugProfileStartResult {
+ return &NodeDebugProfileStartResult{}
}
-var NodeSetWriteNewSeriesBackoffDurationResult_Success_DEFAULT *NodeWriteNewSeriesBackoffDurationResult_
+var NodeDebugProfileStartResult_Success_DEFAULT *DebugProfileStartResult_
-func (p *NodeSetWriteNewSeriesBackoffDurationResult) GetSuccess() *NodeWriteNewSeriesBackoffDurationResult_ {
+func (p *NodeDebugProfileStartResult) GetSuccess() *DebugProfileStartResult_ {
if !p.IsSetSuccess() {
- return NodeSetWriteNewSeriesBackoffDurationResult_Success_DEFAULT
+ return NodeDebugProfileStartResult_Success_DEFAULT
}
return p.Success
}
-var NodeSetWriteNewSeriesBackoffDurationResult_Err_DEFAULT *Error
+var NodeDebugProfileStartResult_Err_DEFAULT *Error
-func (p *NodeSetWriteNewSeriesBackoffDurationResult) GetErr() *Error {
+func (p *NodeDebugProfileStartResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeSetWriteNewSeriesBackoffDurationResult_Err_DEFAULT
+ return NodeDebugProfileStartResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeSetWriteNewSeriesBackoffDurationResult) IsSetSuccess() bool {
+func (p *NodeDebugProfileStartResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeSetWriteNewSeriesBackoffDurationResult) IsSetErr() bool {
+func (p *NodeDebugProfileStartResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeSetWriteNewSeriesBackoffDurationResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeDebugProfileStartResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -22387,15 +24494,15 @@ func (p *NodeSetWriteNewSeriesBackoffDurationResult) Read(iprot thrift.TProtocol
return nil
}
-func (p *NodeSetWriteNewSeriesBackoffDurationResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &NodeWriteNewSeriesBackoffDurationResult_{}
+func (p *NodeDebugProfileStartResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &DebugProfileStartResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeSetWriteNewSeriesBackoffDurationResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeDebugProfileStartResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -22405,8 +24512,8 @@ func (p *NodeSetWriteNewSeriesBackoffDurationResult) ReadField1(iprot thrift.TPr
return nil
}
-func (p *NodeSetWriteNewSeriesBackoffDurationResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("setWriteNewSeriesBackoffDuration_result"); err != nil {
+func (p *NodeDebugProfileStartResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("debugProfileStart_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -22426,7 +24533,7 @@ func (p *NodeSetWriteNewSeriesBackoffDurationResult) Write(oprot thrift.TProtoco
return nil
}
-func (p *NodeSetWriteNewSeriesBackoffDurationResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeDebugProfileStartResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -22441,7 +24548,7 @@ func (p *NodeSetWriteNewSeriesBackoffDurationResult) writeField0(oprot thrift.TP
return err
}
-func (p *NodeSetWriteNewSeriesBackoffDurationResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeDebugProfileStartResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -22456,21 +24563,36 @@ func (p *NodeSetWriteNewSeriesBackoffDurationResult) writeField1(oprot thrift.TP
return err
}
-func (p *NodeSetWriteNewSeriesBackoffDurationResult) String() string {
+func (p *NodeDebugProfileStartResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeSetWriteNewSeriesBackoffDurationResult(%+v)", *p)
+ return fmt.Sprintf("NodeDebugProfileStartResult(%+v)", *p)
}
-type NodeGetWriteNewSeriesLimitPerShardPerSecondArgs struct {
+// Attributes:
+// - Req
+type NodeDebugProfileStopArgs struct {
+ Req *DebugProfileStopRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeGetWriteNewSeriesLimitPerShardPerSecondArgs() *NodeGetWriteNewSeriesLimitPerShardPerSecondArgs {
- return &NodeGetWriteNewSeriesLimitPerShardPerSecondArgs{}
+func NewNodeDebugProfileStopArgs() *NodeDebugProfileStopArgs {
+ return &NodeDebugProfileStopArgs{}
}
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondArgs) Read(iprot thrift.TProtocol) error {
+var NodeDebugProfileStopArgs_Req_DEFAULT *DebugProfileStopRequest
+
+func (p *NodeDebugProfileStopArgs) GetReq() *DebugProfileStopRequest {
+ if !p.IsSetReq() {
+ return NodeDebugProfileStopArgs_Req_DEFAULT
+ }
+ return p.Req
+}
+func (p *NodeDebugProfileStopArgs) IsSetReq() bool {
+ return p.Req != nil
+}
+
+func (p *NodeDebugProfileStopArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -22483,8 +24605,15 @@ func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondArgs) Read(iprot thrift.TPro
if fieldTypeId == thrift.STOP {
break
}
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
+ switch fieldId {
+ case 1:
+ if err := p.ReadField1(iprot); err != nil {
+ return err
+ }
+ default:
+ if err := iprot.Skip(fieldTypeId); err != nil {
+ return err
+ }
}
if err := iprot.ReadFieldEnd(); err != nil {
return err
@@ -22496,11 +24625,22 @@ func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondArgs) Read(iprot thrift.TPro
return nil
}
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("getWriteNewSeriesLimitPerShardPerSecond_args"); err != nil {
+func (p *NodeDebugProfileStopArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &DebugProfileStopRequest{}
+ if err := p.Req.Read(iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
+ }
+ return nil
+}
+
+func (p *NodeDebugProfileStopArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("debugProfileStop_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
+ if err := p.writeField1(oprot); err != nil {
+ return err
+ }
}
if err := oprot.WriteFieldStop(); err != nil {
return thrift.PrependError("write field stop error: ", err)
@@ -22511,51 +24651,64 @@ func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondArgs) Write(oprot thrift.TPr
return nil
}
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondArgs) String() string {
+func (p *NodeDebugProfileStopArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
+ }
+ if err := p.Req.Write(oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err)
+ }
+ if err := oprot.WriteFieldEnd(); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err)
+ }
+ return err
+}
+
+func (p *NodeDebugProfileStopArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeGetWriteNewSeriesLimitPerShardPerSecondArgs(%+v)", *p)
+ return fmt.Sprintf("NodeDebugProfileStopArgs(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeGetWriteNewSeriesLimitPerShardPerSecondResult struct {
- Success *NodeWriteNewSeriesLimitPerShardPerSecondResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeDebugProfileStopResult struct {
+ Success *DebugProfileStopResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeGetWriteNewSeriesLimitPerShardPerSecondResult() *NodeGetWriteNewSeriesLimitPerShardPerSecondResult {
- return &NodeGetWriteNewSeriesLimitPerShardPerSecondResult{}
+func NewNodeDebugProfileStopResult() *NodeDebugProfileStopResult {
+ return &NodeDebugProfileStopResult{}
}
-var NodeGetWriteNewSeriesLimitPerShardPerSecondResult_Success_DEFAULT *NodeWriteNewSeriesLimitPerShardPerSecondResult_
+var NodeDebugProfileStopResult_Success_DEFAULT *DebugProfileStopResult_
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) GetSuccess() *NodeWriteNewSeriesLimitPerShardPerSecondResult_ {
+func (p *NodeDebugProfileStopResult) GetSuccess() *DebugProfileStopResult_ {
if !p.IsSetSuccess() {
- return NodeGetWriteNewSeriesLimitPerShardPerSecondResult_Success_DEFAULT
+ return NodeDebugProfileStopResult_Success_DEFAULT
}
return p.Success
}
-var NodeGetWriteNewSeriesLimitPerShardPerSecondResult_Err_DEFAULT *Error
+var NodeDebugProfileStopResult_Err_DEFAULT *Error
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) GetErr() *Error {
+func (p *NodeDebugProfileStopResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeGetWriteNewSeriesLimitPerShardPerSecondResult_Err_DEFAULT
+ return NodeDebugProfileStopResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) IsSetSuccess() bool {
+func (p *NodeDebugProfileStopResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) IsSetErr() bool {
+func (p *NodeDebugProfileStopResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeDebugProfileStopResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -22592,15 +24745,15 @@ func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) Read(iprot thrift.TP
return nil
}
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &NodeWriteNewSeriesLimitPerShardPerSecondResult_{}
+func (p *NodeDebugProfileStopResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &DebugProfileStopResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeDebugProfileStopResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -22610,8 +24763,8 @@ func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) ReadField1(iprot thr
return nil
}
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("getWriteNewSeriesLimitPerShardPerSecond_result"); err != nil {
+func (p *NodeDebugProfileStopResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("debugProfileStop_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -22631,7 +24784,7 @@ func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) Write(oprot thrift.T
return nil
}
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeDebugProfileStopResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -22646,7 +24799,7 @@ func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) writeField0(oprot th
return err
}
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeDebugProfileStopResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -22661,36 +24814,36 @@ func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) writeField1(oprot th
return err
}
-func (p *NodeGetWriteNewSeriesLimitPerShardPerSecondResult) String() string {
+func (p *NodeDebugProfileStopResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeGetWriteNewSeriesLimitPerShardPerSecondResult(%+v)", *p)
+ return fmt.Sprintf("NodeDebugProfileStopResult(%+v)", *p)
}
// Attributes:
// - Req
-type NodeSetWriteNewSeriesLimitPerShardPerSecondArgs struct {
- Req *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest `thrift:"req,1" db:"req" json:"req"`
+type NodeDebugIndexMemorySegmentsArgs struct {
+ Req *DebugIndexMemorySegmentsRequest `thrift:"req,1" db:"req" json:"req"`
}
-func NewNodeSetWriteNewSeriesLimitPerShardPerSecondArgs() *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs {
- return &NodeSetWriteNewSeriesLimitPerShardPerSecondArgs{}
+func NewNodeDebugIndexMemorySegmentsArgs() *NodeDebugIndexMemorySegmentsArgs {
+ return &NodeDebugIndexMemorySegmentsArgs{}
}
-var NodeSetWriteNewSeriesLimitPerShardPerSecondArgs_Req_DEFAULT *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest
+var NodeDebugIndexMemorySegmentsArgs_Req_DEFAULT *DebugIndexMemorySegmentsRequest
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) GetReq() *NodeSetWriteNewSeriesLimitPerShardPerSecondRequest {
+func (p *NodeDebugIndexMemorySegmentsArgs) GetReq() *DebugIndexMemorySegmentsRequest {
if !p.IsSetReq() {
- return NodeSetWriteNewSeriesLimitPerShardPerSecondArgs_Req_DEFAULT
+ return NodeDebugIndexMemorySegmentsArgs_Req_DEFAULT
}
return p.Req
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) IsSetReq() bool {
+func (p *NodeDebugIndexMemorySegmentsArgs) IsSetReq() bool {
return p.Req != nil
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) Read(iprot thrift.TProtocol) error {
+func (p *NodeDebugIndexMemorySegmentsArgs) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -22723,16 +24876,16 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) Read(iprot thrift.TPro
return nil
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Req = &NodeSetWriteNewSeriesLimitPerShardPerSecondRequest{}
+func (p *NodeDebugIndexMemorySegmentsArgs) ReadField1(iprot thrift.TProtocol) error {
+ p.Req = &DebugIndexMemorySegmentsRequest{}
if err := p.Req.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err)
}
return nil
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("setWriteNewSeriesLimitPerShardPerSecond_args"); err != nil {
+func (p *NodeDebugIndexMemorySegmentsArgs) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("debugIndexMemorySegments_args"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -22749,7 +24902,7 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) Write(oprot thrift.TPr
return nil
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeDebugIndexMemorySegmentsArgs) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err)
}
@@ -22762,51 +24915,51 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) writeField1(oprot thri
return err
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondArgs) String() string {
+func (p *NodeDebugIndexMemorySegmentsArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeSetWriteNewSeriesLimitPerShardPerSecondArgs(%+v)", *p)
+ return fmt.Sprintf("NodeDebugIndexMemorySegmentsArgs(%+v)", *p)
}
// Attributes:
// - Success
// - Err
-type NodeSetWriteNewSeriesLimitPerShardPerSecondResult struct {
- Success *NodeWriteNewSeriesLimitPerShardPerSecondResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
- Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
+type NodeDebugIndexMemorySegmentsResult struct {
+ Success *DebugIndexMemorySegmentsResult_ `thrift:"success,0" db:"success" json:"success,omitempty"`
+ Err *Error `thrift:"err,1" db:"err" json:"err,omitempty"`
}
-func NewNodeSetWriteNewSeriesLimitPerShardPerSecondResult() *NodeSetWriteNewSeriesLimitPerShardPerSecondResult {
- return &NodeSetWriteNewSeriesLimitPerShardPerSecondResult{}
+func NewNodeDebugIndexMemorySegmentsResult() *NodeDebugIndexMemorySegmentsResult {
+ return &NodeDebugIndexMemorySegmentsResult{}
}
-var NodeSetWriteNewSeriesLimitPerShardPerSecondResult_Success_DEFAULT *NodeWriteNewSeriesLimitPerShardPerSecondResult_
+var NodeDebugIndexMemorySegmentsResult_Success_DEFAULT *DebugIndexMemorySegmentsResult_
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) GetSuccess() *NodeWriteNewSeriesLimitPerShardPerSecondResult_ {
+func (p *NodeDebugIndexMemorySegmentsResult) GetSuccess() *DebugIndexMemorySegmentsResult_ {
if !p.IsSetSuccess() {
- return NodeSetWriteNewSeriesLimitPerShardPerSecondResult_Success_DEFAULT
+ return NodeDebugIndexMemorySegmentsResult_Success_DEFAULT
}
return p.Success
}
-var NodeSetWriteNewSeriesLimitPerShardPerSecondResult_Err_DEFAULT *Error
+var NodeDebugIndexMemorySegmentsResult_Err_DEFAULT *Error
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) GetErr() *Error {
+func (p *NodeDebugIndexMemorySegmentsResult) GetErr() *Error {
if !p.IsSetErr() {
- return NodeSetWriteNewSeriesLimitPerShardPerSecondResult_Err_DEFAULT
+ return NodeDebugIndexMemorySegmentsResult_Err_DEFAULT
}
return p.Err
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) IsSetSuccess() bool {
+func (p *NodeDebugIndexMemorySegmentsResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) IsSetErr() bool {
+func (p *NodeDebugIndexMemorySegmentsResult) IsSetErr() bool {
return p.Err != nil
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) Read(iprot thrift.TProtocol) error {
+func (p *NodeDebugIndexMemorySegmentsResult) Read(iprot thrift.TProtocol) error {
if _, err := iprot.ReadStructBegin(); err != nil {
return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
}
@@ -22843,15 +24996,15 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) Read(iprot thrift.TP
return nil
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = &NodeWriteNewSeriesLimitPerShardPerSecondResult_{}
+func (p *NodeDebugIndexMemorySegmentsResult) ReadField0(iprot thrift.TProtocol) error {
+ p.Success = &DebugIndexMemorySegmentsResult_{}
if err := p.Success.Read(iprot); err != nil {
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
}
return nil
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) ReadField1(iprot thrift.TProtocol) error {
+func (p *NodeDebugIndexMemorySegmentsResult) ReadField1(iprot thrift.TProtocol) error {
p.Err = &Error{
Type: 0,
}
@@ -22861,8 +25014,8 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) ReadField1(iprot thr
return nil
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("setWriteNewSeriesLimitPerShardPerSecond_result"); err != nil {
+func (p *NodeDebugIndexMemorySegmentsResult) Write(oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin("debugIndexMemorySegments_result"); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
}
if p != nil {
@@ -22882,7 +25035,7 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) Write(oprot thrift.T
return nil
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *NodeDebugIndexMemorySegmentsResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
@@ -22897,7 +25050,7 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) writeField0(oprot th
return err
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *NodeDebugIndexMemorySegmentsResult) writeField1(oprot thrift.TProtocol) (err error) {
if p.IsSetErr() {
if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
@@ -22912,11 +25065,11 @@ func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) writeField1(oprot th
return err
}
-func (p *NodeSetWriteNewSeriesLimitPerShardPerSecondResult) String() string {
+func (p *NodeDebugIndexMemorySegmentsResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("NodeSetWriteNewSeriesLimitPerShardPerSecondResult(%+v)", *p)
+ return fmt.Sprintf("NodeDebugIndexMemorySegmentsResult(%+v)", *p)
}
type Cluster interface {
@@ -23013,16 +25166,16 @@ func (p *ClusterClient) recvHealth() (value *HealthResult_, err error) {
return
}
if mTypeId == thrift.EXCEPTION {
- error211 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error212 error
- error212, err = error211.Read(iprot)
+ error237 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error238 error
+ error238, err = error237.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error212
+ err = error238
return
}
if mTypeId != thrift.REPLY {
@@ -23094,16 +25247,16 @@ func (p *ClusterClient) recvWrite() (err error) {
return
}
if mTypeId == thrift.EXCEPTION {
- error213 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error214 error
- error214, err = error213.Read(iprot)
+ error239 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error240 error
+ error240, err = error239.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error214
+ err = error240
return
}
if mTypeId != thrift.REPLY {
@@ -23174,16 +25327,16 @@ func (p *ClusterClient) recvWriteTagged() (err error) {
return
}
if mTypeId == thrift.EXCEPTION {
- error215 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error216 error
- error216, err = error215.Read(iprot)
+ error241 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error242 error
+ error242, err = error241.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error216
+ err = error242
return
}
if mTypeId != thrift.REPLY {
@@ -23254,16 +25407,16 @@ func (p *ClusterClient) recvQuery() (value *QueryResult_, err error) {
return
}
if mTypeId == thrift.EXCEPTION {
- error217 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error218 error
- error218, err = error217.Read(iprot)
+ error243 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error244 error
+ error244, err = error243.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error218
+ err = error244
return
}
if mTypeId != thrift.REPLY {
@@ -23335,16 +25488,16 @@ func (p *ClusterClient) recvAggregate() (value *AggregateQueryResult_, err error
return
}
if mTypeId == thrift.EXCEPTION {
- error219 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error220 error
- error220, err = error219.Read(iprot)
+ error245 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error246 error
+ error246, err = error245.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error220
+ err = error246
return
}
if mTypeId != thrift.REPLY {
@@ -23416,16 +25569,16 @@ func (p *ClusterClient) recvFetch() (value *FetchResult_, err error) {
return
}
if mTypeId == thrift.EXCEPTION {
- error221 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error222 error
- error222, err = error221.Read(iprot)
+ error247 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error248 error
+ error248, err = error247.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error222
+ err = error248
return
}
if mTypeId != thrift.REPLY {
@@ -23497,16 +25650,16 @@ func (p *ClusterClient) recvTruncate() (value *TruncateResult_, err error) {
return
}
if mTypeId == thrift.EXCEPTION {
- error223 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error224 error
- error224, err = error223.Read(iprot)
+ error249 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
+ var error250 error
+ error250, err = error249.Read(iprot)
if err != nil {
return
}
if err = iprot.ReadMessageEnd(); err != nil {
return
}
- err = error224
+ err = error250
return
}
if mTypeId != thrift.REPLY {
@@ -23548,15 +25701,15 @@ func (p *ClusterProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
func NewClusterProcessor(handler Cluster) *ClusterProcessor {
- self225 := &ClusterProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
- self225.processorMap["health"] = &clusterProcessorHealth{handler: handler}
- self225.processorMap["write"] = &clusterProcessorWrite{handler: handler}
- self225.processorMap["writeTagged"] = &clusterProcessorWriteTagged{handler: handler}
- self225.processorMap["query"] = &clusterProcessorQuery{handler: handler}
- self225.processorMap["aggregate"] = &clusterProcessorAggregate{handler: handler}
- self225.processorMap["fetch"] = &clusterProcessorFetch{handler: handler}
- self225.processorMap["truncate"] = &clusterProcessorTruncate{handler: handler}
- return self225
+ self251 := &ClusterProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self251.processorMap["health"] = &clusterProcessorHealth{handler: handler}
+ self251.processorMap["write"] = &clusterProcessorWrite{handler: handler}
+ self251.processorMap["writeTagged"] = &clusterProcessorWriteTagged{handler: handler}
+ self251.processorMap["query"] = &clusterProcessorQuery{handler: handler}
+ self251.processorMap["aggregate"] = &clusterProcessorAggregate{handler: handler}
+ self251.processorMap["fetch"] = &clusterProcessorFetch{handler: handler}
+ self251.processorMap["truncate"] = &clusterProcessorTruncate{handler: handler}
+ return self251
}
func (p *ClusterProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
@@ -23569,12 +25722,12 @@ func (p *ClusterProcessor) Process(iprot, oprot thrift.TProtocol) (success bool,
}
iprot.Skip(thrift.STRUCT)
iprot.ReadMessageEnd()
- x226 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ x252 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
- x226.Write(oprot)
+ x252.Write(oprot)
oprot.WriteMessageEnd()
oprot.Flush()
- return false, x226
+ return false, x252
}
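
// ---------------------------------------------------------------------------
// Editor's sketch (not part of the generated patch above): every renamed
// result struct in this file follows the same thrift convention -- an
// optional Success field at id 0 and an optional Err field at id 1, with
// Get* accessors falling back to a nil package-level default when unset.
// The helper below is hypothetical; the types and accessors are the
// generated ones shown in this diff.
package rpcsketch

import "github.com/m3db/m3/src/dbnode/generated/thrift/rpc"

// successOrErr unpacks a generated result struct into Go's (value, error)
// shape, the same way the tchan-rpc client code later in this diff does.
func successOrErr(res *rpc.NodeDebugProfileStopResult) (*rpc.DebugProfileStopResult_, error) {
	if res.IsSetErr() {
		return nil, res.GetErr()
	}
	return res.GetSuccess(), nil
}
// ---------------------------------------------------------------------------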
diff --git a/src/dbnode/generated/thrift/rpc/rpc_mock.go b/src/dbnode/generated/thrift/rpc/rpc_mock.go
index b0114716a3..647e08992b 100644
--- a/src/dbnode/generated/thrift/rpc/rpc_mock.go
+++ b/src/dbnode/generated/thrift/rpc/rpc_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/dbnode/generated/thrift/rpc/tchan-go
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -240,6 +240,51 @@ func (mr *MockTChanNodeMockRecorder) BootstrappedInPlacementOrNoPlacement(ctx in
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrappedInPlacementOrNoPlacement", reflect.TypeOf((*MockTChanNode)(nil).BootstrappedInPlacementOrNoPlacement), ctx)
}
+// DebugIndexMemorySegments mocks base method
+func (m *MockTChanNode) DebugIndexMemorySegments(ctx thrift.Context, req *DebugIndexMemorySegmentsRequest) (*DebugIndexMemorySegmentsResult_, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DebugIndexMemorySegments", ctx, req)
+ ret0, _ := ret[0].(*DebugIndexMemorySegmentsResult_)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DebugIndexMemorySegments indicates an expected call of DebugIndexMemorySegments
+func (mr *MockTChanNodeMockRecorder) DebugIndexMemorySegments(ctx, req interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugIndexMemorySegments", reflect.TypeOf((*MockTChanNode)(nil).DebugIndexMemorySegments), ctx, req)
+}
+
+// DebugProfileStart mocks base method
+func (m *MockTChanNode) DebugProfileStart(ctx thrift.Context, req *DebugProfileStartRequest) (*DebugProfileStartResult_, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DebugProfileStart", ctx, req)
+ ret0, _ := ret[0].(*DebugProfileStartResult_)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DebugProfileStart indicates an expected call of DebugProfileStart
+func (mr *MockTChanNodeMockRecorder) DebugProfileStart(ctx, req interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugProfileStart", reflect.TypeOf((*MockTChanNode)(nil).DebugProfileStart), ctx, req)
+}
+
+// DebugProfileStop mocks base method
+func (m *MockTChanNode) DebugProfileStop(ctx thrift.Context, req *DebugProfileStopRequest) (*DebugProfileStopResult_, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DebugProfileStop", ctx, req)
+ ret0, _ := ret[0].(*DebugProfileStopResult_)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DebugProfileStop indicates an expected call of DebugProfileStop
+func (mr *MockTChanNodeMockRecorder) DebugProfileStop(ctx, req interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugProfileStop", reflect.TypeOf((*MockTChanNode)(nil).DebugProfileStop), ctx, req)
+}
+
// Fetch mocks base method
func (m *MockTChanNode) Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error) {
m.ctrl.T.Helper()
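// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): exercising one of the new mock
// methods above. NewMockTChanNode is the constructor MockGen conventionally
// emits alongside MockTChanNode; the test body is illustrative only.
package rpcsketch

import (
	"testing"

	"github.com/golang/mock/gomock"

	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
)

func TestDebugProfileStartMocked(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	node := rpc.NewMockTChanNode(ctrl)
	// Expect exactly one DebugProfileStart call with any context/request.
	node.EXPECT().
		DebugProfileStart(gomock.Any(), gomock.Any()).
		Return(&rpc.DebugProfileStartResult_{}, nil)

	if _, err := node.DebugProfileStart(nil, &rpc.DebugProfileStartRequest{}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
// ---------------------------------------------------------------------------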
diff --git a/src/dbnode/generated/thrift/rpc/tchan-rpc.go b/src/dbnode/generated/thrift/rpc/tchan-rpc.go
index 4e9cd5b396..3d32c85d9e 100644
--- a/src/dbnode/generated/thrift/rpc/tchan-rpc.go
+++ b/src/dbnode/generated/thrift/rpc/tchan-rpc.go
@@ -1,6 +1,6 @@
// @generated Code generated by thrift-gen. Do not modify.
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -49,6 +49,9 @@ type TChanNode interface {
AggregateRaw(ctx thrift.Context, req *AggregateQueryRawRequest) (*AggregateQueryRawResult_, error)
Bootstrapped(ctx thrift.Context) (*NodeBootstrappedResult_, error)
BootstrappedInPlacementOrNoPlacement(ctx thrift.Context) (*NodeBootstrappedInPlacementOrNoPlacementResult_, error)
+ DebugIndexMemorySegments(ctx thrift.Context, req *DebugIndexMemorySegmentsRequest) (*DebugIndexMemorySegmentsResult_, error)
+ DebugProfileStart(ctx thrift.Context, req *DebugProfileStartRequest) (*DebugProfileStartResult_, error)
+ DebugProfileStop(ctx thrift.Context, req *DebugProfileStopRequest) (*DebugProfileStopResult_, error)
Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error)
FetchBatchRaw(ctx thrift.Context, req *FetchBatchRawRequest) (*FetchBatchRawResult_, error)
FetchBatchRawV2(ctx thrift.Context, req *FetchBatchRawV2Request) (*FetchBatchRawResult_, error)
@@ -547,6 +550,60 @@ func (c *tchanNodeClient) BootstrappedInPlacementOrNoPlacement(ctx thrift.Contex
return resp.GetSuccess(), err
}
+func (c *tchanNodeClient) DebugIndexMemorySegments(ctx thrift.Context, req *DebugIndexMemorySegmentsRequest) (*DebugIndexMemorySegmentsResult_, error) {
+ var resp NodeDebugIndexMemorySegmentsResult
+ args := NodeDebugIndexMemorySegmentsArgs{
+ Req: req,
+ }
+ success, err := c.client.Call(ctx, c.thriftService, "debugIndexMemorySegments", &args, &resp)
+ if err == nil && !success {
+ switch {
+ case resp.Err != nil:
+ err = resp.Err
+ default:
+ err = fmt.Errorf("received no result or unknown exception for debugIndexMemorySegments")
+ }
+ }
+
+ return resp.GetSuccess(), err
+}
+
+func (c *tchanNodeClient) DebugProfileStart(ctx thrift.Context, req *DebugProfileStartRequest) (*DebugProfileStartResult_, error) {
+ var resp NodeDebugProfileStartResult
+ args := NodeDebugProfileStartArgs{
+ Req: req,
+ }
+ success, err := c.client.Call(ctx, c.thriftService, "debugProfileStart", &args, &resp)
+ if err == nil && !success {
+ switch {
+ case resp.Err != nil:
+ err = resp.Err
+ default:
+ err = fmt.Errorf("received no result or unknown exception for debugProfileStart")
+ }
+ }
+
+ return resp.GetSuccess(), err
+}
+
+func (c *tchanNodeClient) DebugProfileStop(ctx thrift.Context, req *DebugProfileStopRequest) (*DebugProfileStopResult_, error) {
+ var resp NodeDebugProfileStopResult
+ args := NodeDebugProfileStopArgs{
+ Req: req,
+ }
+ success, err := c.client.Call(ctx, c.thriftService, "debugProfileStop", &args, &resp)
+ if err == nil && !success {
+ switch {
+ case resp.Err != nil:
+ err = resp.Err
+ default:
+ err = fmt.Errorf("received no result or unknown exception for debugProfileStop")
+ }
+ }
+
+ return resp.GetSuccess(), err
+}
+
func (c *tchanNodeClient) Fetch(ctx thrift.Context, req *FetchRequest) (*FetchResult_, error) {
var resp NodeFetchResult
args := NodeFetchArgs{
@@ -989,6 +1046,9 @@ func (s *tchanNodeServer) Methods() []string {
"aggregateRaw",
"bootstrapped",
"bootstrappedInPlacementOrNoPlacement",
+ "debugIndexMemorySegments",
+ "debugProfileStart",
+ "debugProfileStop",
"fetch",
"fetchBatchRaw",
"fetchBatchRawV2",
@@ -1026,6 +1086,12 @@ func (s *tchanNodeServer) Handle(ctx thrift.Context, methodName string, protocol
return s.handleBootstrapped(ctx, protocol)
case "bootstrappedInPlacementOrNoPlacement":
return s.handleBootstrappedInPlacementOrNoPlacement(ctx, protocol)
+ case "debugIndexMemorySegments":
+ return s.handleDebugIndexMemorySegments(ctx, protocol)
+ case "debugProfileStart":
+ return s.handleDebugProfileStart(ctx, protocol)
+ case "debugProfileStop":
+ return s.handleDebugProfileStop(ctx, protocol)
case "fetch":
return s.handleFetch(ctx, protocol)
case "fetchBatchRaw":
@@ -1192,6 +1258,90 @@ func (s *tchanNodeServer) handleBootstrappedInPlacementOrNoPlacement(ctx thrift.
return err == nil, &res, nil
}
+func (s *tchanNodeServer) handleDebugIndexMemorySegments(ctx thrift.Context, protocol athrift.TProtocol) (bool, athrift.TStruct, error) {
+ var req NodeDebugIndexMemorySegmentsArgs
+ var res NodeDebugIndexMemorySegmentsResult
+
+ if err := req.Read(protocol); err != nil {
+ return false, nil, err
+ }
+
+ r, err :=
+ s.handler.DebugIndexMemorySegments(ctx, req.Req)
+
+ if err != nil {
+ switch v := err.(type) {
+ case *Error:
+ if v == nil {
+ return false, nil, fmt.Errorf("Handler for err returned non-nil error type *Error but nil value")
+ }
+ res.Err = v
+ default:
+ return false, nil, err
+ }
+ } else {
+ res.Success = r
+ }
+
+ return err == nil, &res, nil
+}
+
+func (s *tchanNodeServer) handleDebugProfileStart(ctx thrift.Context, protocol athrift.TProtocol) (bool, athrift.TStruct, error) {
+ var req NodeDebugProfileStartArgs
+ var res NodeDebugProfileStartResult
+
+ if err := req.Read(protocol); err != nil {
+ return false, nil, err
+ }
+
+ r, err :=
+ s.handler.DebugProfileStart(ctx, req.Req)
+
+ if err != nil {
+ switch v := err.(type) {
+ case *Error:
+ if v == nil {
+ return false, nil, fmt.Errorf("Handler for err returned non-nil error type *Error but nil value")
+ }
+ res.Err = v
+ default:
+ return false, nil, err
+ }
+ } else {
+ res.Success = r
+ }
+
+ return err == nil, &res, nil
+}
+
+func (s *tchanNodeServer) handleDebugProfileStop(ctx thrift.Context, protocol athrift.TProtocol) (bool, athrift.TStruct, error) {
+ var req NodeDebugProfileStopArgs
+ var res NodeDebugProfileStopResult
+
+ if err := req.Read(protocol); err != nil {
+ return false, nil, err
+ }
+
+ r, err :=
+ s.handler.DebugProfileStop(ctx, req.Req)
+
+ if err != nil {
+ switch v := err.(type) {
+ case *Error:
+ if v == nil {
+ return false, nil, fmt.Errorf("Handler for err returned non-nil error type *Error but nil value")
+ }
+ res.Err = v
+ default:
+ return false, nil, err
+ }
+ } else {
+ res.Success = r
+ }
+
+ return err == nil, &res, nil
+}
+
func (s *tchanNodeServer) handleFetch(ctx thrift.Context, protocol athrift.TProtocol) (bool, athrift.TStruct, error) {
var req NodeFetchArgs
var res NodeFetchResult
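// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): driving the three debug endpoints
// added to the TChanNode interface above. Request fields are left
// zero-valued for illustration; debugRoundTrip is a hypothetical helper.
package rpcsketch

import (
	"github.com/uber/tchannel-go/thrift"

	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
)

func debugRoundTrip(ctx thrift.Context, node rpc.TChanNode) error {
	// Start a profile, stop it, then dump index memory segments.
	if _, err := node.DebugProfileStart(ctx, &rpc.DebugProfileStartRequest{}); err != nil {
		return err
	}
	if _, err := node.DebugProfileStop(ctx, &rpc.DebugProfileStopRequest{}); err != nil {
		return err
	}
	_, err := node.DebugIndexMemorySegments(ctx, &rpc.DebugIndexMemorySegmentsRequest{})
	return err
}
// ---------------------------------------------------------------------------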
diff --git a/src/dbnode/integration/admin_session_fetch_blocks_test.go b/src/dbnode/integration/admin_session_fetch_blocks_test.go
index 0b64debbc1..9644c95885 100644
--- a/src/dbnode/integration/admin_session_fetch_blocks_test.go
+++ b/src/dbnode/integration/admin_session_fetch_blocks_test.go
@@ -54,29 +54,29 @@ func testAdminSessionFetchBlocksFromPeers(t *testing.T, setTestOpts setTestOptio
}
// Test setup
- testOpts := newTestOptions(t)
+ testOpts := NewTestOptions(t)
if setTestOpts != nil {
testOpts = setTestOpts(t, testOpts)
}
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
- md := testSetup.namespaceMetadataOrFail(testNamespaces[0])
+ md := testSetup.NamespaceMetadataOrFail(testNamespaces[0])
blockSize := md.Options().RetentionOptions().BlockSize()
// Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
- require.NoError(t, testSetup.startServer())
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
+ require.NoError(t, testSetup.StartServer())
// Stop the server
defer func() {
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Debug("server is now down")
}()
// Write test data
- now := testSetup.getNowFn()
+ now := testSetup.NowFn()()
seriesMaps := make(map[xtime.UnixNano]generate.SeriesBlock)
inputData := []generate.BlockConfig{
{IDs: []string{"foo", "bar"}, NumPoints: 100, Start: now},
@@ -87,17 +87,17 @@ func testAdminSessionFetchBlocksFromPeers(t *testing.T, setTestOpts setTestOptio
}
for _, input := range inputData {
start := input.Start
- testSetup.setNowFn(start)
+ testSetup.SetNowFn(start)
testData := generate.Block(input)
seriesMaps[xtime.ToUnixNano(start)] = testData
- require.NoError(t, testSetup.writeBatch(testNamespaces[0], testData))
+ require.NoError(t, testSetup.WriteBatch(testNamespaces[0], testData))
}
log.Debug("test data is now written")
// Advance time and sleep for a long enough time so data blocks are sealed during ticking
- testSetup.setNowFn(testSetup.getNowFn().Add(blockSize * 2))
- later := testSetup.getNowFn()
- testSetup.sleepFor10xTickMinimumInterval()
+ testSetup.SetNowFn(testSetup.NowFn()().Add(blockSize * 2))
+ later := testSetup.NowFn()()
+ testSetup.SleepFor10xTickMinimumInterval()
metadatasByShard := testSetupMetadatas(t, testSetup, testNamespaces[0], now, later)
observedSeriesMaps := testSetupToSeriesMaps(t, testSetup, md, metadatasByShard)
@@ -108,23 +108,23 @@ func testAdminSessionFetchBlocksFromPeers(t *testing.T, setTestOpts setTestOptio
func testSetupMetadatas(
t *testing.T,
- testSetup *testSetup,
+ testSetup TestSetup,
namespace ident.ID,
start time.Time,
end time.Time,
) map[uint32][]block.ReplicaMetadata {
// Retrieve written data using the AdminSession APIs
// FetchMetadataBlocksFromPeers/FetchBlocksFromPeers
- adminClient := testSetup.m3dbVerificationAdminClient
+ adminClient := testSetup.M3DBVerificationAdminClient()
level := topology.ReadConsistencyLevelMajority
metadatasByShard, err := m3dbClientFetchBlocksMetadata(adminClient,
- namespace, testSetup.shardSet.AllIDs(), start, end, level)
+ namespace, testSetup.ShardSet().AllIDs(), start, end, level)
require.NoError(t, err)
return metadatasByShard
}
func filterSeriesByShard(
- testSetup *testSetup,
+ testSetup TestSetup,
seriesMap map[xtime.UnixNano]generate.SeriesBlock,
desiredShards []uint32,
) map[xtime.UnixNano]generate.SeriesBlock {
@@ -132,7 +132,7 @@ func filterSeriesByShard(
for blockStart, series := range seriesMap {
filteredSeries := make([]generate.Series, 0, len(series))
for _, serie := range series {
- shard := testSetup.shardSet.Lookup(serie.ID)
+ shard := testSetup.ShardSet().Lookup(serie.ID)
for _, ss := range desiredShards {
if ss == shard {
filteredSeries = append(filteredSeries, serie)
@@ -200,16 +200,16 @@ func verifySeriesMapsEqual(
func testSetupToSeriesMaps(
t *testing.T,
- testSetup *testSetup,
+ testSetup TestSetup,
nsMetadata namespace.Metadata,
metadatasByShard map[uint32][]block.ReplicaMetadata,
) map[xtime.UnixNano]generate.SeriesBlock {
blockSize := nsMetadata.Options().RetentionOptions().BlockSize()
seriesMap := make(map[xtime.UnixNano]generate.SeriesBlock)
- resultOpts := newDefaulTestResultOptions(testSetup.storageOpts)
- consistencyLevel := testSetup.storageOpts.RepairOptions().RepairConsistencyLevel()
- iterPool := testSetup.storageOpts.ReaderIteratorPool()
- session, err := testSetup.m3dbVerificationAdminClient.DefaultAdminSession()
+ resultOpts := newDefaulTestResultOptions(testSetup.StorageOpts())
+ consistencyLevel := testSetup.StorageOpts().RepairOptions().RepairConsistencyLevel()
+ iterPool := testSetup.StorageOpts().ReaderIteratorPool()
+ session, err := testSetup.M3DBVerificationAdminClient().DefaultAdminSession()
require.NoError(t, err)
require.NotNil(t, session)
nsCtx := namespace.NewContextFrom(nsMetadata)
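// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the shape of the refactor running
// through these integration tests -- direct field access on the unexported
// *testSetup struct becomes accessor methods on an exported TestSetup
// interface. This subset is inferred from the call sites in this diff; the
// real interface lives in the integration package and is larger.
package integrationsketch

import (
	"time"

	"github.com/m3db/m3/src/dbnode/sharding"
	"github.com/m3db/m3/src/dbnode/storage"
)

type TestSetup interface {
	// NowFn returns the clock function itself, hence the NowFn()() call sites.
	NowFn() func() time.Time
	SetNowFn(t time.Time)
	StorageOpts() storage.Options
	SetStorageOpts(opts storage.Options)
	ShardSet() sharding.ShardSet
	StartServer() error
	StopServer() error
	Close()
}
// ---------------------------------------------------------------------------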
diff --git a/src/dbnode/integration/bootstrap_after_buffer_rotation_regression_test.go b/src/dbnode/integration/bootstrap_after_buffer_rotation_regression_test.go
index 17a38e70ee..1d6cd5c784 100644
--- a/src/dbnode/integration/bootstrap_after_buffer_rotation_regression_test.go
+++ b/src/dbnode/integration/bootstrap_after_buffer_rotation_regression_test.go
@@ -34,6 +34,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
bcl "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/commitlog"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
@@ -68,24 +69,24 @@ func TestBootstrapAfterBufferRotation(t *testing.T) {
)
ns1, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().SetRetentionOptions(ropts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1})
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- setup.mustSetTickMinimumInterval(100 * time.Millisecond)
+ setup.MustSetTickMinimumInterval(100 * time.Millisecond)
// Setup the commitlog and write a single datapoint into it one second into the
// active block.
- commitLogOpts := setup.storageOpts.CommitLogOptions().
+ commitLogOpts := setup.StorageOpts().CommitLogOptions().
SetFlushInterval(defaultIntegrationTestFlushInterval)
- setup.storageOpts = setup.storageOpts.SetCommitLogOptions(commitLogOpts)
+ setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))
testID := ident.StringID("foo")
- now := setup.getNowFn().Truncate(blockSize)
- setup.setNowFn(now)
+ now := setup.NowFn()().Truncate(blockSize)
+ setup.SetNowFn(now)
startTime := now
commitlogWrite := ts.Datapoint{
Timestamp: startTime.Add(time.Second),
@@ -105,12 +106,12 @@ func TestBootstrapAfterBufferRotation(t *testing.T) {
// which does not bootstrap any data, but simply waits until it is signaled, allowing us
// to delay bootstrap completion until after series buffer drain/rotation. After the custom
// test bootstrapper completes, the commitlog bootstrapper will run.
- bootstrapOpts := newDefaulTestResultOptions(setup.storageOpts)
+ bootstrapOpts := newDefaulTestResultOptions(setup.StorageOpts())
bootstrapCommitlogOpts := bcl.NewOptions().
SetResultOptions(bootstrapOpts).
SetCommitLogOptions(commitLogOpts).
SetRuntimeOptionsManager(runtime.NewOptionsManager())
- fsOpts := setup.storageOpts.CommitLogOptions().FilesystemOptions()
+ fsOpts := setup.StorageOpts().CommitLogOptions().FilesystemOptions()
commitlogBootstrapperProvider, err := bcl.NewCommitLogBootstrapperProvider(
bootstrapCommitlogOpts, mustInspectFilesystem(fsOpts), nil)
require.NoError(t, err)
@@ -122,6 +123,7 @@ func TestBootstrapAfterBufferRotation(t *testing.T) {
test := newTestBootstrapperSource(testBootstrapperSourceOptions{
read: func(
+ ctx context.Context,
namespaces bootstrap.Namespaces,
) (bootstrap.NamespaceResults, error) {
<-signalCh
@@ -131,18 +133,18 @@ func TestBootstrapAfterBufferRotation(t *testing.T) {
if err != nil {
return bootstrap.NamespaceResults{}, err
}
- return bs.Bootstrap(namespaces)
+ return bs.Bootstrap(ctx, namespaces)
},
}, bootstrapOpts, bs)
processOpts := bootstrap.NewProcessOptions().
SetTopologyMapProvider(setup).
- SetOrigin(setup.origin)
+ SetOrigin(setup.Origin())
processProvider, err := bootstrap.NewProcessProvider(
test, processOpts, bootstrapOpts)
require.NoError(t, err)
- setup.storageOpts = setup.storageOpts.SetBootstrapProcessProvider(processProvider)
+ setup.SetStorageOpts(setup.StorageOpts().SetBootstrapProcessProvider(processProvider))
// Start a background goroutine which will wait until the server is started,
// issue a single write into the active block, change the time to be far enough
@@ -151,16 +153,16 @@ func TestBootstrapAfterBufferRotation(t *testing.T) {
var memoryWrite ts.Datapoint
go func() {
// Wait for server to start
- setup.waitUntilServerIsUp()
+ setup.WaitUntilServerIsUp()
now = now.Add(blockSize)
- setup.setNowFn(now)
+ setup.SetNowFn(now)
memoryWrite = ts.Datapoint{
Timestamp: now.Add(-10 * time.Second),
Value: 2,
}
// Issue the write (still in the same block as the commitlog write).
- err := setup.writeBatch(ns1.ID(), generate.SeriesBlock{
+ err := setup.WriteBatch(ns1.ID(), generate.SeriesBlock{
generate.Series{
ID: ident.StringID("foo"),
Data: []generate.TestValue{{Datapoint: memoryWrite}},
@@ -172,18 +174,18 @@ func TestBootstrapAfterBufferRotation(t *testing.T) {
// Change the time far enough into the next block that a series buffer
// rotation will occur for the previously active block.
now = now.Add(ropts.BufferPast()).Add(time.Second)
- setup.setNowFn(now)
- setup.sleepFor10xTickMinimumInterval()
+ setup.SetNowFn(now)
+ setup.SleepFor10xTickMinimumInterval()
// Twice because the test bootstrapper will need to run two times, once to fulfill
// all historical blocks and once to fulfill the active block.
signalCh <- struct{}{}
signalCh <- struct{}{}
}()
- require.NoError(t, setup.startServer()) // Blocks until bootstrap is complete
+ require.NoError(t, setup.StartServer()) // Blocks until bootstrap is complete
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
}()
// Verify in-memory data match what we expect - both commitlog and memory write
diff --git a/src/dbnode/integration/bootstrap_before_buffer_rotation_no_tick_regression_test.go b/src/dbnode/integration/bootstrap_before_buffer_rotation_no_tick_regression_test.go
index 80ef12e351..7b6870d6d7 100644
--- a/src/dbnode/integration/bootstrap_before_buffer_rotation_no_tick_regression_test.go
+++ b/src/dbnode/integration/bootstrap_before_buffer_rotation_no_tick_regression_test.go
@@ -34,6 +34,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
bcl "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/commitlog"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
@@ -82,24 +83,24 @@ func TestBootstrapBeforeBufferRotationNoTick(t *testing.T) {
)
ns1, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().SetRetentionOptions(ropts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1})
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- setup.mustSetTickMinimumInterval(100 * time.Millisecond)
+ setup.MustSetTickMinimumInterval(100 * time.Millisecond)
// Setup the commitlog and write a single datapoint into it one second into the
// active block.
- commitLogOpts := setup.storageOpts.CommitLogOptions().
+ commitLogOpts := setup.StorageOpts().CommitLogOptions().
SetFlushInterval(defaultIntegrationTestFlushInterval)
- setup.storageOpts = setup.storageOpts.SetCommitLogOptions(commitLogOpts)
+ setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))
testID := ident.StringID("foo")
- now := setup.getNowFn().Truncate(blockSize)
- setup.setNowFn(now)
+ now := setup.NowFn()().Truncate(blockSize)
+ setup.SetNowFn(now)
startTime := now
commitlogWrite := ts.Datapoint{
Timestamp: startTime.Add(time.Second),
@@ -119,12 +120,12 @@ func TestBootstrapBeforeBufferRotationNoTick(t *testing.T) {
// which does not bootstrap any data, but simply waits until it is signaled, allowing us
// to delay bootstrap completion until we've forced a tick to "hang". After the custom
// test bootstrapper completes, the commitlog bootstrapper will run.
- bootstrapOpts := newDefaulTestResultOptions(setup.storageOpts)
+ bootstrapOpts := newDefaulTestResultOptions(setup.StorageOpts())
bootstrapCommitlogOpts := bcl.NewOptions().
SetResultOptions(bootstrapOpts).
SetCommitLogOptions(commitLogOpts).
SetRuntimeOptionsManager(runtime.NewOptionsManager())
- fsOpts := setup.storageOpts.CommitLogOptions().FilesystemOptions()
+ fsOpts := setup.StorageOpts().CommitLogOptions().FilesystemOptions()
commitlogBootstrapperProvider, err := bcl.NewCommitLogBootstrapperProvider(
bootstrapCommitlogOpts, mustInspectFilesystem(fsOpts), nil)
require.NoError(t, err)
@@ -136,6 +137,7 @@ func TestBootstrapBeforeBufferRotationNoTick(t *testing.T) {
test := newTestBootstrapperSource(testBootstrapperSourceOptions{
read: func(
+ ctx context.Context,
namespaces bootstrap.Namespaces,
) (bootstrap.NamespaceResults, error) {
<-signalCh
@@ -145,16 +147,16 @@ func TestBootstrapBeforeBufferRotationNoTick(t *testing.T) {
if err != nil {
return bootstrap.NamespaceResults{}, err
}
- return bs.Bootstrap(namespaces)
+ return bs.Bootstrap(ctx, namespaces)
},
}, bootstrapOpts, bs)
processOpts := bootstrap.NewProcessOptions().
SetTopologyMapProvider(setup).
- SetOrigin(setup.origin)
+ SetOrigin(setup.Origin())
process, err := bootstrap.NewProcessProvider(test, processOpts, bootstrapOpts)
require.NoError(t, err)
- setup.storageOpts = setup.storageOpts.SetBootstrapProcessProvider(process)
+ setup.SetStorageOpts(setup.StorageOpts().SetBootstrapProcessProvider(process))
// Start a background goroutine which will wait until the server is started,
// issue a single write into the active block, change the time to be far enough
@@ -162,15 +164,15 @@ func TestBootstrapBeforeBufferRotationNoTick(t *testing.T) {
// can be rotated, and then signals the test bootstrapper that it can proceed.
go func() {
// Wait for server to start
- setup.waitUntilServerIsUp()
+ setup.WaitUntilServerIsUp()
// Set the time such that the (previously) active block is ready to be flushed.
now = now.Add(blockSize).Add(ropts.BufferPast()).Add(time.Second)
- setup.setNowFn(now)
+ setup.SetNowFn(now)
// Set the tick interval to be so large we can "hang" a tick at the end, preventing
// it from completing until we're ready to "resume" it later.
- runtimeMgr := setup.storageOpts.RuntimeOptionsManager()
+ runtimeMgr := setup.StorageOpts().RuntimeOptionsManager()
existingOptions := runtimeMgr.Get()
newOptions := existingOptions.SetTickMinimumInterval(2 * time.Hour)
@@ -194,16 +196,16 @@ func TestBootstrapBeforeBufferRotationNoTick(t *testing.T) {
signalCh <- struct{}{}
signalCh <- struct{}{}
}()
- require.NoError(t, setup.startServer()) // Blocks until bootstrap is complete
+ require.NoError(t, setup.StartServer()) // Blocks until bootstrap is complete
// Now that bootstrapping has completed, re-enable ticking so that flushing can take place
- setup.mustSetTickMinimumInterval(100 * time.Millisecond)
+ setup.MustSetTickMinimumInterval(100 * time.Millisecond)
// Wait for a flush to complete
- setup.sleepFor10xTickMinimumInterval()
+ setup.SleepFor10xTickMinimumInterval()
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
}()
// Verify in-memory data match what we expect - commitlog write should not be lost
diff --git a/src/dbnode/integration/bootstrap_helpers.go b/src/dbnode/integration/bootstrap_helpers.go
index f11a4a3ac2..3f11b2d8d0 100644
--- a/src/dbnode/integration/bootstrap_helpers.go
+++ b/src/dbnode/integration/bootstrap_helpers.go
@@ -26,13 +26,11 @@ import (
"testing"
"github.com/m3db/m3/src/dbnode/namespace"
- "github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
- "github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
- bcl "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/commitlog"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
+ "github.com/m3db/m3/src/x/context"
"github.com/stretchr/testify/require"
)
@@ -63,7 +61,7 @@ func newTestBootstrapperSource(
if opts.read != nil {
src.read = opts.read
} else {
- src.read = func(namespaces bootstrap.Namespaces) (bootstrap.NamespaceResults, error) {
+ src.read = func(ctx context.Context, namespaces bootstrap.Namespaces) (bootstrap.NamespaceResults, error) {
return bootstrap.NewNamespaceResults(namespaces), nil
}
}
@@ -100,7 +98,7 @@ type testBootstrapper struct {
type testBootstrapperSourceOptions struct {
availableData func(namespace.Metadata, result.ShardTimeRanges, bootstrap.RunOptions) (result.ShardTimeRanges, error)
availableIndex func(namespace.Metadata, result.ShardTimeRanges, bootstrap.RunOptions) (result.ShardTimeRanges, error)
- read func(namespaces bootstrap.Namespaces) (bootstrap.NamespaceResults, error)
+ read func(ctx context.Context, namespaces bootstrap.Namespaces) (bootstrap.NamespaceResults, error)
}
var _ bootstrap.Source = &testBootstrapperSource{}
@@ -108,7 +106,7 @@ var _ bootstrap.Source = &testBootstrapperSource{}
type testBootstrapperSource struct {
availableData func(namespace.Metadata, result.ShardTimeRanges, bootstrap.RunOptions) (result.ShardTimeRanges, error)
availableIndex func(namespace.Metadata, result.ShardTimeRanges, bootstrap.RunOptions) (result.ShardTimeRanges, error)
- read func(namespaces bootstrap.Namespaces) (bootstrap.NamespaceResults, error)
+ read func(ctx context.Context, namespaces bootstrap.Namespaces) (bootstrap.NamespaceResults, error)
}
func (t testBootstrapperSource) AvailableData(
@@ -128,9 +126,10 @@ func (t testBootstrapperSource) AvailableIndex(
}
func (t testBootstrapperSource) Read(
+ ctx context.Context,
namespaces bootstrap.Namespaces,
) (bootstrap.NamespaceResults, error) {
- return t.read(namespaces)
+ return t.read(ctx, namespaces)
}
func (t testBootstrapperSource) String() string {
@@ -138,30 +137,12 @@ func (t testBootstrapperSource) String() string {
}
func setupCommitLogBootstrapperWithFSInspection(
- t *testing.T, setup *testSetup, commitLogOpts commitlog.Options) {
- noOpAll := bootstrapper.NewNoOpAllBootstrapperProvider()
- bsOpts := newDefaulTestResultOptions(setup.storageOpts)
- bclOpts := bcl.NewOptions().
- SetResultOptions(bsOpts).
- SetCommitLogOptions(commitLogOpts).
- SetRuntimeOptionsManager(runtime.NewOptionsManager())
- fsOpts := setup.storageOpts.CommitLogOptions().FilesystemOptions()
- bs, err := bcl.NewCommitLogBootstrapperProvider(
- bclOpts, mustInspectFilesystem(fsOpts), noOpAll)
- require.NoError(t, err)
- processOpts := bootstrap.NewProcessOptions().
- SetTopologyMapProvider(setup).
- SetOrigin(setup.origin)
- process, err := bootstrap.NewProcessProvider(bs, processOpts, bsOpts)
- require.NoError(t, err)
- setup.storageOpts = setup.storageOpts.SetBootstrapProcessProvider(process)
-}
-
-func mustInspectFilesystem(fsOpts fs.Options) fs.Inspection {
- inspection, err := fs.InspectFilesystem(fsOpts)
- if err != nil {
- panic(err)
- }
-
- return inspection
+ t *testing.T,
+ setup TestSetup,
+ commitLogOpts commitlog.Options,
+) {
+ require.NoError(t, setup.InitializeBootstrappers(InitializeBootstrappersOptions{
+ CommitLogOptions: commitLogOpts,
+ WithCommitLog: true,
+ }))
}
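
// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): a minimal bootstrap.Source under
// the context-threaded Read signature introduced above, mirroring the
// default read installed by newTestBootstrapperSource. noOpSource is a
// hypothetical name.
package integrationsketch

import (
	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/storage/bootstrap"
	"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
	"github.com/m3db/m3/src/x/context"
)

type noOpSource struct{}

var _ bootstrap.Source = noOpSource{}

func (noOpSource) AvailableData(
	ns namespace.Metadata,
	shardTimeRanges result.ShardTimeRanges,
	runOpts bootstrap.RunOptions,
) (result.ShardTimeRanges, error) {
	// Claim everything as available; a real source would inspect state.
	return shardTimeRanges, nil
}

func (noOpSource) AvailableIndex(
	ns namespace.Metadata,
	shardTimeRanges result.ShardTimeRanges,
	runOpts bootstrap.RunOptions,
) (result.ShardTimeRanges, error) {
	return shardTimeRanges, nil
}

// Read now receives a context so bootstrap runs can carry cancellation and
// tracing, which is why every test source in this diff gained a ctx param.
func (noOpSource) Read(
	ctx context.Context,
	namespaces bootstrap.Namespaces,
) (bootstrap.NamespaceResults, error) {
	return bootstrap.NewNamespaceResults(namespaces), nil
}

func (noOpSource) String() string { return "noOpSource" }
// ---------------------------------------------------------------------------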
diff --git a/src/dbnode/integration/client.go b/src/dbnode/integration/client.go
index 5366dad338..841a2d772b 100644
--- a/src/dbnode/integration/client.go
+++ b/src/dbnode/integration/client.go
@@ -41,19 +41,44 @@ import (
"github.com/uber/tchannel-go/thrift"
)
-func tchannelClient(address string) (*tchannel.Channel, rpc.TChanNode, error) {
- channel, err := tchannel.NewChannel("integration-test", nil)
+// TestTChannelClient is a test-only TChannel client that exposes db methods.
+type TestTChannelClient struct {
+ address string
+ channel *tchannel.Channel
+ name string
+ node rpc.TChanNode
+}
+
+// NewTChannelClient creates a new client on the given address.
+func NewTChannelClient(name, address string) (*TestTChannelClient, error) {
+ channel, err := tchannel.NewChannel(name, nil)
if err != nil {
- return nil, nil, err
+ return &TestTChannelClient{}, err
}
+
endpoint := &thrift.ClientOptions{HostPort: address}
thriftClient := thrift.NewClient(channel, nchannel.ChannelName, endpoint)
client := rpc.NewTChanNodeClient(thriftClient)
- return channel, client, nil
+ return &TestTChannelClient{
+ name: name,
+ address: address,
+ channel: channel,
+ node: client,
+ }, nil
+}
+
+// TChannelClientWrite writes a datapoint using a tchannel client.
+func (client *TestTChannelClient) TChannelClientWrite(
+ timeout time.Duration, req *rpc.WriteRequest,
+) error {
+ ctx, _ := thrift.NewContext(timeout)
+ return client.node.Write(ctx, req)
}
-// tchannelClientWriteBatch writes a data map using a tchannel client.
-func tchannelClientWriteBatch(client rpc.TChanNode, timeout time.Duration, namespace ident.ID, seriesList generate.SeriesBlock) error {
+// TChannelClientWriteBatch writes a batch of series using a tchannel client.
+func (client *TestTChannelClient) TChannelClientWriteBatch(
+ timeout time.Duration, namespace ident.ID, seriesList generate.SeriesBlock,
+) error {
var elems []*rpc.WriteBatchRawRequestElement
for _, series := range seriesList {
for _, dp := range series.Data {
@@ -75,33 +100,35 @@ func tchannelClientWriteBatch(client rpc.TChanNode, timeout time.Duration, names
NameSpace: namespace.Bytes(),
Elements: elems,
}
- return client.WriteBatchRaw(ctx, batchReq)
+ return client.node.WriteBatchRaw(ctx, batchReq)
}
-// tchannelClientFetch fulfills a fetch request using a tchannel client.
-func tchannelClientFetch(client rpc.TChanNode, timeout time.Duration, req *rpc.FetchRequest) ([]generate.TestValue, error) {
+// TChannelClientFetch fulfills a fetch request using a tchannel client.
+func (client *TestTChannelClient) TChannelClientFetch(
+ timeout time.Duration, req *rpc.FetchRequest,
+) (*rpc.FetchResult_, error) {
ctx, _ := thrift.NewContext(timeout)
- fetched, err := client.Fetch(ctx, req)
- if err != nil {
- return nil, err
- }
- dp := toDatapoints(fetched)
- return dp, nil
+ return client.node.Fetch(ctx, req)
}
-// tchannelClientTruncate fulfills a namespace truncation request using a tchannel client.
-func tchannelClientTruncate(client rpc.TChanNode, timeout time.Duration, req *rpc.TruncateRequest) (int64, error) {
+// TChannelClientTruncate fulfills a namespace truncation request using a tchannel client.
+func (client *TestTChannelClient) TChannelClientTruncate(
+ timeout time.Duration, req *rpc.TruncateRequest,
+) (int64, error) {
ctx, _ := thrift.NewContext(timeout)
- truncated, err := client.Truncate(ctx, req)
+ truncated, err := client.node.Truncate(ctx, req)
if err != nil {
return 0, err
}
return truncated.NumSeries, nil
}
-func tchannelClientHealth(client rpc.TChanNode) (*rpc.NodeHealthResult_, error) {
- ctx, _ := thrift.NewContext(5 * time.Second)
- return client.Health(ctx)
+// TChannelClientHealth fulfills a client health request using a tchannel client.
+func (client *TestTChannelClient) TChannelClientHealth(
+ timeout time.Duration,
+) (*rpc.NodeHealthResult_, error) {
+ ctx, _ := thrift.NewContext(timeout)
+ return client.node.Health(ctx)
}
func m3dbAdminClient(opts client.AdminOptions) (client.AdminClient, error) {
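// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): typical use of the new exported
// TestTChannelClient wrapper. The address, timeout, and NodeHealthResult_
// field names (Ok, Status) follow the generated rpc package; treat them as
// assumptions here.
package integrationsketch

import (
	"testing"
	"time"

	"github.com/m3db/m3/src/dbnode/integration"
)

func TestNodeHealthViaTChannel(t *testing.T) {
	client, err := integration.NewTChannelClient("integration-test", "127.0.0.1:9000")
	if err != nil {
		t.Fatal(err)
	}
	health, err := client.TChannelClientHealth(5 * time.Second)
	if err != nil {
		t.Fatal(err)
	}
	if !health.Ok {
		t.Fatalf("node unhealthy: %s", health.Status)
	}
}
// ---------------------------------------------------------------------------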
diff --git a/src/dbnode/integration/cluster_add_one_node_test.go b/src/dbnode/integration/cluster_add_one_node_test.go
index 5981b5492a..d2b4c1beb9 100644
--- a/src/dbnode/integration/cluster_add_one_node_test.go
+++ b/src/dbnode/integration/cluster_add_one_node_test.go
@@ -68,7 +68,7 @@ func testClusterAddOneNode(t *testing.T, verifyCommitlogCanBootstrapAfterNodeJoi
SetBufferPast(10*time.Minute).
SetBufferFuture(2*time.Minute)))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{namesp}).
// Prevent snapshotting from happening too frequently to allow for the
// possibility of a snapshot occurring after the shard set is assigned,
@@ -159,7 +159,7 @@ func testClusterAddOneNode(t *testing.T, verifyCommitlogCanBootstrapAfterNodeJoi
}
var (
- now = setups[0].getNowFn()
+ now = setups[0].NowFn()()
blockStart = now
blockSize = namesp.Options().RetentionOptions().BlockSize()
seriesMaps = generate.BlocksByStart([]generate.BlockConfig{
@@ -203,17 +203,17 @@ func testClusterAddOneNode(t *testing.T, verifyCommitlogCanBootstrapAfterNodeJoi
require.Equal(t, 1, len(expectedSeriesIDs[1]))
// Start the first server with filesystem bootstrapper.
- require.NoError(t, setups[0].startServer())
+ require.NoError(t, setups[0].StartServer())
// Start the last server with peers and filesystem bootstrappers, no shards
// are assigned at first.
- require.NoError(t, setups[1].startServer())
+ require.NoError(t, setups[1].StartServer())
log.Debug("servers are now up")
// Stop the servers on test completion.
defer func() {
- setups.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ setups.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
@@ -227,7 +227,7 @@ func testClusterAddOneNode(t *testing.T, verifyCommitlogCanBootstrapAfterNodeJoi
time.Sleep(time.Second)
for _, setup := range setups {
now = now.Add(time.Second)
- setup.setNowFn(now)
+ setup.SetNowFn(now)
}
}
}()
@@ -261,7 +261,7 @@ func testClusterAddOneNode(t *testing.T, verifyCommitlogCanBootstrapAfterNodeJoi
doneWritingWhilePeerStreaming := make(chan struct{})
go func() {
for _, testData := range seriesReceivedDuringPeerStreaming {
- err := setups[1].writeBatch(namesp.ID(), testData)
+ err := setups[1].WriteBatch(namesp.ID(), testData)
// We expect consistency errors because we're only running with
// R.F = 2 and one node is leaving and one node is joining for
// each of the shards that is changing hands.
@@ -273,7 +273,7 @@ func testClusterAddOneNode(t *testing.T, verifyCommitlogCanBootstrapAfterNodeJoi
}()
log.Debug("waiting for shards to be bootstrapped")
- waitUntilHasBootstrappedShardsExactly(setups[1].db, testutil.Uint32Range(midShard+1, maxShard))
+ waitUntilHasBootstrappedShardsExactly(setups[1].DB(), testutil.Uint32Range(midShard+1, maxShard))
log.Debug("waiting for background writes to complete")
<-doneWritingWhilePeerStreaming
@@ -315,8 +315,8 @@ func testClusterAddOneNode(t *testing.T, verifyCommitlogCanBootstrapAfterNodeJoi
log.Debug("resharding to shed shards from first node")
svc.SetInstances(instances.added)
svcs.NotifyServiceUpdate("m3db")
- waitUntilHasBootstrappedShardsExactly(setups[0].db, testutil.Uint32Range(minShard, midShard))
- waitUntilHasBootstrappedShardsExactly(setups[1].db, testutil.Uint32Range(midShard+1, maxShard))
+ waitUntilHasBootstrappedShardsExactly(setups[0].DB(), testutil.Uint32Range(minShard, midShard))
+ waitUntilHasBootstrappedShardsExactly(setups[1].DB(), testutil.Uint32Range(midShard+1, maxShard))
log.Debug("verifying data in servers matches expected data set")
@@ -336,11 +336,11 @@ func testClusterAddOneNode(t *testing.T, verifyCommitlogCanBootstrapAfterNodeJoi
// bootstrappable from the commitlog bootstrapper.
// Reset the topology initializer as the M3DB session will have closed it.
- require.NoError(t, setups[1].stopServer())
+ require.NoError(t, setups[1].StopServer())
topoOpts := topology.NewDynamicOptions().
SetConfigServiceClient(fake.NewM3ClusterClient(svcs, nil))
topoInit := topology.NewDynamicInitializer(topoOpts)
- setups[1].topoInit = topoInit
+ setups[1].SetTopologyInitializer(topoInit)
// Start the server that performed peer streaming with only the filesystem and
// commitlog bootstrapper and make sure it has all the expected data.
diff --git a/src/dbnode/integration/commitlog_bootstrap_coldwrites_test.go b/src/dbnode/integration/commitlog_bootstrap_coldwrites_test.go
index 8794ef028c..f85d48e4e0 100644
--- a/src/dbnode/integration/commitlog_bootstrap_coldwrites_test.go
+++ b/src/dbnode/integration/commitlog_bootstrap_coldwrites_test.go
@@ -56,25 +56,25 @@ func testCommitLogBootstrapColdWrites(t *testing.T, setTestOpts setTestOptions,
SetRetentionOptions(ropts).
SetColdWritesEnabled(true))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1})
if setTestOpts != nil {
opts = setTestOpts(t, opts)
ns1 = opts.Namespaces()[0]
}
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- commitLogOpts := setup.storageOpts.CommitLogOptions().
+ commitLogOpts := setup.StorageOpts().CommitLogOptions().
SetFlushInterval(defaultIntegrationTestFlushInterval)
- setup.storageOpts = setup.storageOpts.SetCommitLogOptions(commitLogOpts)
+ setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Info("commit log bootstrap test")
- start := setup.getNowFn()
+ start := setup.NowFn()()
log.Info("writing data files")
dataFilesData := []generate.BlockConfig{
@@ -113,14 +113,14 @@ func testCommitLogBootstrapColdWrites(t *testing.T, setTestOpts setTestOptions,
// Setup bootstrapper after writing data so filesystem inspection can find it.
setupCommitLogBootstrapperWithFSInspection(t, setup, commitLogOpts)
- setup.setNowFn(start)
+ setup.SetNowFn(start)
// Start the server with filesystem bootstrapper
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
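
The hunks above capture the shape of this refactor: the integration harness moves from unexported struct fields (setup.storageOpts, setup.getNowFn()) to an exported TestSetup interface with accessor methods, and NowFn() now returns the clock function itself rather than the current time, hence the double invocation NowFn()(). A minimal sketch of the interface these call sites imply — method set and types are inferred from this diff only, the real interface carries more:

    package integration

    import (
        "time"

        "github.com/m3db/m3/src/dbnode/storage"
    )

    // TestSetup sketches the exported surface implied by the call sites in
    // this diff; only the methods visible above are listed.
    type TestSetup interface {
        StorageOpts() storage.Options
        SetStorageOpts(opts storage.Options)
        NowFn() func() time.Time // returns the clock fn; invoke it for "now"
        SetNowFn(t time.Time)
        StartServer() error
        StopServer() error
        Close()
    }

So start := setup.NowFn()() first fetches the clock function and then calls it, replacing the old start := setup.getNowFn().
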
diff --git a/src/dbnode/integration/commitlog_bootstrap_helpers.go b/src/dbnode/integration/commitlog_bootstrap_helpers.go
index 4c821abca7..0bc98de968 100644
--- a/src/dbnode/integration/commitlog_bootstrap_helpers.go
+++ b/src/dbnode/integration/commitlog_bootstrap_helpers.go
@@ -101,7 +101,7 @@ func generateSeriesMaps(numBlocks int, updateConfig generate.UpdateBlockConfig,
func writeCommitLogData(
t *testing.T,
- s *testSetup,
+ s TestSetup,
opts commitlog.Options,
data generate.SeriesBlocksByStart,
namespace namespace.Metadata,
@@ -112,7 +112,7 @@ func writeCommitLogData(
func writeCommitLogDataSpecifiedTS(
t *testing.T,
- s *testSetup,
+ s TestSetup,
opts commitlog.Options,
data generate.SeriesBlocksByStart,
namespace namespace.Metadata,
@@ -124,7 +124,7 @@ func writeCommitLogDataSpecifiedTS(
func writeCommitLogDataWithPredicate(
t *testing.T,
- s *testSetup,
+ s TestSetup,
opts commitlog.Options,
data generate.SeriesBlocksByStart,
namespace namespace.Metadata,
@@ -135,7 +135,7 @@ func writeCommitLogDataWithPredicate(
func writeCommitLogDataBase(
t *testing.T,
- s *testSetup,
+ s TestSetup,
opts commitlog.Options,
data generate.SeriesBlocksByStart,
namespace namespace.Metadata,
@@ -151,16 +151,18 @@ func writeCommitLogDataBase(
t, defaultIntegrationTestFlushInterval, opts.FlushInterval())
var (
- seriesLookup = newCommitLogSeriesStates(data)
- shardSet = s.shardSet
+ seriesLookup = newCommitLogSeriesStates(data)
+ shardSet = s.ShardSet()
+ tagEncoderPool = opts.FilesystemOptions().TagEncoderPool()
+ tagSliceIter = ident.NewTagsIterator(ident.Tags{})
)
// Write out commit log data.
for currTs, blk := range data {
if specifiedTS != nil {
- s.setNowFn(*specifiedTS)
+ s.SetNowFn(*specifiedTS)
} else {
- s.setNowFn(currTs.ToTime())
+ s.SetNowFn(currTs.ToTime())
}
ctx := context.NewContext()
defer ctx.Close()
@@ -182,11 +184,21 @@ func writeCommitLogDataBase(
for _, point := range points {
series, ok := seriesLookup[point.ID.String()]
require.True(t, ok)
+
+ tagSliceIter.Reset(series.tags)
+
+ tagEncoder := tagEncoderPool.Get()
+ err := tagEncoder.Encode(tagSliceIter)
+ require.NoError(t, err)
+
+ encodedTagsChecked, ok := tagEncoder.Data()
+ require.True(t, ok)
+
cID := ts.Series{
Namespace: namespace.ID(),
Shard: shardSet.Lookup(point.ID),
ID: point.ID,
- Tags: series.tags,
+ EncodedTags: ts.EncodedTags(encodedTagsChecked.Bytes()),
UniqueIndex: series.uniqueIndex,
}
if pred(point.Value) {
@@ -201,7 +213,7 @@ func writeCommitLogDataBase(
func writeSnapshotsWithPredicate(
t *testing.T,
- s *testSetup,
+ s TestSetup,
opts commitlog.Options,
data generate.SeriesBlocksByStart,
volume int,
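
Context for the EncodedTags change above: commit log writes now carry tags pre-encoded to bytes rather than an ident.Tags slice, so the helper encodes once through the pooled tag encoder hanging off the commit log's filesystem options. A hedged sketch of that step as a standalone helper (the error message is illustrative, not from the source):

    package integration

    import (
        "errors"

        "github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
        "github.com/m3db/m3/src/dbnode/ts"
        "github.com/m3db/m3/src/x/ident"
    )

    // encodeSeriesTags mirrors the encode-once step used above: it turns an
    // ident.Tags into the pre-encoded form the commit log now expects.
    func encodeSeriesTags(opts commitlog.Options, tags ident.Tags) (ts.EncodedTags, error) {
        iter := ident.NewTagsIterator(tags)
        enc := opts.FilesystemOptions().TagEncoderPool().Get()
        if err := enc.Encode(iter); err != nil {
            return nil, err
        }
        data, ok := enc.Data() // checked.Bytes holding the encoded tags
        if !ok {
            return nil, errors.New("tag encoder returned no data")
        }
        return ts.EncodedTags(data.Bytes()), nil
    }
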
diff --git a/src/dbnode/integration/commitlog_bootstrap_index_perf_speed_test.go b/src/dbnode/integration/commitlog_bootstrap_index_perf_speed_test.go
index 7d4a2dcf3e..5d056e9750 100644
--- a/src/dbnode/integration/commitlog_bootstrap_index_perf_speed_test.go
+++ b/src/dbnode/integration/commitlog_bootstrap_index_perf_speed_test.go
@@ -71,20 +71,20 @@ func TestCommitLogIndexPerfSpeedBootstrap(t *testing.T) {
SetBlockSize(2 * blockSize))
ns, err := namespace.NewMetadata(testNamespaces[0], nsOpts)
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns}).
// Allow for wall clock timing
SetNowFn(time.Now)
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- commitLogOpts := setup.storageOpts.CommitLogOptions().
+ commitLogOpts := setup.StorageOpts().CommitLogOptions().
SetFlushInterval(defaultIntegrationTestFlushInterval)
- setup.storageOpts = setup.storageOpts.SetCommitLogOptions(commitLogOpts)
+ setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Info("commit log bootstrap test")
// Write test data
@@ -137,7 +137,7 @@ func TestCommitLogIndexPerfSpeedBootstrap(t *testing.T) {
log.Info("writing data")
- now := setup.getNowFn()
+ now := setup.NowFn()()
blockStart := now.Add(-3 * blockSize)
// create new commit log
@@ -145,19 +145,21 @@ func TestCommitLogIndexPerfSpeedBootstrap(t *testing.T) {
require.NoError(t, err)
require.NoError(t, commitLog.Open())
- // NB(r): Write points using no up front series metadata or point
- // generation so that the memory usage is constant during the write phase
ctx := context.NewContext()
defer ctx.Close()
- shardSet := setup.shardSet
+
+ shardSet := setup.ShardSet()
idPrefix := "test.id.test.id.test.id.test.id.test.id.test.id.test.id.test.id"
idPrefixBytes := []byte(idPrefix)
- checkedBytes := checked.NewBytes(nil, nil)
- seriesID := ident.BinaryID(checkedBytes)
numBytes := make([]byte, 8)
numHexBytes := make([]byte, hex.EncodedLen(len(numBytes)))
+ tagEncoderPool := commitLogOpts.FilesystemOptions().TagEncoderPool()
+ tagSliceIter := ident.NewTagsIterator(ident.Tags{})
for i := 0; i < numPoints; i++ {
for j := 0; j < numSeries; j++ {
+ checkedBytes := checked.NewBytes(nil, nil)
+ seriesID := ident.BinaryID(checkedBytes)
+
// Write the ID prefix
checkedBytes.Resize(0)
checkedBytes.AppendAll(idPrefixBytes)
@@ -171,11 +173,19 @@ func TestCommitLogIndexPerfSpeedBootstrap(t *testing.T) {
// Use the tag sets appropriate for this series number
seriesTags := tagSets[j%len(tagSets)]
+ tagSliceIter.Reset(seriesTags)
+ tagEncoder := tagEncoderPool.Get()
+ err := tagEncoder.Encode(tagSliceIter)
+ require.NoError(t, err)
+
+ encodedTagsChecked, ok := tagEncoder.Data()
+ require.True(t, ok)
+
series := ts.Series{
Namespace: ns.ID(),
Shard: shardSet.Lookup(seriesID),
ID: seriesID,
- Tags: seriesTags,
+ EncodedTags: ts.EncodedTags(encodedTagsChecked.Bytes()),
UniqueIndex: uint64(j),
}
dp := ts.Datapoint{
@@ -207,15 +217,15 @@ func TestCommitLogIndexPerfSpeedBootstrap(t *testing.T) {
setupCommitLogBootstrapperWithFSInspection(t, setup, commitLogOpts)
// restore now time so measurements take effect
- setup.storageOpts = setup.storageOpts.SetClockOptions(clock.NewOptions())
+ setup.SetStorageOpts(setup.StorageOpts().SetClockOptions(clock.NewOptions()))
// Start the server with filesystem bootstrapper
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
}
diff --git a/src/dbnode/integration/commitlog_bootstrap_index_test.go b/src/dbnode/integration/commitlog_bootstrap_index_test.go
index bf0eceaf7c..1302d4161f 100644
--- a/src/dbnode/integration/commitlog_bootstrap_index_test.go
+++ b/src/dbnode/integration/commitlog_bootstrap_index_test.go
@@ -57,23 +57,23 @@ func TestCommitLogIndexBootstrap(t *testing.T) {
require.NoError(t, err)
ns2, err := namespace.NewMetadata(testNamespaces[1], nsOpts)
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1, ns2})
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- commitLogOpts := setup.storageOpts.CommitLogOptions().
+ commitLogOpts := setup.StorageOpts().CommitLogOptions().
SetFlushInterval(defaultIntegrationTestFlushInterval)
- setup.storageOpts = setup.storageOpts.SetCommitLogOptions(commitLogOpts)
+ setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Info("commit log bootstrap test")
// Write test data
log.Info("generating data")
- now := setup.getNowFn()
+ now := setup.NowFn()()
fooSeries := generate.Series{
ID: ident.StringID("foo"),
Tags: ident.NewTags(ident.StringTag("city", "new_york"), ident.StringTag("foo", "foo")),
@@ -134,14 +134,14 @@ func TestCommitLogIndexBootstrap(t *testing.T) {
// Setup bootstrapper after writing data so filesystem inspection can find it.
setupCommitLogBootstrapperWithFSInspection(t, setup, commitLogOpts)
- setup.setNowFn(now)
+ setup.SetNowFn(now)
// Start the server with filesystem bootstrapper
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
@@ -151,7 +151,7 @@ func TestCommitLogIndexBootstrap(t *testing.T) {
verifySeriesMaps(t, setup, testNamespaces[1], seriesMaps)
// Issue some index queries
- session, err := setup.m3dbClient.DefaultSession()
+ session, err := setup.M3DBClient().DefaultSession()
require.NoError(t, err)
start := now.Add(-rOpts.RetentionPeriod())
diff --git a/src/dbnode/integration/commitlog_bootstrap_merge_test.go b/src/dbnode/integration/commitlog_bootstrap_merge_test.go
index 0c2b393bba..95004c842c 100644
--- a/src/dbnode/integration/commitlog_bootstrap_merge_test.go
+++ b/src/dbnode/integration/commitlog_bootstrap_merge_test.go
@@ -28,13 +28,7 @@ import (
"github.com/m3db/m3/src/dbnode/integration/generate"
"github.com/m3db/m3/src/dbnode/namespace"
- persistfs "github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/retention"
- "github.com/m3db/m3/src/dbnode/runtime"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
- bcl "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/commitlog"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/fs"
xtime "github.com/m3db/m3/src/x/time"
"github.com/stretchr/testify/require"
@@ -70,24 +64,24 @@ func TestCommitLogAndFSMergeBootstrap(t *testing.T) {
)
ns1, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().SetRetentionOptions(ns1ROpts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1})
// Test setup
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- commitLogOpts := setup.storageOpts.CommitLogOptions().
+ commitLogOpts := setup.StorageOpts().CommitLogOptions().
SetFlushInterval(defaultIntegrationTestFlushInterval)
- setup.storageOpts = setup.storageOpts.SetCommitLogOptions(commitLogOpts)
+ setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Info("commit log + fs merge bootstrap test")
// generate and write test data
var (
- t0 = setup.getNowFn()
+ t0 = setup.NowFn()()
t1 = t0.Add(ns1BlockSize)
t2 = t1.Add(ns1BlockSize)
t3 = t2.Add(ns1BlockSize)
@@ -118,49 +112,21 @@ func TestCommitLogAndFSMergeBootstrap(t *testing.T) {
}
writeCommitLogData(t, setup, commitLogOpts, commitlogSeriesMaps, ns1, false)
- // commit log bootstrapper (must be after writing out commitlog files so inspection finds files)
- noOpAll := bootstrapper.NewNoOpAllBootstrapperProvider()
- bsOpts := newDefaulTestResultOptions(setup.storageOpts)
- bclOpts := bcl.NewOptions().
- SetResultOptions(bsOpts).
- SetCommitLogOptions(commitLogOpts).
- SetRuntimeOptionsManager(runtime.NewOptionsManager())
- fsOpts := setup.storageOpts.CommitLogOptions().FilesystemOptions()
-
- commitLogBootstrapper, err := bcl.NewCommitLogBootstrapperProvider(
- bclOpts, mustInspectFilesystem(fsOpts), noOpAll)
- require.NoError(t, err)
- // fs bootstrapper
- persistMgr, err := persistfs.NewPersistManager(fsOpts)
- require.NoError(t, err)
- storageIdxOpts := setup.storageOpts.IndexOptions()
- bfsOpts := fs.NewOptions().
- SetResultOptions(bsOpts).
- SetFilesystemOptions(fsOpts).
- SetIndexOptions(storageIdxOpts).
- SetDatabaseBlockRetrieverManager(setup.storageOpts.DatabaseBlockRetrieverManager()).
- SetPersistManager(persistMgr).
- SetCompactor(newCompactor(t, storageIdxOpts))
- fsBootstrapper, err := fs.NewFileSystemBootstrapperProvider(bfsOpts, commitLogBootstrapper)
- require.NoError(t, err)
- // bootstrapper storage opts
- processOpts := bootstrap.NewProcessOptions().
- SetTopologyMapProvider(setup).
- SetOrigin(setup.origin)
- process, err := bootstrap.NewProcessProvider(
- fsBootstrapper, processOpts, bsOpts)
- require.NoError(t, err)
- setup.storageOpts = setup.storageOpts.SetBootstrapProcessProvider(process)
+ require.NoError(t, setup.InitializeBootstrappers(InitializeBootstrappersOptions{
+ CommitLogOptions: commitLogOpts,
+ WithCommitLog: true,
+ WithFileSystem: true,
+ }))
log.Info("moving time forward and starting server")
- setup.setNowFn(t3)
+ setup.SetNowFn(t3)
// Start the server
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
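
The replacement above collapses roughly thirty lines of hand-wired bootstrapper plumbing (commit log provider, fs provider, process provider) into one call on the setup. Going by the fields visible in this diff, usage reduces to the sketch below; the ordering comment reflects the deleted wiring, where the fs bootstrapper wrapped the commit log one:

    // Sketch of the new helper as used in this diff: the filesystem
    // bootstrapper runs first and falls through to the commit log one.
    func initFSAndCommitLogBootstrappers(t *testing.T, setup TestSetup, commitLogOpts commitlog.Options) {
        require.NoError(t, setup.InitializeBootstrappers(InitializeBootstrappersOptions{
            CommitLogOptions: commitLogOpts,
            WithCommitLog:    true,
            WithFileSystem:   true,
        }))
    }
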
diff --git a/src/dbnode/integration/commitlog_bootstrap_multi_ns_test.go b/src/dbnode/integration/commitlog_bootstrap_multi_ns_test.go
index b330b15cd4..0a7c1532cf 100644
--- a/src/dbnode/integration/commitlog_bootstrap_multi_ns_test.go
+++ b/src/dbnode/integration/commitlog_bootstrap_multi_ns_test.go
@@ -27,8 +27,8 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/integration/generate"
- "github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/retention"
"github.com/stretchr/testify/require"
)
@@ -52,30 +52,30 @@ func TestCommitLogBootstrapMultipleNamespaces(t *testing.T) {
ns2, err := namespace.NewMetadata(testNamespaces[1], namespace.NewOptions().SetRetentionOptions(ns2ROpts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1, ns2})
// Test setup
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- commitLogOpts := setup.storageOpts.CommitLogOptions().
+ commitLogOpts := setup.StorageOpts().CommitLogOptions().
SetFlushInterval(defaultIntegrationTestFlushInterval)
- setup.storageOpts = setup.storageOpts.SetCommitLogOptions(commitLogOpts)
+ setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
// Write test data for ns1
log.Info("generating data - ns1")
- now := setup.getNowFn()
+ now := setup.NowFn()()
ns1SeriesMap := generate.BlocksByStart([]generate.BlockConfig{
{IDs: []string{"foo", "bar"}, NumPoints: 20, Start: now.Add(ns1BlockSize)},
{IDs: []string{"bar", "baz"}, NumPoints: 50, Start: now.Add(2 * ns1BlockSize)},
{IDs: []string{"and", "one"}, NumPoints: 40, Start: now.Add(3 * ns1BlockSize)},
})
- setup.namespaceMetadataOrFail(testNamespaces[0])
+ setup.NamespaceMetadataOrFail(testNamespaces[0])
log.Info("writing data - ns1")
writeCommitLogData(t, setup, commitLogOpts, ns1SeriesMap, ns1, false)
log.Info("written data - ns1")
@@ -88,7 +88,7 @@ func TestCommitLogBootstrapMultipleNamespaces(t *testing.T) {
{IDs: []string{"cat", "hax"}, NumPoints: 80, Start: now.Add(3 * ns2BlockSize)},
{IDs: []string{"why", "this"}, NumPoints: 40, Start: now.Add(4 * ns2BlockSize)},
})
- setup.namespaceMetadataOrFail(testNamespaces[1])
+ setup.NamespaceMetadataOrFail(testNamespaces[1])
log.Info("writing data - ns2")
writeCommitLogData(t, setup, commitLogOpts, ns2SeriesMap, ns2, false)
log.Info("written data - ns2")
@@ -97,19 +97,19 @@ func TestCommitLogBootstrapMultipleNamespaces(t *testing.T) {
setupCommitLogBootstrapperWithFSInspection(t, setup, commitLogOpts)
later := now.Add(4 * ns1BlockSize)
- setup.setNowFn(later)
+ setup.SetNowFn(later)
// Start the server with filesystem bootstrapper
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
log.Info("waiting until data is bootstrapped")
- bootstrapped := waitUntil(func() bool { return setup.db.IsBootstrapped() }, 20*time.Second)
+ bootstrapped := waitUntil(func() bool { return setup.DB().IsBootstrapped() }, 20*time.Second)
require.True(t, bootstrapped)
log.Info("data bootstrapped")
diff --git a/src/dbnode/integration/commitlog_bootstrap_only_reads_required_files_test.go b/src/dbnode/integration/commitlog_bootstrap_only_reads_required_files_test.go
index 847a304373..5b946921d5 100644
--- a/src/dbnode/integration/commitlog_bootstrap_only_reads_required_files_test.go
+++ b/src/dbnode/integration/commitlog_bootstrap_only_reads_required_files_test.go
@@ -26,8 +26,8 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/retention"
"github.com/stretchr/testify/require"
)
@@ -47,23 +47,23 @@ func TestCommitLogBootstrapOnlyReadsRequiredFiles(t *testing.T) {
)
ns1, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().SetRetentionOptions(ropts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1})
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- commitLogOpts := setup.storageOpts.CommitLogOptions().
+ commitLogOpts := setup.StorageOpts().CommitLogOptions().
SetFlushInterval(defaultIntegrationTestFlushInterval)
- setup.storageOpts = setup.storageOpts.SetCommitLogOptions(commitLogOpts)
+ setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Info("commit log bootstrap test")
// Write test data
log.Info("generating data")
- now := setup.getNowFn()
+ now := setup.NowFn()()
seriesMaps := generateSeriesMaps(30, nil, now.Add(-2*blockSize), now.Add(-blockSize))
log.Info("writing data")
writeCommitLogData(t, setup, commitLogOpts, seriesMaps, ns1, false)
@@ -91,14 +91,14 @@ func TestCommitLogBootstrapOnlyReadsRequiredFiles(t *testing.T) {
// Setup bootstrapper after writing data so filesystem inspection can find it.
setupCommitLogBootstrapperWithFSInspection(t, setup, commitLogOpts)
- setup.setNowFn(now)
+ setup.SetNowFn(now)
// Start the server with filesystem bootstrapper
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
diff --git a/src/dbnode/integration/commitlog_bootstrap_test.go b/src/dbnode/integration/commitlog_bootstrap_test.go
index 10af850eff..d6ab5e76be 100644
--- a/src/dbnode/integration/commitlog_bootstrap_test.go
+++ b/src/dbnode/integration/commitlog_bootstrap_test.go
@@ -27,8 +27,8 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/integration/generate"
- "github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/retention"
"github.com/stretchr/testify/require"
)
@@ -55,7 +55,7 @@ func testCommitLogBootstrap(t *testing.T, setTestOpts setTestOptions, updateInpu
require.NoError(t, err)
ns2, err := namespace.NewMetadata(testNamespaces[1], namespace.NewOptions().SetRetentionOptions(ropts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1, ns2})
if setTestOpts != nil {
opts = setTestOpts(t, opts)
@@ -63,20 +63,20 @@ func testCommitLogBootstrap(t *testing.T, setTestOpts setTestOptions, updateInpu
ns2 = opts.Namespaces()[1]
}
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- commitLogOpts := setup.storageOpts.CommitLogOptions().
+ commitLogOpts := setup.StorageOpts().CommitLogOptions().
SetFlushInterval(defaultIntegrationTestFlushInterval)
- setup.storageOpts = setup.storageOpts.SetCommitLogOptions(commitLogOpts)
+ setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Info("commit log bootstrap test")
// Write test data
log.Info("generating data")
- now := setup.getNowFn()
+ now := setup.NowFn()()
seriesMaps := generateSeriesMaps(30, updateInputConfig, now.Add(-2*blockSize), now.Add(-blockSize))
log.Info("writing data")
writeCommitLogData(t, setup, commitLogOpts, seriesMaps, ns1, false)
@@ -85,14 +85,14 @@ func testCommitLogBootstrap(t *testing.T, setTestOpts setTestOptions, updateInpu
// Setup bootstrapper after writing data so filesystem inspection can find it.
setupCommitLogBootstrapperWithFSInspection(t, setup, commitLogOpts)
- setup.setNowFn(now)
+ setup.SetNowFn(now)
// Start the server with filesystem bootstrapper
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
diff --git a/src/dbnode/integration/commitlog_bootstrap_unowned_shard_test.go b/src/dbnode/integration/commitlog_bootstrap_unowned_shard_test.go
index e815ee1c5a..74c4b19162 100644
--- a/src/dbnode/integration/commitlog_bootstrap_unowned_shard_test.go
+++ b/src/dbnode/integration/commitlog_bootstrap_unowned_shard_test.go
@@ -86,7 +86,7 @@ func TestCommitLogBootstrapUnownedShard(t *testing.T) {
SetConfigServiceClient(fake.NewM3ClusterClient(svcs, nil))
topoInit := topology.NewDynamicInitializer(topoOpts)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1}).
SetNumShards(numShards)
setupOpts := []bootstrappableTestSetupOptions{
@@ -100,12 +100,12 @@ func TestCommitLogBootstrapUnownedShard(t *testing.T) {
// Only set this up for the first setup because we're only writing commit
// logs for the first server.
setup := setups[0]
- commitLogOpts := setup.storageOpts.CommitLogOptions().
+ commitLogOpts := setup.StorageOpts().CommitLogOptions().
SetFlushInterval(defaultIntegrationTestFlushInterval)
- setup.storageOpts = setup.storageOpts.SetCommitLogOptions(commitLogOpts)
+ setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))
log.Info("generating data")
- now := setup.getNowFn()
+ now := setup.NowFn()()
seriesMaps := generateSeriesMaps(30, nil, now.Add(-2*blockSize), now.Add(-blockSize))
log.Info("writing data")
// Write commit log with generated data that spreads across all shards
@@ -120,20 +120,20 @@ func TestCommitLogBootstrapUnownedShard(t *testing.T) {
// Start the servers.
for _, setup := range setups {
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
}
// Defer stop the servers.
defer func() {
- setups.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ setups.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
// Only fetch blocks for shards owned by node 0.
metadatasByShard, err := m3dbClientFetchBlocksMetadata(
- setup.m3dbVerificationAdminClient, testNamespaces[0], node0OwnedShards,
+ setup.M3DBVerificationAdminClient(), testNamespaces[0], node0OwnedShards,
now.Add(-2*blockSize), now, topology.ReadConsistencyLevelMajority)
require.NoError(t, err)
diff --git a/src/dbnode/integration/commitlog_bootstrap_with_snapshots_after_restart_test.go b/src/dbnode/integration/commitlog_bootstrap_with_snapshots_after_restart_test.go
new file mode 100644
index 0000000000..b0756493b8
--- /dev/null
+++ b/src/dbnode/integration/commitlog_bootstrap_with_snapshots_after_restart_test.go
@@ -0,0 +1,178 @@
+// +build integration
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package integration
+
+import (
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/integration/generate"
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/persist"
+ "github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/schema"
+ "github.com/m3db/m3/src/dbnode/retention"
+ xclock "github.com/m3db/m3/src/x/clock"
+ "github.com/m3db/m3/src/x/ident"
+ xtime "github.com/m3db/m3/src/x/time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestCommitLogBootstrapWithSnapshotsAfterRestart(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow() // Just skip if we're doing a short run
+ }
+
+ // Test setup
+ var (
+ ropts = retention.NewOptions().SetRetentionPeriod(12 * time.Hour)
+ blockSize = ropts.BlockSize()
+ )
+ ns, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().
+ SetRetentionOptions(ropts).
+ SetColdWritesEnabled(true))
+ require.NoError(t, err)
+ opts := NewTestOptions(t).
+ SetNamespaces([]namespace.Metadata{ns}).
+ SetTickMinimumInterval(100 * time.Millisecond)
+
+ setup, err := NewTestSetup(t, opts, nil)
+ require.NoError(t, err)
+ defer setup.Close()
+
+ commitLogOpts := setup.StorageOpts().CommitLogOptions().
+ SetFlushInterval(defaultIntegrationTestFlushInterval)
+ setup.SetStorageOpts(setup.StorageOpts().
+ SetCommitLogOptions(commitLogOpts).
+ SetMediatorTickInterval(50 * time.Millisecond))
+
+ log := setup.StorageOpts().InstrumentOptions().Logger()
+ log.Info("commit log bootstrap with snapshots after restart test")
+
+ // Start the server with filesystem bootstrapper
+ require.NoError(t, setup.StartServer())
+ log.Debug("server is now up")
+
+ // Stop the server
+ defer func() {
+ require.NoError(t, setup.StopServer())
+ log.Debug("server is now down")
+ }()
+
+ // Write test data
+ log.Info("writing test data")
+ now := setup.NowFn()().Truncate(blockSize)
+ seriesMaps := make(map[xtime.UnixNano]generate.SeriesBlock)
+ inputData := []generate.BlockConfig{
+ {IDs: []string{"foo", "bar"}, NumPoints: 50, Start: now.Add(-5 * blockSize)},
+ {IDs: []string{"foo", "qux"}, NumPoints: 50, Start: now.Add(-4 * blockSize)},
+ {IDs: []string{"qux", "quux"}, NumPoints: 50, Start: now.Add(-3 * blockSize)},
+ {IDs: []string{"corge", "porgie"}, NumPoints: 50, Start: now.Add(-2 * blockSize)},
+ }
+ for _, input := range inputData {
+ testData := generate.Block(input)
+ seriesMaps[xtime.ToUnixNano(input.Start)] = testData
+ require.NoError(t, setup.WriteBatch(testNamespaces[0], testData))
+ }
+
+ // Wait until snapshots are on disk.
+ fsOpts := commitLogOpts.FilesystemOptions()
+ expectedNumSeries := 0
+ for _, data := range inputData {
+ expectedNumSeries += len(data.IDs)
+ }
+ xclock.WaitUntil(func() bool {
+ var totalNumEntries int
+ for _, numEntries := range getNumEntriesPerBlockStart(ns.ID(), opts.NumShards(), fsOpts) {
+ totalNumEntries += numEntries
+ }
+ return totalNumEntries == expectedNumSeries
+ }, time.Minute)
+
+ // Stop and restart server to allow bootstrapping from commit logs.
+ require.NoError(t, setup.StopServer())
+ // Setup commitlog bootstrapper after writing data so filesystem inspection can find it.
+ require.NoError(t, setup.InitializeBootstrappers(InitializeBootstrappersOptions{
+ CommitLogOptions: commitLogOpts,
+ WithCommitLog: true,
+ // Also set up fs bootstrapper to ensure correct behaviour on restart w/ fs bootstrapper enabled.
+ WithFileSystem: true,
+ }))
+ require.NoError(t, setup.StartServer())
+ log.Debug("server restarted")
+
+ // Verify that data is what we expect.
+ metadatasByShard := testSetupMetadatas(t, setup, testNamespaces[0], now.Add(-5*blockSize), now.Add(-blockSize))
+ observedSeriesMaps := testSetupToSeriesMaps(t, setup, ns, metadatasByShard)
+ verifySeriesMapsEqual(t, seriesMaps, observedSeriesMaps)
+
+ // Wait until empty snapshots are on disk.
+ xclock.WaitUntil(func() bool {
+ var totalNumEntries int
+ for _, numEntries := range getNumEntriesPerBlockStart(ns.ID(), opts.NumShards(), fsOpts) {
+ totalNumEntries += numEntries
+ }
+ return totalNumEntries == 0
+ }, time.Minute)
+
+ // Verify that data is still what we expect.
+ metadatasByShard = testSetupMetadatas(t, setup, testNamespaces[0], now.Add(-5*blockSize), now.Add(-blockSize))
+ observedSeriesMaps = testSetupToSeriesMaps(t, setup, ns, metadatasByShard)
+ verifySeriesMapsEqual(t, seriesMaps, observedSeriesMaps)
+}
+
+func getNumEntriesPerBlockStart(
+ nsID ident.ID,
+ numShards int,
+ fsOpts fs.Options,
+) map[xtime.UnixNano]int {
+ numEntriesPerBlockStart := make(map[xtime.UnixNano]int)
+ for shard := 0; shard < numShards; shard++ {
+ infoFiles := fs.ReadInfoFiles(
+ fsOpts.FilePathPrefix(),
+ nsID,
+ uint32(shard),
+ fsOpts.InfoReaderBufferSize(),
+ fsOpts.DecodingOptions(),
+ persist.FileSetSnapshotType,
+ )
+ // Grab the latest snapshot file for each blockstart.
+ latestSnapshotInfoPerBlockStart := make(map[xtime.UnixNano]schema.IndexInfo)
+ for _, f := range infoFiles {
+ info, ok := latestSnapshotInfoPerBlockStart[xtime.UnixNano(f.Info.BlockStart)]
+ if !ok {
+ latestSnapshotInfoPerBlockStart[xtime.UnixNano(f.Info.BlockStart)] = f.Info
+ continue
+ }
+
+ if f.Info.VolumeIndex > info.VolumeIndex {
+ latestSnapshotInfoPerBlockStart[xtime.UnixNano(f.Info.BlockStart)] = f.Info
+ }
+ }
+ for blockStart, info := range latestSnapshotInfoPerBlockStart {
+ numEntriesPerBlockStart[blockStart] += int(info.Entries)
+ }
+ }
+ return numEntriesPerBlockStart
+}
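
For the polling in the new test above: getNumEntriesPerBlockStart sums the Entries field of the latest snapshot info file per block start across all shards, so the test can wait first for every written series to appear in snapshots and later for the count to drop to zero once flushed blocks leave the snapshots. A condensed sketch of that wait, using the same xclock.WaitUntil helper:

    // waitForSnapshotEntries polls until the latest snapshots on disk cover
    // exactly `expected` series, mirroring the loops in the test above.
    func waitForSnapshotEntries(nsID ident.ID, numShards int, fsOpts fs.Options, expected int) bool {
        return xclock.WaitUntil(func() bool {
            total := 0
            for _, n := range getNumEntriesPerBlockStart(nsID, numShards, fsOpts) {
                total += n
            }
            return total == expected
        }, time.Minute)
    }
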
diff --git a/src/dbnode/integration/commitlog_bootstrap_with_snapshots_test.go b/src/dbnode/integration/commitlog_bootstrap_with_snapshots_test.go
index 7f301351d5..60c50d3d10 100644
--- a/src/dbnode/integration/commitlog_bootstrap_with_snapshots_test.go
+++ b/src/dbnode/integration/commitlog_bootstrap_with_snapshots_test.go
@@ -27,8 +27,8 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/integration/generate"
- "github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/retention"
"github.com/stretchr/testify/require"
)
@@ -51,11 +51,15 @@ func testCommitLogBootstrapWithSnapshots(t *testing.T, setTestOpts setTestOption
ropts = retention.NewOptions().SetRetentionPeriod(12 * time.Hour)
blockSize = ropts.BlockSize()
)
- ns1, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().SetRetentionOptions(ropts))
+ ns1, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().
+ SetRetentionOptions(ropts).
+ SetColdWritesEnabled(true))
require.NoError(t, err)
- ns2, err := namespace.NewMetadata(testNamespaces[1], namespace.NewOptions().SetRetentionOptions(ropts))
+ ns2, err := namespace.NewMetadata(testNamespaces[1], namespace.NewOptions().
+ SetRetentionOptions(ropts).
+ SetColdWritesEnabled(true))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1, ns2})
if setTestOpts != nil {
@@ -64,22 +68,29 @@ func testCommitLogBootstrapWithSnapshots(t *testing.T, setTestOpts setTestOption
ns2 = opts.Namespaces()[1]
}
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- commitLogOpts := setup.storageOpts.CommitLogOptions().
+ commitLogOpts := setup.StorageOpts().CommitLogOptions().
SetFlushInterval(defaultIntegrationTestFlushInterval)
- setup.storageOpts = setup.storageOpts.SetCommitLogOptions(commitLogOpts)
+ setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Info("commit log bootstrap test")
// Write test data
log.Info("generating data")
var (
- now = setup.getNowFn().Truncate(blockSize)
- seriesMaps = generateSeriesMaps(30, updateInputConfig, now.Add(-2*blockSize), now.Add(-blockSize))
+ now = setup.NowFn()().Truncate(blockSize)
+ seriesMaps = generateSeriesMaps(
+ 100,
+ updateInputConfig,
+ now.Add(-4*blockSize),
+ now.Add(-3*blockSize),
+ now.Add(-2*blockSize),
+ now.Add(-blockSize),
+ )
)
log.Info("writing data")
@@ -98,7 +109,7 @@ func testCommitLogBootstrapWithSnapshots(t *testing.T, setTestOpts setTestOption
)
writeSnapshotsWithPredicate(
- t, setup, commitLogOpts, seriesMaps, 0,ns1, nil, pred, snapshotInterval)
+ t, setup, commitLogOpts, seriesMaps, 0, ns1, nil, pred, snapshotInterval)
numDatapointsNotInCommitLogs := 0
writeCommitLogDataWithPredicate(t, setup, commitLogOpts, seriesMaps, ns1, func(dp generate.TestValue) bool {
@@ -120,27 +131,27 @@ func testCommitLogBootstrapWithSnapshots(t *testing.T, setTestOpts setTestOption
// Setup bootstrapper after writing data so filesystem inspection can find it.
setupCommitLogBootstrapperWithFSInspection(t, setup, commitLogOpts)
- setup.setNowFn(now)
+ setup.SetNowFn(now)
// Start the server with filesystem bootstrapper
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
// Verify in-memory data match what we expect - all writes from seriesMaps
// should be present
- metadatasByShard := testSetupMetadatas(t, setup, testNamespaces[0], now.Add(-2*blockSize), now)
+ metadatasByShard := testSetupMetadatas(t, setup, testNamespaces[0], now.Add(-4*blockSize), now)
observedSeriesMaps := testSetupToSeriesMaps(t, setup, ns1, metadatasByShard)
verifySeriesMapsEqual(t, seriesMaps, observedSeriesMaps)
// Verify in-memory data match what we expect - no writes should be present
	// because we didn't issue any writes for this namespace
emptySeriesMaps := make(generate.SeriesBlocksByStart)
- metadatasByShard2 := testSetupMetadatas(t, setup, testNamespaces[1], now.Add(-2*blockSize), now)
+ metadatasByShard2 := testSetupMetadatas(t, setup, testNamespaces[1], now.Add(-4*blockSize), now)
observedSeriesMaps2 := testSetupToSeriesMaps(t, setup, ns2, metadatasByShard2)
verifySeriesMapsEqual(t, emptySeriesMaps, observedSeriesMaps2)
diff --git a/src/dbnode/integration/disk_cleanup_deletes_inactive_directories_test.go b/src/dbnode/integration/disk_cleanup_deletes_inactive_directories_test.go
index b7450a7f23..19512a23d1 100644
--- a/src/dbnode/integration/disk_cleanup_deletes_inactive_directories_test.go
+++ b/src/dbnode/integration/disk_cleanup_deletes_inactive_directories_test.go
@@ -26,28 +26,28 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/sharding"
"github.com/stretchr/testify/require"
)
func TestDiskCleansupInactiveDirectories(t *testing.T) {
- var resetSetup *testSetup
+ var resetSetup TestSetup
if testing.Short() {
t.SkipNow() // Just skip if we're doing a short run
}
// Test setup
- testOpts := newTestOptions(t)
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testOpts := NewTestOptions(t)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- md := testSetup.namespaceMetadataOrFail(testNamespaces[0])
+ md := testSetup.NamespaceMetadataOrFail(testNamespaces[0])
	// Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
log.Info("disk cleanup directories test")
- require.NoError(t, testSetup.startServer())
+ require.NoError(t, testSetup.StartServer())
// Stop the server at the end of the test
@@ -60,7 +60,7 @@ func TestDiskCleansupInactiveDirectories(t *testing.T) {
nsWaitTimeout = 10 * time.Second
namespaces = []namespace.Metadata{md}
- shardSet = testSetup.db.ShardSet()
+ shardSet = testSetup.DB().ShardSet()
shards = shardSet.All()
extraShard = shards[0]
)
@@ -68,13 +68,13 @@ func TestDiskCleansupInactiveDirectories(t *testing.T) {
// Now create some fileset files and commit logs
shardSet, err = sharding.NewShardSet(shards[1:], shardSet.HashFn())
require.NoError(t, err)
- testSetup.db.AssignShardSet(shardSet)
+ testSetup.DB().AssignShardSet(shardSet)
- clOpts := testSetup.storageOpts.CommitLogOptions()
+ clOpts := testSetup.StorageOpts().CommitLogOptions()
// Check filesets are good to go
go func() {
fsCleanupErr <- waitUntilDataFileSetsCleanedUp(clOpts,
- testSetup.db.Namespaces(), extraShard.ID(), fsWaitTimeout)
+ testSetup.DB().Namespaces(), extraShard.ID(), fsWaitTimeout)
}()
log.Info("blocking until file cleanup is received")
require.NoError(t, <-fsCleanupErr)
@@ -86,7 +86,7 @@ func TestDiskCleansupInactiveDirectories(t *testing.T) {
nsResetErr <- resetErr
}()
defer func() {
- require.NoError(t, resetSetup.stopServer())
+ require.NoError(t, resetSetup.StopServer())
}()
nsToDelete := testNamespaces[1]
log.Info("blocking until namespaces have reset and deleted")
@@ -95,7 +95,7 @@ func TestDiskCleansupInactiveDirectories(t *testing.T) {
}()
require.NoError(t, <-nsResetErr)
- filePathPrefix := testSetup.storageOpts.CommitLogOptions().FilesystemOptions().FilePathPrefix()
+ filePathPrefix := testSetup.StorageOpts().CommitLogOptions().FilesystemOptions().FilePathPrefix()
go func() {
nsCleanupErr <- waitUntilNamespacesCleanedUp(filePathPrefix, nsToDelete, nsWaitTimeout)
}()
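
The shard-shedding step above is worth isolating: the test rebuilds the shard set without its first shard and assigns it to the running database, which is what makes that shard's directories inactive and eligible for cleanup. As a sketch:

    // shedFirstShard drops the first shard from the database's shard set so
    // its on-disk data becomes inactive, as exercised in the test above.
    func shedFirstShard(t *testing.T, setup TestSetup) {
        shards := setup.DB().ShardSet().All()
        shardSet, err := sharding.NewShardSet(shards[1:], setup.DB().ShardSet().HashFn())
        require.NoError(t, err)
        setup.DB().AssignShardSet(shardSet)
    }
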
diff --git a/src/dbnode/integration/disk_cleanup_helpers.go b/src/dbnode/integration/disk_cleanup_helpers.go
index 235ee220a1..83af4e8f46 100644
--- a/src/dbnode/integration/disk_cleanup_helpers.go
+++ b/src/dbnode/integration/disk_cleanup_helpers.go
@@ -169,22 +169,22 @@ func waitUntilNamespacesCleanedUp(filePathPrefix string, namespace ident.ID, wai
}
// nolint: unused
-func waitUntilNamespacesHaveReset(testSetup *testSetup, newNamespaces []namespace.Metadata, newShardSet sharding.ShardSet) (*testSetup, error) {
- err := testSetup.stopServer()
+func waitUntilNamespacesHaveReset(testSetup TestSetup, newNamespaces []namespace.Metadata, newShardSet sharding.ShardSet) (TestSetup, error) {
+ err := testSetup.StopServer()
if err != nil {
return nil, err
}
	// Reset to the desired shard set and namespaces
	// because restarting the server would bootstrap
	// to the old data we wanted to delete.
- testSetup.opts = testSetup.opts.SetNamespaces(newNamespaces)
+ testSetup.SetOpts(testSetup.Opts().SetNamespaces(newNamespaces))
- resetSetup, err := newTestSetup(nil, testSetup.opts, testSetup.fsOpts)
+ resetSetup, err := NewTestSetup(nil, testSetup.Opts(), testSetup.FilesystemOpts())
if err != nil {
return nil, err
}
- resetSetup.shardSet = newShardSet
- err = resetSetup.startServer()
+ resetSetup.SetShardSet(newShardSet)
+ err = resetSetup.StartServer()
if err != nil {
return nil, err
}
diff --git a/src/dbnode/integration/disk_cleanup_index_test.go b/src/dbnode/integration/disk_cleanup_index_test.go
index d622a633b2..6bb10fea9a 100644
--- a/src/dbnode/integration/disk_cleanup_index_test.go
+++ b/src/dbnode/integration/disk_cleanup_index_test.go
@@ -53,37 +53,37 @@ func TestDiskCleanupIndex(t *testing.T) {
namespace.NewIndexOptions().SetBlockSize(idxBlockSize).SetEnabled(true)))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{md})
// Test setup
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
retentionPeriod := md.Options().RetentionOptions().RetentionPeriod()
- filePathPrefix := setup.storageOpts.CommitLogOptions().FilesystemOptions().FilePathPrefix()
+ filePathPrefix := setup.StorageOpts().CommitLogOptions().FilesystemOptions().FilePathPrefix()
// Start the server
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Debug("disk index cleanup test")
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
// Now create some fileset files
numTimes := 10
fileTimes := make([]time.Time, numTimes)
- now := setup.getNowFn().Truncate(idxBlockSize)
+ now := setup.NowFn()().Truncate(idxBlockSize)
for i := 0; i < numTimes; i++ {
fileTimes[i] = now.Add(time.Duration(i) * idxBlockSize)
}
- writeIndexFileSetFiles(t, setup.storageOpts, md, fileTimes)
+ writeIndexFileSetFiles(t, setup.StorageOpts(), md, fileTimes)
deltaNow := now.Add(time.Minute)
filesets, err := fs.IndexFileSetsBefore(filePathPrefix, md.ID(), deltaNow)
@@ -92,7 +92,7 @@ func TestDiskCleanupIndex(t *testing.T) {
// Move now forward by retentionPeriod + blockSize so fileset files at now will be deleted
newNow := now.Add(retentionPeriod).Add(idxBlockSize)
- setup.setNowFn(newNow)
+ setup.SetNowFn(newNow)
// Check if files have been deleted
waitTimeout := 30 * time.Second
diff --git a/src/dbnode/integration/disk_cleanup_test.go b/src/dbnode/integration/disk_cleanup_test.go
index cdb9dc4cf2..5d77b765fa 100644
--- a/src/dbnode/integration/disk_cleanup_test.go
+++ b/src/dbnode/integration/disk_cleanup_test.go
@@ -38,12 +38,12 @@ func TestDiskCleanup(t *testing.T) {
t.SkipNow() // Just skip if we're doing a short run
}
// Test setup
- testOpts := newTestOptions(t)
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testOpts := NewTestOptions(t)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
- md := testSetup.namespaceMetadataOrFail(testNamespaces[0])
+ md := testSetup.NamespaceMetadataOrFail(testNamespaces[0])
blockSize := md.Options().RetentionOptions().BlockSize()
retentionPeriod := md.Options().RetentionOptions().RetentionPeriod()
@@ -52,8 +52,8 @@ func TestDiskCleanup(t *testing.T) {
shard = uint32(0)
numTimes = 10
fileTimes = make([]time.Time, numTimes)
- now = testSetup.getNowFn()
- commitLogOpts = testSetup.storageOpts.CommitLogOptions().
+ now = testSetup.NowFn()()
+ commitLogOpts = testSetup.StorageOpts().CommitLogOptions().
SetFlushInterval(defaultIntegrationTestFlushInterval)
)
ns1, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions())
@@ -61,7 +61,7 @@ func TestDiskCleanup(t *testing.T) {
for i := 0; i < numTimes; i++ {
fileTimes[i] = now.Add(time.Duration(i) * blockSize)
}
- writeDataFileSetFiles(t, testSetup.storageOpts, md, shard, fileTimes)
+ writeDataFileSetFiles(t, testSetup.StorageOpts(), md, shard, fileTimes)
for _, clTime := range fileTimes {
data := map[xtime.UnixNano]generate.SeriesBlock{
xtime.ToUnixNano(clTime): nil,
@@ -72,20 +72,20 @@ func TestDiskCleanup(t *testing.T) {
}
// Now start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
log.Debug("disk cleanup test")
- require.NoError(t, testSetup.startServer())
+ require.NoError(t, testSetup.StartServer())
log.Debug("server is now up")
defer func() {
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Debug("server is now down")
}()
// Move now forward by retentionPeriod + 2 * blockSize so fileset files
// and commit logs at now will be deleted
- newNow := testSetup.getNowFn().Add(retentionPeriod).Add(2 * blockSize)
- testSetup.setNowFn(newNow)
+ newNow := testSetup.NowFn()().Add(retentionPeriod).Add(2 * blockSize)
+ testSetup.SetNowFn(newNow)
// Check if files have been deleted
waitTimeout := 30 * time.Second
diff --git a/src/dbnode/integration/disk_coldflush_test.go b/src/dbnode/integration/disk_coldflush_test.go
index f5c8871ad2..0ae350919a 100644
--- a/src/dbnode/integration/disk_coldflush_test.go
+++ b/src/dbnode/integration/disk_coldflush_test.go
@@ -48,34 +48,34 @@ func TestDiskColdFlushSimple(t *testing.T) {
nsID := ident.StringID("testColdWriteNs1")
ns, err := namespace.NewMetadata(nsID, nsOpts)
require.NoError(t, err)
- testOpts := newTestOptions(t).
+ testOpts := NewTestOptions(t).
SetTickMinimumInterval(time.Second).
SetNamespaces([]namespace.Metadata{ns})
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
- md := testSetup.namespaceMetadataOrFail(nsID)
+ md := testSetup.NamespaceMetadataOrFail(nsID)
ropts := md.Options().RetentionOptions()
blockSize := ropts.BlockSize()
- filePathPrefix := testSetup.storageOpts.CommitLogOptions().FilesystemOptions().FilePathPrefix()
+ filePathPrefix := testSetup.StorageOpts().CommitLogOptions().FilesystemOptions().FilePathPrefix()
// Start the server.
- log := testSetup.storageOpts.InstrumentOptions().Logger()
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
log.Debug("disk coldflush test")
- require.NoError(t, testSetup.startServer())
+ require.NoError(t, testSetup.StartServer())
log.Debug("server is now up")
// Stop the server.
defer func() {
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Debug("server is now down")
}()
// Write warm data first so that cold data will flush.
- start := testSetup.getNowFn()
+ start := testSetup.NowFn()()
seriesMaps := make(map[xtime.UnixNano]generate.SeriesBlock)
warmData := []generate.BlockConfig{
{IDs: []string{"warm1", "warm2"}, NumPoints: 100, Start: start},
@@ -126,10 +126,10 @@ func TestDiskColdFlushSimple(t *testing.T) {
},
}
for _, input := range warmData {
- testSetup.setNowFn(input.Start)
+ testSetup.SetNowFn(input.Start)
testData := generate.Block(input)
seriesMaps[xtime.ToUnixNano(input.Start)] = testData
- require.NoError(t, testSetup.writeBatch(nsID, testData))
+ require.NoError(t, testSetup.WriteBatch(nsID, testData))
}
startPlusOneBlockNano := xtime.ToUnixNano(start.Add(blockSize))
// Remove warm data for `coldOverwrite`. See earlier comment for context.
@@ -140,12 +140,12 @@ func TestDiskColdFlushSimple(t *testing.T) {
// Advance time to make sure all data are flushed. Because data
// are flushed to disk asynchronously, need to poll to check
// when data are written.
- testSetup.setNowFn(testSetup.getNowFn().Add(blockSize * 2))
+ testSetup.SetNowFn(testSetup.NowFn()().Add(blockSize * 2))
maxWaitTime := time.Minute
require.NoError(t, waitUntilFileSetFilesExist(filePathPrefix, expectedDataFiles, maxWaitTime))
// Verify on-disk data match what we expect.
- verifyFlushedDataFiles(t, testSetup.shardSet, testSetup.storageOpts, nsID, seriesMaps)
+ verifyFlushedDataFiles(t, testSetup.ShardSet(), testSetup.StorageOpts(), nsID, seriesMaps)
coldData := []generate.BlockConfig{
{IDs: []string{"cold0"}, NumPoints: 80, Start: start.Add(-blockSize)},
@@ -153,11 +153,11 @@ func TestDiskColdFlushSimple(t *testing.T) {
{IDs: []string{"cold1", "cold3", "coldOverwrite"}, NumPoints: 100, Start: start.Add(blockSize)},
}
// Set "now" to start + 3 * blockSize so that the above are cold writes.
- testSetup.setNowFn(start.Add(blockSize * 3))
+ testSetup.SetNowFn(start.Add(blockSize * 3))
for _, input := range coldData {
testData := generate.Block(input)
seriesMaps[xtime.ToUnixNano(input.Start)] = append(seriesMaps[xtime.ToUnixNano(input.Start)], testData...)
- require.NoError(t, testSetup.writeBatch(nsID, testData))
+ require.NoError(t, testSetup.WriteBatch(nsID, testData))
}
log.Debug("cold data is now written")
@@ -238,5 +238,5 @@ func TestDiskColdFlushSimple(t *testing.T) {
require.NoError(t, waitUntilFileSetFilesExist(filePathPrefix, expectedDataFiles, maxWaitTime))
// Verify on-disk data match what we expect
- verifyFlushedDataFiles(t, testSetup.shardSet, testSetup.storageOpts, nsID, seriesMaps)
+ verifyFlushedDataFiles(t, testSetup.ShardSet(), testSetup.StorageOpts(), nsID, seriesMaps)
}
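
A word on warm versus cold in the test above: writes landing inside the active buffer window around "now" take the warm path, while the test's jump to start + 3*blockSize pushes the earlier block starts outside that window so the follow-up writes exercise the cold flush path. Roughly, and only as an illustration (this is not the storage engine's exact check):

    // isColdWrite is an illustrative approximation: a datapoint outside
    // [now-bufferPast, now+bufferFuture] cannot enter a mutable warm block.
    func isColdWrite(timestamp, now time.Time, bufferPast, bufferFuture time.Duration) bool {
        return timestamp.Before(now.Add(-bufferPast)) ||
            timestamp.After(now.Add(bufferFuture))
    }
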
diff --git a/src/dbnode/integration/disk_flush_helpers.go b/src/dbnode/integration/disk_flush_helpers.go
index 7d3c87d400..ce954a036c 100644
--- a/src/dbnode/integration/disk_flush_helpers.go
+++ b/src/dbnode/integration/disk_flush_helpers.go
@@ -40,6 +40,7 @@ import (
"github.com/m3db/m3/src/x/ident/testutil"
xtime "github.com/m3db/m3/src/x/time"
+ "github.com/pborman/uuid"
"github.com/stretchr/testify/require"
)
@@ -84,7 +85,8 @@ func waitUntilSnapshotFilesFlushed(
namespace ident.ID,
expectedSnapshots []snapshotID,
timeout time.Duration,
-) error {
+) (uuid.UUID, error) {
+ var snapshotID uuid.UUID
dataFlushed := func() bool {
for _, shard := range shardSet.AllIDs() {
for _, e := range expectedSnapshots {
@@ -102,14 +104,19 @@ func waitUntilSnapshotFilesFlushed(
if !(latest.ID.VolumeIndex >= e.minVolume) {
return false
}
+
+ _, snapshotID, err = latest.SnapshotTimeAndID()
+ if err != nil {
+ panic(err)
+ }
}
}
return true
}
if waitUntil(dataFlushed, timeout) {
- return nil
+ return snapshotID, nil
}
- return errDiskFlushTimedOut
+ return snapshotID, errDiskFlushTimedOut
}
func waitUntilDataFilesFlushed(
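
The signature change above means waitUntilSnapshotFilesFlushed now also reports the UUID of the most recent matching snapshot it observed, letting later assertions compare snapshot generations; on timeout the last observed ID (possibly nil) comes back alongside errDiskFlushTimedOut. A call site would look something like the sketch below — the leading parameters are elided in this hunk, so they are assumed here:

    // Sketch of a call site after the change; leading arguments assumed.
    snapshotID, err := waitUntilSnapshotFilesFlushed(
        filePathPrefix, shardSet, nsID, expectedSnapshots, maxWaitTime)
    require.NoError(t, err)
    log.Info("snapshot flushed", zap.String("snapshotID", snapshotID.String()))
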
diff --git a/src/dbnode/integration/disk_flush_multi_ns_test.go b/src/dbnode/integration/disk_flush_multi_ns_test.go
index 4a860d5f1d..9856bac21b 100644
--- a/src/dbnode/integration/disk_flush_multi_ns_test.go
+++ b/src/dbnode/integration/disk_flush_multi_ns_test.go
@@ -53,29 +53,29 @@ func TestDiskFlushMultipleNamespace(t *testing.T) {
require.NoError(t, err)
ns2, err := namespace.NewMetadata(testNamespaces[1], namespace.NewOptions().SetRetentionOptions(ns2ROpts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1, ns2})
// Test setup
- testSetup, err := newTestSetup(t, opts, nil)
+ testSetup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
- clOpts := testSetup.storageOpts.CommitLogOptions()
+ clOpts := testSetup.StorageOpts().CommitLogOptions()
filePathPrefix := clOpts.FilesystemOptions().FilePathPrefix()
	// now is aligned to the LCM of the namespace block sizes
- now := testSetup.getNowFn()
+ now := testSetup.NowFn()()
// Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
log.Info("disk flush multiple namespaces test")
- require.NoError(t, testSetup.startServer())
+ require.NoError(t, testSetup.StartServer())
log.Info("server is now up")
// Stop the server
defer func() {
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Info("server is now down")
}()
@@ -95,10 +95,10 @@ func TestDiskFlushMultipleNamespace(t *testing.T) {
for _, ns1Input := range ns1InputData {
// write the data for ns1, always
- testSetup.setNowFn(ns1Input.Start)
+ testSetup.SetNowFn(ns1Input.Start)
testData := generate.Block(ns1Input)
ns1SeriesMaps[xtime.ToUnixNano(ns1Input.Start)] = testData
- require.NoError(t, testSetup.writeBatch(testNamespaces[0], testData))
+ require.NoError(t, testSetup.WriteBatch(testNamespaces[0], testData))
log.Info("wrote ns1 for time", zap.Time("start", ns1Input.Start))
// when applicable, write the data for ns2, too
@@ -109,7 +109,7 @@ func TestDiskFlushMultipleNamespace(t *testing.T) {
testData = generate.Block(ns2Input)
ns2SeriesMaps[xtime.ToUnixNano(ns2Input.Start)] = testData
log.Info("wrote ns2 for time", zap.Time("start", ns2Input.Start))
- require.NoError(t, testSetup.writeBatch(testNamespaces[1], testData))
+ require.NoError(t, testSetup.WriteBatch(testNamespaces[1], testData))
}
}
log.Info("test data written successfully")
@@ -119,14 +119,14 @@ func TestDiskFlushMultipleNamespace(t *testing.T) {
// when data are written.
maxWaitTime := time.Minute
log.Info("waiting until data is flushed")
- testSetup.setNowFn(testSetup.getNowFn().Add(3 * ns1BlockSize))
- require.NoError(t, waitUntilDataFilesFlushed(filePathPrefix, testSetup.shardSet, testNamespaces[0], ns1SeriesMaps, maxWaitTime))
- require.NoError(t, waitUntilDataFilesFlushed(filePathPrefix, testSetup.shardSet, testNamespaces[1], ns2SeriesMaps, maxWaitTime))
+ testSetup.SetNowFn(testSetup.NowFn()().Add(3 * ns1BlockSize))
+ require.NoError(t, waitUntilDataFilesFlushed(filePathPrefix, testSetup.ShardSet(), testNamespaces[0], ns1SeriesMaps, maxWaitTime))
+ require.NoError(t, waitUntilDataFilesFlushed(filePathPrefix, testSetup.ShardSet(), testNamespaces[1], ns2SeriesMaps, maxWaitTime))
log.Info("data has been flushed")
// Verify on-disk data match what we expect
log.Info("verifying flushed data")
- verifyFlushedDataFiles(t, testSetup.shardSet, testSetup.storageOpts, testNamespaces[0], ns1SeriesMaps)
- verifyFlushedDataFiles(t, testSetup.shardSet, testSetup.storageOpts, testNamespaces[1], ns2SeriesMaps)
+ verifyFlushedDataFiles(t, testSetup.ShardSet(), testSetup.StorageOpts(), testNamespaces[0], ns1SeriesMaps)
+ verifyFlushedDataFiles(t, testSetup.ShardSet(), testSetup.StorageOpts(), testNamespaces[1], ns2SeriesMaps)
log.Info("flushed data verified")
}
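
The "aligned to the LCM" comment above is load-bearing: ns1 and ns2 use different block sizes, and their block boundaries only coincide at multiples of the two sizes' least common multiple, so starting "now" there keeps both namespaces' blocks aligned for the flush assertions. A small self-contained illustration:

    // lcmDuration returns the least common multiple of two durations;
    // e.g. block sizes of 2h and 3h align every 6h.
    func lcmDuration(a, b time.Duration) time.Duration {
        gcd := func(x, y int64) int64 {
            for y != 0 {
                x, y = y, x%y
            }
            return x
        }
        na, nb := int64(a), int64(b)
        return time.Duration(na / gcd(na, nb) * nb)
    }
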
diff --git a/src/dbnode/integration/disk_flush_test.go b/src/dbnode/integration/disk_flush_test.go
index fb813051cb..74527e54f8 100644
--- a/src/dbnode/integration/disk_flush_test.go
+++ b/src/dbnode/integration/disk_flush_test.go
@@ -37,50 +37,50 @@ func TestDiskFlushSimple(t *testing.T) {
t.SkipNow() // Just skip if we're doing a short run
}
// Test setup
- testOpts := newTestOptions(t).
+ testOpts := NewTestOptions(t).
SetTickMinimumInterval(time.Second)
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
- md := testSetup.namespaceMetadataOrFail(testNamespaces[0])
+ md := testSetup.NamespaceMetadataOrFail(testNamespaces[0])
blockSize := md.Options().RetentionOptions().BlockSize()
- filePathPrefix := testSetup.storageOpts.CommitLogOptions().FilesystemOptions().FilePathPrefix()
+ filePathPrefix := testSetup.StorageOpts().CommitLogOptions().FilesystemOptions().FilePathPrefix()
// Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
log.Debug("disk flush test")
- require.NoError(t, testSetup.startServer())
+ require.NoError(t, testSetup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Debug("server is now down")
}()
// Write test data
- now := testSetup.getNowFn()
+ now := testSetup.NowFn()()
seriesMaps := make(map[xtime.UnixNano]generate.SeriesBlock)
inputData := []generate.BlockConfig{
{IDs: []string{"foo", "bar"}, NumPoints: 100, Start: now},
{IDs: []string{"foo", "baz"}, NumPoints: 50, Start: now.Add(blockSize)},
}
for _, input := range inputData {
- testSetup.setNowFn(input.Start)
+ testSetup.SetNowFn(input.Start)
testData := generate.Block(input)
seriesMaps[xtime.ToUnixNano(input.Start)] = testData
- require.NoError(t, testSetup.writeBatch(testNamespaces[0], testData))
+ require.NoError(t, testSetup.WriteBatch(testNamespaces[0], testData))
}
log.Debug("test data is now written")
// Advance time to make sure all data are flushed. Because data
// are flushed to disk asynchronously, need to poll to check
// when data are written.
- testSetup.setNowFn(testSetup.getNowFn().Add(blockSize * 2))
+ testSetup.SetNowFn(testSetup.NowFn()().Add(blockSize * 2))
maxWaitTime := time.Minute
- require.NoError(t, waitUntilDataFilesFlushed(filePathPrefix, testSetup.shardSet, testNamespaces[0], seriesMaps, maxWaitTime))
+ require.NoError(t, waitUntilDataFilesFlushed(filePathPrefix, testSetup.ShardSet(), testNamespaces[0], seriesMaps, maxWaitTime))
// Verify on-disk data match what we expect
- verifyFlushedDataFiles(t, testSetup.shardSet, testSetup.storageOpts, testNamespaces[0], seriesMaps)
+ verifyFlushedDataFiles(t, testSetup.ShardSet(), testSetup.StorageOpts(), testNamespaces[0], seriesMaps)
}
diff --git a/src/dbnode/integration/disk_snapshot_test.go b/src/dbnode/integration/disk_snapshot_test.go
index bf6f743dd4..5b7d886339 100644
--- a/src/dbnode/integration/disk_snapshot_test.go
+++ b/src/dbnode/integration/disk_snapshot_test.go
@@ -57,30 +57,30 @@ func TestDiskSnapshotSimple(t *testing.T) {
md2, err := namespace.NewMetadata(testNamespaces[1], nOpts)
require.NoError(t, err)
- testOpts := newTestOptions(t).
+ testOpts := NewTestOptions(t).
SetTickMinimumInterval(time.Second).
SetNamespaces([]namespace.Metadata{md1, md2})
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
- shardSet := testSetup.shardSet
+ shardSet := testSetup.ShardSet()
// Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
log.Debug("disk flush test")
- require.NoError(t, testSetup.startServer())
+ require.NoError(t, testSetup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Debug("server is now down")
}()
// Write test data
var (
- currBlock = testSetup.getNowFn().Truncate(blockSize)
+ currBlock = testSetup.NowFn()().Truncate(blockSize)
now = currBlock.Add(11 * time.Minute)
assertTimeAllowsWritesToAllBlocks = func(ti time.Time) {
// Make sure now is within bufferPast of the previous block
@@ -91,7 +91,7 @@ func TestDiskSnapshotSimple(t *testing.T) {
)
assertTimeAllowsWritesToAllBlocks(now)
- testSetup.setNowFn(now)
+ testSetup.SetNowFn(now)
var (
seriesMaps = make(map[xtime.UnixNano]generate.SeriesBlock)
@@ -107,8 +107,8 @@ func TestDiskSnapshotSimple(t *testing.T) {
for _, input := range inputData {
testData := generate.Block(input)
seriesMaps[xtime.ToUnixNano(input.Start.Truncate(blockSize))] = testData
- for _, ns := range testSetup.namespaces {
- require.NoError(t, testSetup.writeBatch(ns.ID(), testData))
+ for _, ns := range testSetup.Namespaces() {
+ require.NoError(t, testSetup.WriteBatch(ns.ID(), testData))
}
}
@@ -119,13 +119,13 @@ func TestDiskSnapshotSimple(t *testing.T) {
// minimum time between snapshots), and then waiting for the snapshot files with
// the measured volume index + 1.
var (
- snapshotsToWaitForByNS = make([][]snapshotID, 0, len(testSetup.namespaces))
- filePathPrefix = testSetup.storageOpts.
+ snapshotsToWaitForByNS = make([][]snapshotID, 0, len(testSetup.Namespaces()))
+ filePathPrefix = testSetup.StorageOpts().
CommitLogOptions().
FilesystemOptions().
FilePathPrefix()
)
- for _, ns := range testSetup.namespaces {
+ for _, ns := range testSetup.Namespaces() {
snapshotsToWaitForByNS = append(snapshotsToWaitForByNS, []snapshotID{
{
blockStart: currBlock.Add(-blockSize),
@@ -145,37 +145,39 @@ func TestDiskSnapshotSimple(t *testing.T) {
})
}
- now = testSetup.getNowFn().Add(2 * time.Minute)
+ now = testSetup.NowFn()().Add(2 * time.Minute)
assertTimeAllowsWritesToAllBlocks(now)
- testSetup.setNowFn(now)
+ testSetup.SetNowFn(now)
maxWaitTime := time.Minute
- for i, ns := range testSetup.namespaces {
+ for i, ns := range testSetup.Namespaces() {
log.Info("waiting for snapshot files to flush")
- require.NoError(t, waitUntilSnapshotFilesFlushed(
- filePathPrefix, shardSet, ns.ID(), snapshotsToWaitForByNS[i], maxWaitTime))
+ _, err := waitUntilSnapshotFilesFlushed(filePathPrefix, shardSet, ns.ID(), snapshotsToWaitForByNS[i], maxWaitTime)
+ require.NoError(t, err)
log.Info("verifying snapshot files")
- verifySnapshottedDataFiles(t, shardSet, testSetup.storageOpts, ns.ID(), seriesMaps)
+ verifySnapshottedDataFiles(t, shardSet, testSetup.StorageOpts(), ns.ID(), seriesMaps)
}
var (
- oldTime = testSetup.getNowFn()
+ oldTime = testSetup.NowFn()()
newTime = oldTime.Add(blockSize * 2)
)
- testSetup.setNowFn(newTime)
+ testSetup.SetNowFn(newTime)
- for _, ns := range testSetup.namespaces {
+ for _, ns := range testSetup.Namespaces() {
log.Info("waiting for new snapshot files to be written out")
snapshotsToWaitFor := []snapshotID{{blockStart: newTime.Truncate(blockSize)}}
- require.NoError(t, waitUntilSnapshotFilesFlushed(
- filePathPrefix, shardSet, ns.ID(), snapshotsToWaitFor, maxWaitTime))
+ // NB(bodu): We need to check whether a specific snapshot ID was deleted, since the snapshotting
+ // logic has changed to always snapshot every block start within retention.
+ snapshotID, err := waitUntilSnapshotFilesFlushed(filePathPrefix, shardSet, ns.ID(), snapshotsToWaitFor, maxWaitTime)
+ require.NoError(t, err)
log.Info("waiting for old snapshot files to be deleted")
for _, shard := range shardSet.All() {
waitUntil(func() bool {
// Advance the time on each check to ensure that the filesystem processes are able to progress
// (some of them throttle themselves based on time elapsed since the previous check).
- testSetup.setNowFn(testSetup.getNowFn().Add(10 * time.Second))
- exists, err := fs.SnapshotFileSetExistsAt(filePathPrefix, ns.ID(), shard.ID(), oldTime.Truncate(blockSize))
+ testSetup.SetNowFn(testSetup.NowFn()().Add(10 * time.Second))
+ exists, err := fs.SnapshotFileSetExistsAt(filePathPrefix, ns.ID(), snapshotID, shard.ID(), oldTime.Truncate(blockSize))
require.NoError(t, err)
return !exists
}, maxWaitTime)
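
The deletion check above leans on a generic polling helper. The real `waitUntil` lives elsewhere in the integration package; this is a hedged, standard-library-only sketch of the shape these call sites assume:

```go
// waitUntil polls fn until it reports true or the timeout elapses,
// returning whether the condition was ever met.
func waitUntil(fn func() bool, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if fn() {
			return true
		}
		time.Sleep(10 * time.Millisecond)
	}
	return false
}
```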
diff --git a/src/dbnode/integration/dynamic_namespace_add_test.go b/src/dbnode/integration/dynamic_namespace_add_test.go
index bf5f0be2ba..e276abddec 100644
--- a/src/dbnode/integration/dynamic_namespace_add_test.go
+++ b/src/dbnode/integration/dynamic_namespace_add_test.go
@@ -46,7 +46,7 @@ func TestDynamicNamespaceAdd(t *testing.T) {
defer ctrl.Finish()
// test options
- testOpts := newTestOptions(t).
+ testOpts := NewTestOptions(t).
SetTickMinimumInterval(time.Second)
require.True(t, len(testOpts.Namespaces()) >= 2)
ns0 := testOpts.Namespaces()[0]
@@ -82,19 +82,19 @@ func TestDynamicNamespaceAdd(t *testing.T) {
require.NoError(t, err)
// Test setup
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
// Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
- require.NoError(t, testSetup.startServer())
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
+ require.NoError(t, testSetup.StartServer())
// Stop the server
stopped := false
defer func() {
stopped = true
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Info("server is now down")
}()
@@ -113,13 +113,13 @@ func TestDynamicNamespaceAdd(t *testing.T) {
go func() {
wg.Done()
for !stopped {
- testSetup.blockLeaseManager.OpenLease(leaser, leaseDescriptor, leaseState)
+ testSetup.BlockLeaseManager().OpenLease(leaser, leaseDescriptor, leaseState)
}
}()
go func() {
wg.Done()
for !stopped {
- testSetup.blockLeaseManager.OpenLatestLease(leaser, leaseDescriptor)
+ testSetup.BlockLeaseManager().OpenLatestLease(leaser, leaseDescriptor)
}
}()
}
@@ -127,7 +127,7 @@ func TestDynamicNamespaceAdd(t *testing.T) {
// Write test data
blockSize := ns0.Options().RetentionOptions().BlockSize()
- now := testSetup.getNowFn()
+ now := testSetup.NowFn()()
seriesMaps := make(map[xtime.UnixNano]generate.SeriesBlock)
inputData := []generate.BlockConfig{
{IDs: []string{"foo", "bar"}, NumPoints: 100, Start: now},
@@ -142,7 +142,7 @@ func TestDynamicNamespaceAdd(t *testing.T) {
// fail to write to non-existent namespaces
for _, testData := range seriesMaps {
- require.Error(t, testSetup.writeBatch(ns0.ID(), testData))
+ require.Error(t, testSetup.WriteBatch(ns0.ID(), testData))
}
// update value in kv
@@ -152,7 +152,7 @@ func TestDynamicNamespaceAdd(t *testing.T) {
// wait until the new namespace is registered
nsExists := func() bool {
- _, ok := testSetup.db.Namespace(ns0.ID())
+ _, ok := testSetup.DB().Namespace(ns0.ID())
return ok
}
require.True(t, waitUntil(nsExists, 5*time.Second))
@@ -160,15 +160,15 @@ func TestDynamicNamespaceAdd(t *testing.T) {
// write to new namespace
for start, testData := range seriesMaps {
- testSetup.setNowFn(start.ToTime())
- require.NoError(t, testSetup.writeBatch(ns0.ID(), testData))
+ testSetup.SetNowFn(start.ToTime())
+ require.NoError(t, testSetup.WriteBatch(ns0.ID(), testData))
}
log.Info("test data is now written")
// Advance time and sleep for a long enough time so data blocks are sealed during ticking
- testSetup.setNowFn(testSetup.getNowFn().Add(2 * blockSize))
- later := testSetup.getNowFn()
- testSetup.sleepFor10xTickMinimumInterval()
+ testSetup.SetNowFn(testSetup.NowFn()().Add(2 * blockSize))
+ later := testSetup.NowFn()()
+ testSetup.SleepFor10xTickMinimumInterval()
metadatasByShard := testSetupMetadatas(t, testSetup, ns0.ID(), now, later)
observedSeriesMaps := testSetupToSeriesMaps(t, testSetup, ns0, metadatasByShard)
diff --git a/src/dbnode/integration/dynamic_namespace_delete_test.go b/src/dbnode/integration/dynamic_namespace_delete_test.go
index 2f9b1b7a12..c9cfcb0b78 100644
--- a/src/dbnode/integration/dynamic_namespace_delete_test.go
+++ b/src/dbnode/integration/dynamic_namespace_delete_test.go
@@ -44,7 +44,7 @@ func TestDynamicNamespaceDelete(t *testing.T) {
}
// test options
- testOpts := newTestOptions(t).
+ testOpts := NewTestOptions(t).
SetTickMinimumInterval(time.Second)
require.True(t, len(testOpts.Namespaces()) >= 2)
ns0 := testOpts.Namespaces()[0]
@@ -86,23 +86,23 @@ func TestDynamicNamespaceDelete(t *testing.T) {
require.NoError(t, err)
// Test setup
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
// Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
- require.NoError(t, testSetup.startServer())
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
+ require.NoError(t, testSetup.StartServer())
// Stop the server
defer func() {
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Info("server is now down")
}()
// Write test data
blockSize := ns0.Options().RetentionOptions().BlockSize()
- now := testSetup.getNowFn()
+ now := testSetup.NowFn()()
seriesMaps := make(map[xtime.UnixNano]generate.SeriesBlock)
inputData := []generate.BlockConfig{
{IDs: []string{"foo", "bar"}, NumPoints: 100, Start: now},
@@ -117,7 +117,7 @@ func TestDynamicNamespaceDelete(t *testing.T) {
// fail to write to non-existent namespaces
for _, testData := range seriesMaps {
- require.Error(t, testSetup.writeBatch(ns0.ID(), testData))
+ require.Error(t, testSetup.WriteBatch(ns0.ID(), testData))
}
// delete namespace key, ensure update propagates
@@ -137,7 +137,7 @@ func TestDynamicNamespaceDelete(t *testing.T) {
// wait until the new namespace is registered
nsExists := func() bool {
- _, ok := testSetup.db.Namespace(ns0.ID())
+ _, ok := testSetup.DB().Namespace(ns0.ID())
return ok
}
require.True(t, waitUntil(nsExists, 5*time.Second))
@@ -145,15 +145,15 @@ func TestDynamicNamespaceDelete(t *testing.T) {
// write to new namespace
for start, testData := range seriesMaps {
- testSetup.setNowFn(start.ToTime())
- require.NoError(t, testSetup.writeBatch(ns0.ID(), testData))
+ testSetup.SetNowFn(start.ToTime())
+ require.NoError(t, testSetup.WriteBatch(ns0.ID(), testData))
}
log.Info("test data is now written")
// Advance time and sleep for a long enough time so data blocks are sealed during ticking
- testSetup.setNowFn(testSetup.getNowFn().Add(2 * blockSize))
- later := testSetup.getNowFn()
- testSetup.sleepFor10xTickMinimumInterval()
+ testSetup.SetNowFn(testSetup.NowFn()().Add(2 * blockSize))
+ later := testSetup.NowFn()()
+ testSetup.SleepFor10xTickMinimumInterval()
metadatasByShard := testSetupMetadatas(t, testSetup, ns0.ID(), now, later)
observedSeriesMaps := testSetupToSeriesMaps(t, testSetup, ns0, metadatasByShard)
diff --git a/src/dbnode/integration/encoder_limit_test.go b/src/dbnode/integration/encoder_limit_test.go
new file mode 100644
index 0000000000..89a0aa74b4
--- /dev/null
+++ b/src/dbnode/integration/encoder_limit_test.go
@@ -0,0 +1,118 @@
+// +build integration
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package integration
+
+import (
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/client"
+ "github.com/m3db/m3/src/x/ident"
+ xtime "github.com/m3db/m3/src/x/time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestEncoderLimit(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow()
+ }
+
+ // We don't want a tick to happen during this test, since the tick
+ // merges encoders and would interfere with exercising the encoder limit.
+ testOpts := NewTestOptions(t).SetTickMinimumInterval(time.Minute)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
+ require.NoError(t, err)
+ defer testSetup.Close()
+
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
+ require.NoError(t, testSetup.StartServer())
+ log.Info("server is now up")
+
+ defer func() {
+ require.NoError(t, testSetup.StopServer())
+ log.Info("server is now down")
+ }()
+
+ now := testSetup.NowFn()()
+
+ db := testSetup.DB()
+ mgr := db.Options().RuntimeOptionsManager()
+ encoderLimit := 5
+ newRuntimeOpts := mgr.Get().SetEncodersPerBlockLimit(encoderLimit)
+ mgr.Update(newRuntimeOpts)
+
+ session, err := testSetup.M3DBClient().DefaultSession()
+ require.NoError(t, err)
+ nsID := testNamespaces[0]
+ seriesID := ident.StringID("foo")
+
+ for i := 0; i < encoderLimit+5; i++ {
+ err = session.Write(
+ nsID, seriesID,
+ // Write backwards so that a new encoder gets created every write.
+ now.Add(time.Duration(50-i)*time.Second),
+ 123, xtime.Second, nil,
+ )
+
+ if i >= encoderLimit {
+ require.Error(t, err)
+ // A write rejected for hitting the max encoder limit should surface
+ // as a bad request so that the client knows not to retry it, since
+ // retrying would only exacerbate the problem.
+ require.True(t, client.IsBadRequestError(err))
+ } else {
+ require.NoError(t, err)
+ }
+ }
+
+ for i := 0; i < 10; i++ {
+ err = session.Write(
+ nsID, seriesID,
+ now.Add(time.Duration(51+i)*time.Second),
+ 123, xtime.Second, nil,
+ )
+
+ // Even though we're doing more writes, these fit into existing
+ // encoders since they all land ahead of the existing writes, so we
+ // expect no write errors.
+ require.NoError(t, err)
+ }
+
+ // Now allow an unlimited number of encoders.
+ encoderLimit = 0
+ newRuntimeOpts = mgr.Get().SetEncodersPerBlockLimit(encoderLimit)
+ mgr.Update(newRuntimeOpts)
+
+ for i := 0; i < 20; i++ {
+ err = session.Write(
+ nsID, seriesID,
+ now.Add(time.Duration(20-i)*time.Second),
+ 123, xtime.Second, nil,
+ )
+
+ // Now there's no encoder limit, so no error even though each of these
+ // additional writes creates a new encoder.
+ require.NoError(t, err)
+ }
+}
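
The new test drives the encoder cap through the runtime options manager rather than static configuration, so the limit can be tightened and then lifted mid-test. A hedged sketch of that update pattern, with method names taken from the test and the `storage.Database` parameter type assumed:

```go
// setEncoderLimit pushes a new per-block encoder limit at runtime. A limit
// of zero removes the cap entirely, which the end of the test relies on.
func setEncoderLimit(db storage.Database, limit int) {
	mgr := db.Options().RuntimeOptionsManager()
	mgr.Update(mgr.Get().SetEncodersPerBlockLimit(limit))
}
```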
diff --git a/src/dbnode/integration/fetch_tagged_quorum_test.go b/src/dbnode/integration/fetch_tagged_quorum_test.go
index 6ef3aca64c..eeb07baabd 100644
--- a/src/dbnode/integration/fetch_tagged_quorum_test.go
+++ b/src/dbnode/integration/fetch_tagged_quorum_test.go
@@ -60,7 +60,7 @@ func TestFetchTaggedQuorumNormalOnlyOneUp(t *testing.T) {
defer closeFn()
// fetch succeeds from one node
- require.NoError(t, nodes[0].startServer())
+ require.NoError(t, nodes[0].StartServer())
writeTagged(t, nodes[0])
testFetch.assertContainsTaggedResult(t,
@@ -86,8 +86,8 @@ func TestFetchTaggedQuorumNormalOnlyTwoUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- require.NoError(t, nodes[1].startServer())
+ require.NoError(t, nodes[0].StartServer())
+ require.NoError(t, nodes[1].StartServer())
writeTagged(t, nodes[0], nodes[1])
// succeed to two nodes
@@ -113,9 +113,9 @@ func TestFetchTaggedQuorumNormalAllUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- require.NoError(t, nodes[1].startServer())
- require.NoError(t, nodes[2].startServer())
+ require.NoError(t, nodes[0].StartServer())
+ require.NoError(t, nodes[1].StartServer())
+ require.NoError(t, nodes[2].StartServer())
writeTagged(t, nodes...)
// succeed to all nodes
@@ -142,8 +142,8 @@ func TestFetchTaggedQuorumAddNodeOnlyLeavingInitializingUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- require.NoError(t, nodes[3].startServerDontWaitBootstrap())
+ require.NoError(t, nodes[0].StartServer())
+ require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
writeTagged(t, nodes[0], nodes[3])
// No fetches succeed to available nodes
@@ -170,9 +170,9 @@ func TestFetchTaggedQuorumAddNodeOnlyOneNormalAndLeavingInitializingUp(t *testin
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- require.NoError(t, nodes[1].startServer())
- require.NoError(t, nodes[3].startServerDontWaitBootstrap())
+ require.NoError(t, nodes[0].StartServer())
+ require.NoError(t, nodes[1].StartServer())
+ require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
writeTagged(t, nodes[0], nodes[1], nodes[3])
// fetches succeed to one available node
@@ -202,10 +202,10 @@ func TestFetchTaggedQuorumAddNodeAllUp(t *testing.T) {
defer closeFn()
// fetches succeed to one available node
- require.NoError(t, nodes[0].startServer())
- require.NoError(t, nodes[1].startServer())
- require.NoError(t, nodes[2].startServer())
- require.NoError(t, nodes[3].startServerDontWaitBootstrap())
+ require.NoError(t, nodes[0].StartServer())
+ require.NoError(t, nodes[1].StartServer())
+ require.NoError(t, nodes[2].StartServer())
+ require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
writeTagged(t, nodes...)
testFetch.assertContainsTaggedResult(t, topology.ReadConsistencyLevelOne,
@@ -260,7 +260,7 @@ func makeMultiNodeSetup(
nspaces := []namespace.Metadata{md1, md2}
nodes, topoInit, closeFn := newNodes(t, numShards, instances, nspaces, asyncInserts)
for _, node := range nodes {
- node.opts = node.opts.SetNumShards(numShards)
+ node.SetOpts(node.Opts().SetNumShards(numShards))
}
clientopts := client.NewOptions().
@@ -291,13 +291,13 @@ func makeTestFetchTagged(
q, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
require.NoError(t, err)
- startTime := nodes[0].getNowFn()
+ startTime := nodes[0].NowFn()()
series, metadata, err := s.FetchTagged(testNamespaces[0],
index.Query{Query: q},
index.QueryOptions{
StartInclusive: startTime.Add(-time.Minute),
EndExclusive: startTime.Add(time.Minute),
- Limit: 100,
+ SeriesLimit: 100,
})
return series, metadata.Exhaustive, err
}
@@ -307,13 +307,13 @@ func makeTestFetchTagged(
func writeTagged(
t *testing.T,
- nodes ...*testSetup,
+ nodes ...TestSetup,
) {
ctx := context.NewContext()
defer ctx.BlockingClose()
for _, n := range nodes {
- require.NoError(t, n.db.WriteTagged(ctx, testNamespaces[0], ident.StringID("quorumTest"),
+ require.NoError(t, n.DB().WriteTagged(ctx, testNamespaces[0], ident.StringID("quorumTest"),
ident.NewTagsIterator(ident.NewTags(ident.StringTag("foo", "bar"), ident.StringTag("boo", "baz"))),
- n.getNowFn(), 42, xtime.Second, nil))
+ n.NowFn()(), 42, xtime.Second, nil))
}
}
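
Besides the setter/getter renames, note the `index.QueryOptions` field rename from `Limit` to `SeriesLimit` in the hunk above. A hedged fragment of the new call shape, reusing identifiers from the surrounding test:

```go
q, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
require.NoError(t, err)

series, metadata, err := s.FetchTagged(testNamespaces[0],
	index.Query{Query: q},
	index.QueryOptions{
		StartInclusive: startTime.Add(-time.Minute),
		EndExclusive:   startTime.Add(time.Minute),
		SeriesLimit:    100, // formerly Limit
	})
```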
diff --git a/src/dbnode/integration/fs_bootstrap_index_test.go b/src/dbnode/integration/fs_bootstrap_index_test.go
index 0ca951d0f8..714b8935f6 100644
--- a/src/dbnode/integration/fs_bootstrap_index_test.go
+++ b/src/dbnode/integration/fs_bootstrap_index_test.go
@@ -28,29 +28,69 @@ import (
"github.com/m3db/m3/src/dbnode/integration/generate"
"github.com/m3db/m3/src/dbnode/namespace"
- persistfs "github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/retention"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/fs"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
- "github.com/m3db/m3/src/dbnode/storage/index/compaction"
"github.com/m3db/m3/src/m3ninx/idx"
- "github.com/m3db/m3/src/m3ninx/index/segment/fst"
+ "github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/stretchr/testify/require"
+ "go.uber.org/zap"
)
func TestFilesystemBootstrapIndexWithIndexingEnabled(t *testing.T) {
+ testFilesystemBootstrapIndexWithIndexingEnabled(t,
+ testFilesystemBootstrapIndexWithIndexingEnabledOptions{})
+}
+
+// TestFilesystemBootstrapIndexWithIndexingEnabledAndCheckTickFreeMmap makes
+// sure that free-mmap calls occur for bootstrapped segments.
+func TestFilesystemBootstrapIndexWithIndexingEnabledAndCheckTickFreeMmap(t *testing.T) {
+ testFilesystemBootstrapIndexWithIndexingEnabled(t,
+ testFilesystemBootstrapIndexWithIndexingEnabledOptions{
+ test: func(t *testing.T, setup TestSetup) {
+ var (
+ cancellable = context.NewCancellable()
+ numSegmentsBootstrapped int64
+ freeMmap int64
+ )
+ for _, ns := range setup.DB().Namespaces() {
+ idx, err := ns.Index()
+ require.NoError(t, err)
+
+ result, err := idx.Tick(cancellable, time.Now())
+ require.NoError(t, err)
+
+ numSegmentsBootstrapped += result.NumSegmentsBootstrapped
+ freeMmap += result.FreeMmap
+ }
+
+ log := setup.StorageOpts().InstrumentOptions().Logger()
+ log.Info("ticked namespaces",
+ zap.Int64("numSegmentsBootstrapped", numSegmentsBootstrapped),
+ zap.Int64("freeMmap", freeMmap))
+ require.True(t, numSegmentsBootstrapped > 0)
+ require.True(t, freeMmap > 0)
+ },
+ })
+}
+
+type testFilesystemBootstrapIndexWithIndexingEnabledOptions struct {
+ // test is an extended test to run at the end of the core bootstrap test.
+ test func(t *testing.T, setup TestSetup)
+}
+
+func testFilesystemBootstrapIndexWithIndexingEnabled(
+ t *testing.T,
+ testOpts testFilesystemBootstrapIndexWithIndexingEnabledOptions,
+) {
if testing.Short() {
t.SkipNow() // Just skip if we're doing a short run
}
var (
blockSize = 2 * time.Hour
- rOpts = retention.NewOptions().SetRetentionPeriod(2 * blockSize).SetBlockSize(blockSize)
+ rOpts = retention.NewOptions().SetRetentionPeriod(6 * blockSize).SetBlockSize(blockSize)
idxOpts = namespace.NewIndexOptions().SetEnabled(true).SetBlockSize(2 * blockSize)
nOpts = namespace.NewOptions().SetRetentionOptions(rOpts).SetIndexOptions(idxOpts)
)
@@ -59,57 +99,20 @@ func TestFilesystemBootstrapIndexWithIndexingEnabled(t *testing.T) {
ns2, err := namespace.NewMetadata(testNamespaces[1], nOpts)
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1, ns2})
// Test setup
- setup, err := newTestSetup(t, opts, nil)
- require.NoError(t, err)
- defer setup.close()
-
- fsOpts := setup.storageOpts.CommitLogOptions().FilesystemOptions()
-
- persistMgr, err := persistfs.NewPersistManager(fsOpts)
- require.NoError(t, err)
-
- storageIdxOpts := setup.storageOpts.IndexOptions()
- compactor, err := compaction.NewCompactor(storageIdxOpts.DocumentArrayPool(),
- index.DocumentArrayPoolCapacity,
- storageIdxOpts.SegmentBuilderOptions(),
- storageIdxOpts.FSTSegmentOptions(),
- compaction.CompactorOptions{
- FSTWriterOptions: &fst.WriterOptions{
- // DisableRegistry is set to true to trade a larger FST size
- // for a faster FST compaction since we want to reduce the end
- // to end latency for time to first index a metric.
- DisableRegistry: true,
- },
- })
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
+ defer setup.Close()
- noOpAll := bootstrapper.NewNoOpAllBootstrapperProvider()
- bsOpts := result.NewOptions().
- SetSeriesCachePolicy(setup.storageOpts.SeriesCachePolicy())
- bfsOpts := fs.NewOptions().
- SetResultOptions(bsOpts).
- SetFilesystemOptions(fsOpts).
- SetIndexOptions(storageIdxOpts).
- SetDatabaseBlockRetrieverManager(setup.storageOpts.DatabaseBlockRetrieverManager()).
- SetPersistManager(persistMgr).
- SetCompactor(compactor)
- bs, err := fs.NewFileSystemBootstrapperProvider(bfsOpts, noOpAll)
- require.NoError(t, err)
- processOpts := bootstrap.NewProcessOptions().
- SetTopologyMapProvider(setup).
- SetOrigin(setup.origin)
- processProvider, err := bootstrap.NewProcessProvider(bs, processOpts, bsOpts)
- require.NoError(t, err)
-
- setup.storageOpts = setup.storageOpts.
- SetBootstrapProcessProvider(processProvider)
+ require.NoError(t, setup.InitializeBootstrappers(InitializeBootstrappersOptions{
+ WithFileSystem: true,
+ }))
// Write test data
- now := setup.getNowFn()
+ now := setup.NowFn()()
fooSeries := generate.Series{
ID: ident.StringID("foo"),
@@ -131,13 +134,13 @@ func TestFilesystemBootstrapIndexWithIndexingEnabled(t *testing.T) {
IDs: []string{fooSeries.ID.String()},
Tags: fooSeries.Tags,
NumPoints: 100,
- Start: now.Add(-blockSize),
+ Start: now.Add(-3 * blockSize),
},
{
IDs: []string{barSeries.ID.String()},
Tags: barSeries.Tags,
NumPoints: 100,
- Start: now.Add(-blockSize),
+ Start: now.Add(-3 * blockSize),
},
{
IDs: []string{fooSeries.ID.String()},
@@ -157,14 +160,14 @@ func TestFilesystemBootstrapIndexWithIndexingEnabled(t *testing.T) {
require.NoError(t, writeTestDataToDisk(ns2, setup, nil, 0))
// Start the server with filesystem bootstrapper
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Debug("filesystem bootstrap test")
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
@@ -173,7 +176,7 @@ func TestFilesystemBootstrapIndexWithIndexingEnabled(t *testing.T) {
verifySeriesMaps(t, setup, testNamespaces[1], nil)
// Issue some index queries
- session, err := setup.m3dbClient.DefaultSession()
+ session, err := setup.M3DBClient().DefaultSession()
require.NoError(t, err)
start := now.Add(-rOpts.RetentionPeriod())
@@ -207,4 +210,8 @@ func TestFilesystemBootstrapIndexWithIndexingEnabled(t *testing.T) {
exhaustive: true,
expected: []generate.Series{barSeries, bazSeries},
})
+
+ if testOpts.test != nil {
+ testOpts.test(t, setup)
+ }
}
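
The large deletions above are the point of this refactor: the per-test wiring of persist manager, compactor, filesystem bootstrapper provider, and bootstrap process provider collapses into a single call on the setup. A hedged sketch of the consolidated pattern, using only names that appear in this diff:

```go
setup, err := NewTestSetup(t, NewTestOptions(t), nil)
require.NoError(t, err)
defer setup.Close()

// One declarative call replaces roughly forty lines of manual provider
// wiring per test; WithCommitLog and CommitLogOptions can be set likewise.
require.NoError(t, setup.InitializeBootstrappers(InitializeBootstrappersOptions{
	WithFileSystem: true,
}))
```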
diff --git a/src/dbnode/integration/fs_bootstrap_index_volume_type_test.go b/src/dbnode/integration/fs_bootstrap_index_volume_type_test.go
new file mode 100644
index 0000000000..58ac71aca6
--- /dev/null
+++ b/src/dbnode/integration/fs_bootstrap_index_volume_type_test.go
@@ -0,0 +1,246 @@
+// +build integration
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package integration
+
+import (
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/integration/generate"
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/retention"
+ "github.com/m3db/m3/src/dbnode/storage/index"
+ "github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/m3ninx/idx"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
+ "github.com/m3db/m3/src/x/ident"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestFilesystemBootstrapIndexVolumeTypes(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow() // Just skip if we're doing a short run
+ }
+
+ var (
+ blockSize = 2 * time.Hour
+ rOpts = retention.NewOptions().SetRetentionPeriod(2 * blockSize).SetBlockSize(blockSize)
+ idxOpts = namespace.NewIndexOptions().SetEnabled(true).SetBlockSize(2 * blockSize)
+ nOpts = namespace.NewOptions().SetRetentionOptions(rOpts).SetIndexOptions(idxOpts)
+ )
+ ns1, err := namespace.NewMetadata(testNamespaces[0], nOpts)
+ require.NoError(t, err)
+ ns2, err := namespace.NewMetadata(testNamespaces[1], nOpts)
+ require.NoError(t, err)
+
+ opts := NewTestOptions(t).
+ SetNamespaces([]namespace.Metadata{ns1, ns2})
+
+ // Test setup
+ setup, err := NewTestSetup(t, opts, nil)
+ require.NoError(t, err)
+ defer setup.Close()
+
+ require.NoError(t, setup.InitializeBootstrappers(InitializeBootstrappersOptions{
+ WithFileSystem: true,
+ }))
+
+ // Write test data
+ now := setup.NowFn()()
+
+ fooSeries := generate.Series{
+ ID: ident.StringID("foo"),
+ Tags: ident.NewTags(ident.StringTag("city", "new_york"), ident.StringTag("foo", "foo")),
+ }
+ fooDoc := doc.Document{
+ ID: fooSeries.ID.Bytes(),
+ Fields: []doc.Field{
+ doc.Field{Name: []byte("city"), Value: []byte("new_york")},
+ doc.Field{Name: []byte("foo"), Value: []byte("foo")},
+ },
+ }
+
+ barSeries := generate.Series{
+ ID: ident.StringID("bar"),
+ Tags: ident.NewTags(ident.StringTag("city", "new_jersey")),
+ }
+ barDoc := doc.Document{
+ ID: barSeries.ID.Bytes(),
+ Fields: []doc.Field{
+ doc.Field{Name: []byte("city"), Value: []byte("new_jersey")},
+ },
+ }
+
+ bazSeries := generate.Series{
+ ID: ident.StringID("baz"),
+ Tags: ident.NewTags(ident.StringTag("city", "seattle")),
+ }
+ bazDoc := doc.Document{
+ ID: bazSeries.ID.Bytes(),
+ Fields: []doc.Field{
+ doc.Field{Name: []byte("city"), Value: []byte("seattle")},
+ },
+ }
+
+ quxSeries := generate.Series{
+ ID: ident.StringID("qux"),
+ Tags: ident.NewTags(ident.StringTag("city", "new_harmony")),
+ }
+ quxDoc := doc.Document{
+ ID: quxSeries.ID.Bytes(),
+ Fields: []doc.Field{
+ doc.Field{Name: []byte("city"), Value: []byte("new_harmony")},
+ },
+ }
+
+ duxSeries := generate.Series{
+ ID: ident.StringID("dux"),
+ Tags: ident.NewTags(ident.StringTag("city", "los_angeles")),
+ }
+ duxDoc := doc.Document{
+ ID: duxSeries.ID.Bytes(),
+ Fields: []doc.Field{
+ doc.Field{Name: []byte("city"), Value: []byte("los_angeles")},
+ },
+ }
+
+ seriesMaps := generate.BlocksByStart([]generate.BlockConfig{
+ {
+ IDs: []string{fooSeries.ID.String()},
+ Tags: fooSeries.Tags,
+ NumPoints: 100,
+ Start: now.Add(-blockSize),
+ },
+ {
+ IDs: []string{barSeries.ID.String()},
+ Tags: barSeries.Tags,
+ NumPoints: 100,
+ Start: now.Add(-blockSize),
+ },
+ {
+ IDs: []string{quxSeries.ID.String()},
+ Tags: quxSeries.Tags,
+ NumPoints: 100,
+ Start: now.Add(-blockSize),
+ },
+ {
+ IDs: []string{duxSeries.ID.String()},
+ Tags: duxSeries.Tags,
+ NumPoints: 100,
+ Start: now.Add(-blockSize),
+ },
+ {
+ IDs: []string{fooSeries.ID.String()},
+ Tags: fooSeries.Tags,
+ NumPoints: 50,
+ Start: now,
+ },
+ {
+ IDs: []string{bazSeries.ID.String()},
+ Tags: bazSeries.Tags,
+ NumPoints: 50,
+ Start: now,
+ },
+ })
+ defaultIndexDocs := []doc.Document{
+ fooDoc,
+ barDoc,
+ bazDoc,
+ }
+ extraIndexDocs := []doc.Document{
+ quxDoc,
+ duxDoc,
+ }
+
+ require.NoError(t, writeTestDataToDisk(ns1, setup, seriesMaps, 0))
+ require.NoError(t, writeTestDataToDisk(ns2, setup, nil, 0))
+ require.NoError(t, writeTestIndexDataToDisk(
+ ns1,
+ setup.StorageOpts(),
+ idxpersist.DefaultIndexVolumeType,
+ now.Add(-blockSize),
+ setup.ShardSet().AllIDs(),
+ defaultIndexDocs,
+ ))
+ require.NoError(t, writeTestIndexDataToDisk(
+ ns1,
+ setup.StorageOpts(),
+ idxpersist.IndexVolumeType("extra"),
+ now.Add(-blockSize),
+ setup.ShardSet().AllIDs(),
+ extraIndexDocs,
+ ))
+
+ // Start the server with filesystem bootstrapper
+ log := setup.StorageOpts().InstrumentOptions().Logger()
+ log.Debug("filesystem bootstrap multiple index volume types test")
+ require.NoError(t, setup.StartServer())
+ log.Debug("server is now up")
+
+ // Stop the server
+ defer func() {
+ require.NoError(t, setup.StopServer())
+ log.Debug("server is now down")
+ }()
+
+ // Verify data matches what we expect
+ verifySeriesMaps(t, setup, testNamespaces[0], seriesMaps)
+ verifySeriesMaps(t, setup, testNamespaces[1], nil)
+
+ // Issue some index queries
+ session, err := setup.M3DBClient().DefaultSession()
+ require.NoError(t, err)
+
+ start := now.Add(-rOpts.RetentionPeriod())
+ end := now.Add(blockSize)
+ queryOpts := index.QueryOptions{StartInclusive: start, EndExclusive: end}
+
+ // Match all new_*r*
+ regexpQuery, err := idx.NewRegexpQuery([]byte("city"), []byte("new_.*r.*"))
+ require.NoError(t, err)
+ iter, fetchResponse, err := session.FetchTaggedIDs(ns1.ID(),
+ index.Query{Query: regexpQuery}, queryOpts)
+ require.NoError(t, err)
+ defer iter.Finalize()
+
+ verifyQueryMetadataResults(t, iter, fetchResponse.Exhaustive, verifyQueryMetadataResultsOptions{
+ namespace: ns1.ID(),
+ exhaustive: true,
+ expected: []generate.Series{fooSeries, barSeries, quxSeries},
+ })
+
+ // Match all *e*e*
+ regexpQuery, err = idx.NewRegexpQuery([]byte("city"), []byte(".*e.*e.*"))
+ require.NoError(t, err)
+ iter, fetchResponse, err = session.FetchTaggedIDs(ns1.ID(),
+ index.Query{Query: regexpQuery}, queryOpts)
+ require.NoError(t, err)
+ defer iter.Finalize()
+
+ verifyQueryMetadataResults(t, iter, fetchResponse.Exhaustive, verifyQueryMetadataResultsOptions{
+ namespace: ns1.ID(),
+ exhaustive: true,
+ expected: []generate.Series{barSeries, bazSeries, duxSeries},
+ })
+}
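
As a quick sanity check on the two queries, the expected match sets can be reproduced with the standard library alone, assuming (as these expectations imply) that the index anchors its regexp queries:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	cities := []string{"new_york", "new_jersey", "seattle", "new_harmony", "los_angeles"}
	for _, pattern := range []string{"^new_.*r.*$", "^.*e.*e.*$"} {
		re := regexp.MustCompile(pattern)
		var matched []string
		for _, c := range cities {
			if re.MatchString(c) {
				matched = append(matched, c)
			}
		}
		// ^new_.*r.*$ => new_york, new_jersey, new_harmony (foo, bar, qux)
		// ^.*e.*e.*$  => new_jersey, seattle, los_angeles  (bar, baz, dux)
		fmt.Println(pattern, "=>", matched)
	}
}
```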
diff --git a/src/dbnode/integration/fs_bootstrap_multi_ns_test.go b/src/dbnode/integration/fs_bootstrap_multi_ns_test.go
index e47cf5910c..9d63a1ccc5 100644
--- a/src/dbnode/integration/fs_bootstrap_multi_ns_test.go
+++ b/src/dbnode/integration/fs_bootstrap_multi_ns_test.go
@@ -28,12 +28,7 @@ import (
"github.com/m3db/m3/src/dbnode/integration/generate"
"github.com/m3db/m3/src/dbnode/namespace"
- persistfs "github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/retention"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/fs"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/stretchr/testify/require"
)
@@ -56,47 +51,22 @@ func TestFilesystemBootstrapMultipleNamespaces(t *testing.T) {
require.NoError(t, err)
ns2, err := namespace.NewMetadata(testNamespaces[1], namespace.NewOptions().SetRetentionOptions(ns2ROpts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1, ns2})
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- fsOpts := setup.storageOpts.CommitLogOptions().FilesystemOptions()
+ require.NoError(t, setup.InitializeBootstrappers(InitializeBootstrappersOptions{
+ WithFileSystem: true,
+ }))
- persistMgr, err := persistfs.NewPersistManager(fsOpts)
- require.NoError(t, err)
-
- noOpAll := bootstrapper.NewNoOpAllBootstrapperProvider()
- bsOpts := result.NewOptions().
- SetSeriesCachePolicy(setup.storageOpts.SeriesCachePolicy())
- storageIdxOpts := setup.storageOpts.IndexOptions()
- bfsOpts := fs.NewOptions().
- SetResultOptions(bsOpts).
- SetFilesystemOptions(fsOpts).
- SetIndexOptions(storageIdxOpts).
- SetDatabaseBlockRetrieverManager(setup.storageOpts.DatabaseBlockRetrieverManager()).
- SetPersistManager(persistMgr).
- SetCompactor(newCompactor(t, storageIdxOpts))
-
- bs, err := fs.NewFileSystemBootstrapperProvider(bfsOpts, noOpAll)
- require.NoError(t, err)
-
- processOpts := bootstrap.NewProcessOptions().
- SetTopologyMapProvider(setup).
- SetOrigin(setup.origin)
- processProvider, err := bootstrap.NewProcessProvider(bs, processOpts, bsOpts)
- require.NoError(t, err)
-
- setup.storageOpts = setup.storageOpts.
- SetBootstrapProcessProvider(processProvider)
-
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Info("generating data")
// Write test data
- now := setup.getNowFn()
+ now := setup.NowFn()()
ns1SeriesMaps := generate.BlocksByStart([]generate.BlockConfig{
{IDs: []string{"foo", "bar"}, NumPoints: 100, Start: now.Add(-ns1BlockSize)},
{IDs: []string{"foo", "baz"}, NumPoints: 50, Start: now},
@@ -112,12 +82,12 @@ func TestFilesystemBootstrapMultipleNamespaces(t *testing.T) {
// Start the server with filesystem bootstrapper
log.Info("filesystem bootstrap test")
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Info("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Info("server is now down")
}()
diff --git a/src/dbnode/integration/fs_bootstrap_tags_test.go b/src/dbnode/integration/fs_bootstrap_tags_test.go
index f1b4a7d2a3..2a4021f607 100644
--- a/src/dbnode/integration/fs_bootstrap_tags_test.go
+++ b/src/dbnode/integration/fs_bootstrap_tags_test.go
@@ -28,12 +28,7 @@ import (
"github.com/m3db/m3/src/dbnode/integration/generate"
"github.com/m3db/m3/src/dbnode/namespace"
- persistfs "github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/retention"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/fs"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/x/ident"
"github.com/stretchr/testify/require"
@@ -55,43 +50,20 @@ func TestFilesystemBootstrapTagsWithIndexingDisabled(t *testing.T) {
ns2, err := namespace.NewMetadata(testNamespaces[1], nOpts)
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1, ns2})
// Test setup
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- fsOpts := setup.storageOpts.CommitLogOptions().FilesystemOptions()
-
- persistMgr, err := persistfs.NewPersistManager(fsOpts)
- require.NoError(t, err)
-
- noOpAll := bootstrapper.NewNoOpAllBootstrapperProvider()
- bsOpts := result.NewOptions().
- SetSeriesCachePolicy(setup.storageOpts.SeriesCachePolicy())
- storageIdxOpts := setup.storageOpts.IndexOptions()
- bfsOpts := fs.NewOptions().
- SetResultOptions(bsOpts).
- SetFilesystemOptions(fsOpts).
- SetIndexOptions(storageIdxOpts).
- SetDatabaseBlockRetrieverManager(setup.storageOpts.DatabaseBlockRetrieverManager()).
- SetPersistManager(persistMgr).
- SetCompactor(newCompactor(t, storageIdxOpts))
- bs, err := fs.NewFileSystemBootstrapperProvider(bfsOpts, noOpAll)
- require.NoError(t, err)
- processOpts := bootstrap.NewProcessOptions().
- SetTopologyMapProvider(setup).
- SetOrigin(setup.origin)
- processProvider, err := bootstrap.NewProcessProvider(bs, processOpts, bsOpts)
- require.NoError(t, err)
-
- setup.storageOpts = setup.storageOpts.
- SetBootstrapProcessProvider(processProvider)
+ require.NoError(t, setup.InitializeBootstrappers(InitializeBootstrappersOptions{
+ WithFileSystem: true,
+ }))
// Write test data
- now := setup.getNowFn()
+ now := setup.NowFn()()
seriesMaps := generate.BlocksByStart([]generate.BlockConfig{
{
IDs: []string{"foo"},
@@ -122,14 +94,14 @@ func TestFilesystemBootstrapTagsWithIndexingDisabled(t *testing.T) {
require.NoError(t, writeTestDataToDisk(ns2, setup, nil, 0))
// Start the server with filesystem bootstrapper
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Debug("filesystem bootstrap test")
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
diff --git a/src/dbnode/integration/fs_bootstrap_test.go b/src/dbnode/integration/fs_bootstrap_test.go
index 599d971ad5..a6451dd7bf 100644
--- a/src/dbnode/integration/fs_bootstrap_test.go
+++ b/src/dbnode/integration/fs_bootstrap_test.go
@@ -28,12 +28,7 @@ import (
"github.com/m3db/m3/src/dbnode/integration/generate"
"github.com/m3db/m3/src/dbnode/namespace"
- persistfs "github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/retention"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/fs"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/stretchr/testify/require"
)
@@ -60,7 +55,7 @@ func testFilesystemBootstrap(t *testing.T, setTestOpts setTestOptions, updateInp
ns2, err := namespace.NewMetadata(testNamespaces[1], namespace.NewOptions().SetRetentionOptions(rOpts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1, ns2})
if setTestOpts != nil {
opts = setTestOpts(t, opts)
@@ -69,39 +64,16 @@ func testFilesystemBootstrap(t *testing.T, setTestOpts setTestOptions, updateInp
}
// Test setup
- setup, err := newTestSetup(t, opts, nil)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
+ defer setup.Close()
- fsOpts := setup.storageOpts.CommitLogOptions().FilesystemOptions()
-
- persistMgr, err := persistfs.NewPersistManager(fsOpts)
- require.NoError(t, err)
-
- noOpAll := bootstrapper.NewNoOpAllBootstrapperProvider()
- bsOpts := result.NewOptions().
- SetSeriesCachePolicy(setup.storageOpts.SeriesCachePolicy())
- storageIdxOpts := setup.storageOpts.IndexOptions()
- bfsOpts := fs.NewOptions().
- SetResultOptions(bsOpts).
- SetFilesystemOptions(fsOpts).
- SetIndexOptions(storageIdxOpts).
- SetDatabaseBlockRetrieverManager(setup.storageOpts.DatabaseBlockRetrieverManager()).
- SetPersistManager(persistMgr).
- SetCompactor(newCompactor(t, storageIdxOpts))
- bs, err := fs.NewFileSystemBootstrapperProvider(bfsOpts, noOpAll)
- require.NoError(t, err)
- processOpts := bootstrap.NewProcessOptions().
- SetTopologyMapProvider(setup).
- SetOrigin(setup.origin)
- processProvider, err := bootstrap.NewProcessProvider(bs, processOpts, bsOpts)
- require.NoError(t, err)
-
- setup.storageOpts = setup.storageOpts.
- SetBootstrapProcessProvider(processProvider)
+ require.NoError(t, setup.InitializeBootstrappers(InitializeBootstrappersOptions{
+ WithFileSystem: true,
+ }))
// Write test data
- now := setup.getNowFn()
+ now := setup.NowFn()()
inputData := []generate.BlockConfig{
{IDs: []string{"foo", "bar"}, NumPoints: 100, Start: now.Add(-blockSize)},
{IDs: []string{"foo", "baz"}, NumPoints: 50, Start: now},
@@ -114,14 +86,14 @@ func testFilesystemBootstrap(t *testing.T, setTestOpts setTestOptions, updateInp
require.NoError(t, writeTestDataToDisk(ns2, setup, nil, 0))
// Start the server with filesystem bootstrapper
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Debug("filesystem bootstrap test")
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
diff --git a/src/dbnode/integration/fs_commitlog_mixed_mode_read_write_test.go b/src/dbnode/integration/fs_commitlog_mixed_mode_read_write_test.go
index 1e720a0bcc..dc98a1c4b7 100644
--- a/src/dbnode/integration/fs_commitlog_mixed_mode_read_write_test.go
+++ b/src/dbnode/integration/fs_commitlog_mixed_mode_read_write_test.go
@@ -29,13 +29,7 @@ import (
"github.com/m3db/m3/src/dbnode/integration/generate"
"github.com/m3db/m3/src/dbnode/namespace"
- persistfs "github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/retention"
- "github.com/m3db/m3/src/dbnode/runtime"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
- bcl "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/commitlog"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/fs"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
@@ -72,7 +66,7 @@ func testFsCommitLogMixedModeReadWrite(t *testing.T, setTestOpts setTestOptions,
SetRetentionOptions(ns1ROpts)
ns1, err := namespace.NewMetadata(nsID, ns1Opts)
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1})
if setTestOpts != nil {
@@ -82,12 +76,12 @@ func testFsCommitLogMixedModeReadWrite(t *testing.T, setTestOpts setTestOptions,
// Test setup
setup := newTestSetupWithCommitLogAndFilesystemBootstrapper(t, opts)
- defer setup.close()
+ defer setup.Close()
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Info("commit log & fileset files, write, read, and merge bootstrap test")
- filePathPrefix := setup.storageOpts.CommitLogOptions().FilesystemOptions().FilePathPrefix()
+ filePathPrefix := setup.StorageOpts().CommitLogOptions().FilesystemOptions().FilePathPrefix()
// setting time to 2017/02/13 15:30:10
fakeStart := time.Date(2017, time.February, 13, 15, 30, 10, 0, time.Local)
@@ -95,7 +89,7 @@ func testFsCommitLogMixedModeReadWrite(t *testing.T, setTestOpts setTestOptions,
blkStart16 := blkStart15.Add(ns1BlockSize)
blkStart17 := blkStart16.Add(ns1BlockSize)
blkStart18 := blkStart17.Add(ns1BlockSize)
- setup.setNowFn(fakeStart)
+ setup.SetNowFn(fakeStart)
// startup server
log.Debug("starting server")
@@ -105,7 +99,7 @@ func testFsCommitLogMixedModeReadWrite(t *testing.T, setTestOpts setTestOptions,
// Stop the server
defer func() {
log.Debug("stopping server")
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
@@ -115,7 +109,7 @@ func testFsCommitLogMixedModeReadWrite(t *testing.T, setTestOpts setTestOptions,
var (
total = 200
ids = &idGen{longTestID}
- db = setup.db
+ db = setup.DB()
ctx = context.NewContext()
)
defer ctx.Close()
@@ -123,7 +117,7 @@ func testFsCommitLogMixedModeReadWrite(t *testing.T, setTestOpts setTestOptions,
datapoints := generateDatapoints(fakeStart, total, ids, annGen)
for _, dp := range datapoints {
ts := dp.time
- setup.setNowFn(ts)
+ setup.SetNowFn(ts)
require.NoError(t, db.Write(ctx, nsID, dp.series, ts, dp.value, xtime.Second, dp.ann))
}
log.Info("wrote datapoints")
@@ -141,16 +135,16 @@ func testFsCommitLogMixedModeReadWrite(t *testing.T, setTestOpts setTestOptions,
waitTimeout := 5 * time.Minute
log.Info("waiting till expected fileset files have been written")
- require.NoError(t, waitUntilDataFilesFlushed(filePathPrefix, setup.shardSet, nsID, expectedFlushedData, waitTimeout))
+ require.NoError(t, waitUntilDataFilesFlushed(filePathPrefix, setup.ShardSet(), nsID, expectedFlushedData, waitTimeout))
log.Info("expected fileset files have been written")
// stopping db
log.Info("stopping database")
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Info("database stopped")
// the time now is 18:55
- setup.setNowFn(setup.getNowFn().Add(5 * time.Minute))
+ setup.SetNowFn(setup.NowFn()().Add(5 * time.Minute))
// recreate the db from the data files and commit log
// should contain data from 15:30 - 17:59 on disk and 18:00 - 18:50 in mem
@@ -161,7 +155,7 @@ func testFsCommitLogMixedModeReadWrite(t *testing.T, setTestOpts setTestOptions,
log.Info("verified data in database equals expected data")
// the time now is 19:15
- setup.setNowFn(setup.getNowFn().Add(20 * time.Minute))
+ setup.SetNowFn(setup.NowFn()().Add(20 * time.Minute))
// data from hour 15 is now outdated, ensure the file has been cleaned up
log.Info("waiting till expired fileset files have been cleanedup")
require.NoError(t, waitUntilFileSetFilesCleanedUp(setup, nsID, blkStart15, waitTimeout))
@@ -169,7 +163,7 @@ func testFsCommitLogMixedModeReadWrite(t *testing.T, setTestOpts setTestOptions,
// stopping db
log.Info("stopping database")
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Info("database stopped")
// recreate the db from the data files and commit log
@@ -188,29 +182,29 @@ func testFsCommitLogMixedModeReadWrite(t *testing.T, setTestOpts setTestOptions,
// inspection and commitlog bootstrapper are generated each time.
func startServerWithNewInspection(
t *testing.T,
- opts testOptions,
- setup *testSetup,
+ opts TestOptions,
+ setup TestSetup,
) {
setCommitLogAndFilesystemBootstrapper(t, opts, setup)
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
}
func waitUntilFileSetFilesCleanedUp(
- setup *testSetup,
+ setup TestSetup,
namespace ident.ID,
toDelete time.Time,
timeout time.Duration,
) error {
var (
- shardSet = setup.shardSet
+ shardSet = setup.ShardSet()
filesetFiles = []cleanupTimesFileSet{}
commitLogFiles = cleanupTimesCommitLog{
- clOpts: setup.storageOpts.CommitLogOptions(),
+ clOpts: setup.StorageOpts().CommitLogOptions(),
}
)
for _, id := range shardSet.AllIDs() {
filesetFiles = append(filesetFiles, cleanupTimesFileSet{
- filePathPrefix: setup.filePathPrefix,
+ filePathPrefix: setup.FilePathPrefix(),
namespace: namespace,
shard: id,
times: []time.Time{toDelete},
@@ -219,8 +213,8 @@ func waitUntilFileSetFilesCleanedUp(
return waitUntilDataCleanedUpExtended(filesetFiles, commitLogFiles, timeout)
}
-func newTestSetupWithCommitLogAndFilesystemBootstrapper(t *testing.T, opts testOptions) *testSetup {
- setup, err := newTestSetup(t, opts, nil)
+func newTestSetupWithCommitLogAndFilesystemBootstrapper(t *testing.T, opts TestOptions) TestSetup {
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
setCommitLogAndFilesystemBootstrapper(t, opts, setup)
@@ -228,52 +222,22 @@ func newTestSetupWithCommitLogAndFilesystemBootstrapper(t *testing.T, opts testO
return setup
}
-func setCommitLogAndFilesystemBootstrapper(t *testing.T, opts testOptions, setup *testSetup) *testSetup {
- commitLogOpts := setup.storageOpts.CommitLogOptions()
- fsOpts := commitLogOpts.FilesystemOptions()
+func setCommitLogAndFilesystemBootstrapper(t *testing.T, opts TestOptions, setup TestSetup) TestSetup {
+ commitLogOpts := setup.StorageOpts().CommitLogOptions()
commitLogOpts = commitLogOpts.
SetFlushInterval(defaultIntegrationTestFlushInterval)
- setup.storageOpts = setup.storageOpts.SetCommitLogOptions(commitLogOpts)
-
- // commit log bootstrapper
- noOpAll := bootstrapper.NewNoOpAllBootstrapperProvider()
- bsOpts := newDefaulTestResultOptions(setup.storageOpts)
- bclOpts := bcl.NewOptions().
- SetResultOptions(bsOpts).
- SetCommitLogOptions(commitLogOpts).
- SetRuntimeOptionsManager(runtime.NewOptionsManager())
-
- commitLogBootstrapper, err := bcl.NewCommitLogBootstrapperProvider(
- bclOpts, mustInspectFilesystem(fsOpts), noOpAll)
- require.NoError(t, err)
-
- // fs bootstrapper
- persistMgr, err := persistfs.NewPersistManager(fsOpts)
- require.NoError(t, err)
+ setup.SetStorageOpts(setup.StorageOpts().SetCommitLogOptions(commitLogOpts))
- storageIdxOpts := setup.storageOpts.IndexOptions()
- bfsOpts := fs.NewOptions().
- SetResultOptions(bsOpts).
- SetFilesystemOptions(fsOpts).
- SetIndexOptions(storageIdxOpts).
- SetDatabaseBlockRetrieverManager(setup.storageOpts.DatabaseBlockRetrieverManager()).
- SetPersistManager(persistMgr).
- SetCompactor(newCompactor(t, storageIdxOpts))
-
- fsBootstrapper, err := fs.NewFileSystemBootstrapperProvider(bfsOpts, commitLogBootstrapper)
- require.NoError(t, err)
+ require.NoError(t, setup.InitializeBootstrappers(InitializeBootstrappersOptions{
+ CommitLogOptions: commitLogOpts,
+ WithCommitLog: true,
+ WithFileSystem: true,
+ }))
// Need to make sure we have an active m3dbAdminClient because the previous one
- // may have been shutdown by stopServer().
- setup.maybeResetClients()
- // bootstrapper storage opts
- processOpts := bootstrap.NewProcessOptions().
- SetTopologyMapProvider(setup).
- SetOrigin(setup.origin)
- processProvider, err := bootstrap.NewProcessProvider(fsBootstrapper, processOpts, bsOpts)
- require.NoError(t, err)
- setup.storageOpts = setup.storageOpts.SetBootstrapProcessProvider(processProvider)
+ // may have been shut down by StopServer().
+ setup.MaybeResetClients()
return setup
}
@@ -338,8 +302,9 @@ func (d dataPointsInTimeOrder) toSeriesMap(blockSize time.Duration) generate.Ser
dp = generate.Series{ID: point.series}
}
dp.Data = append(dp.Data, generate.TestValue{Datapoint: ts.Datapoint{
- Timestamp: t,
- Value: point.value,
+ Timestamp: t,
+ TimestampNanos: xtime.ToUnixNano(t),
+ Value: point.value,
}, Annotation: point.ann})
seriesBlock[idString] = dp
blockStartToSeriesMap[xtime.ToUnixNano(trunc)] = seriesBlock
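
The last hunk shows that `generate.TestValue` datapoints now carry the timestamp in both `time.Time` and `UnixNano` form. A hedged fragment of constructing one, with types taken from the imports above:

```go
v := generate.TestValue{
	Datapoint: ts.Datapoint{
		Timestamp:      now,
		TimestampNanos: xtime.ToUnixNano(now), // kept in sync with Timestamp
		Value:          42,
	},
	Annotation: nil,
}
```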
diff --git a/src/dbnode/integration/fs_commitlog_snapshot_mixed_mode_read_write_prop_test.go b/src/dbnode/integration/fs_commitlog_snapshot_mixed_mode_read_write_prop_test.go
index 884d2da6dd..7e209dee63 100644
--- a/src/dbnode/integration/fs_commitlog_snapshot_mixed_mode_read_write_prop_test.go
+++ b/src/dbnode/integration/fs_commitlog_snapshot_mixed_mode_read_write_prop_test.go
@@ -29,8 +29,8 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/x/context"
xtime "github.com/m3db/m3/src/x/time"
"go.uber.org/zap"
@@ -125,19 +125,19 @@ func TestFsCommitLogMixedModeReadWriteProp(t *testing.T) {
if err != nil {
return false, err
}
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1})
// Test setup
setup := newTestSetupWithCommitLogAndFilesystemBootstrapper(t, opts)
- defer setup.close()
+ defer setup.Close()
- log := setup.storageOpts.InstrumentOptions().Logger()
+ log := setup.StorageOpts().InstrumentOptions().Logger()
log.Sugar().Info("blockSize: %s\n", ns1ROpts.BlockSize().String())
log.Sugar().Info("bufferPast: %s\n", ns1ROpts.BufferPast().String())
log.Sugar().Info("bufferFuture: %s\n", ns1ROpts.BufferFuture().String())
- setup.setNowFn(fakeStart)
+ setup.SetNowFn(fakeStart)
var (
ids = &idGen{longTestID}
@@ -148,7 +148,7 @@ func TestFsCommitLogMixedModeReadWriteProp(t *testing.T) {
latestToCheck = datapoints[len(datapoints)-1].time.Add(ns1BlockSize)
timesToRestart = []time.Time{}
start = earliestToCheck
- filePathPrefix = setup.storageOpts.CommitLogOptions().FilesystemOptions().FilePathPrefix()
+ filePathPrefix = setup.StorageOpts().CommitLogOptions().FilesystemOptions().FilePathPrefix()
)
// Generate randomly selected times during which the node will restart
@@ -179,9 +179,9 @@ func TestFsCommitLogMixedModeReadWriteProp(t *testing.T) {
break
}
- setup.setNowFn(ts)
+ setup.SetNowFn(ts)
- err := setup.db.Write(ctx, nsID, dp.series, ts, dp.value, xtime.Second, dp.ann)
+ err := setup.DB().Write(ctx, nsID, dp.series, ts, dp.value, xtime.Second, dp.ann)
if err != nil {
log.Warn("error writing series datapoint", zap.Error(err))
return false, err
@@ -203,7 +203,7 @@ func TestFsCommitLogMixedModeReadWriteProp(t *testing.T) {
if input.waitForFlushFiles {
log.Info("waiting for data files to be flushed")
var (
- now = setup.getNowFn()
+ now = setup.NowFn()()
endOfLatestFlushableBlock = retention.FlushTimeEnd(ns1ROpts, now).
// Add block size because FlushTimeEnd will return the beginning of the
// latest flushable block.
@@ -212,7 +212,7 @@ func TestFsCommitLogMixedModeReadWriteProp(t *testing.T) {
// be available on disk.
expectedFlushedData = datapoints.before(endOfLatestFlushableBlock).toSeriesMap(ns1BlockSize)
err = waitUntilDataFilesFlushed(
- filePathPrefix, setup.shardSet, nsID, expectedFlushedData, maxFlushWaitTime)
+ filePathPrefix, setup.ShardSet(), nsID, expectedFlushedData, maxFlushWaitTime)
)
if err != nil {
return false, fmt.Errorf("error waiting for data files to flush: %s", err)
@@ -221,16 +221,16 @@ func TestFsCommitLogMixedModeReadWriteProp(t *testing.T) {
if input.waitForSnapshotFiles {
log.Info("waiting for snapshot files to be written")
- now := setup.getNowFn()
+ now := setup.NowFn()()
var snapshotBlock time.Time
if now.Add(-bufferPast).Truncate(ns1BlockSize).Equal(now.Truncate(ns1BlockSize)) {
snapshotBlock = now.Truncate(ns1BlockSize)
} else {
snapshotBlock = now.Truncate(ns1BlockSize).Add(-ns1BlockSize)
}
- err := waitUntilSnapshotFilesFlushed(
+ _, err := waitUntilSnapshotFilesFlushed(
filePathPrefix,
- setup.shardSet,
+ setup.ShardSet(),
nsID,
[]snapshotID{{blockStart: snapshotBlock}},
maxFlushWaitTime,
@@ -240,19 +240,19 @@ func TestFsCommitLogMixedModeReadWriteProp(t *testing.T) {
}
}
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
// Create a new test setup because databases do not have a completely
// clean shutdown, so they can end up in a bad state where the persist
// manager is not idle and thus no more flushes can be done, even if
// there are no other in-progress flushes.
- oldNow := setup.getNowFn()
+ oldNow := setup.NowFn()()
setup = newTestSetupWithCommitLogAndFilesystemBootstrapper(
// FilePathPrefix is randomly generated if not provided, so we need
// to make sure all our test setups have the same prefix so that
// they can find each others files.
t, opts.SetFilePathPrefix(filePathPrefix))
// Make sure the new setup has the same system time as the previous one.
- setup.setNowFn(oldNow)
+ setup.SetNowFn(oldNow)
}
if lastDatapointsIdx != len(datapoints) {
diff --git a/src/dbnode/integration/fs_data_expiry_bootstrap_test.go b/src/dbnode/integration/fs_data_expiry_bootstrap_test.go
index 261b78c61a..4d020d183e 100644
--- a/src/dbnode/integration/fs_data_expiry_bootstrap_test.go
+++ b/src/dbnode/integration/fs_data_expiry_bootstrap_test.go
@@ -28,13 +28,7 @@ import (
"github.com/m3db/m3/src/dbnode/integration/generate"
"github.com/m3db/m3/src/dbnode/namespace"
- "github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/retention"
- "github.com/m3db/m3/src/dbnode/storage/block"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
- bfs "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/fs"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/stretchr/testify/require"
)
@@ -52,67 +46,27 @@ func TestFilesystemDataExpiryBootstrap(t *testing.T) {
SetBufferFuture(2 * time.Minute).
SetBlockDataExpiry(true)
blockSize = ropts.BlockSize()
- setup *testSetup
+ setup TestSetup
err error
)
namesp, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().SetRetentionOptions(ropts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{namesp})
- retrieverOpts := fs.NewBlockRetrieverOptions().
- SetBlockLeaseManager(&block.NoopLeaseManager{})
-
- blockRetrieverMgr := block.NewDatabaseBlockRetrieverManager(
- func(md namespace.Metadata) (block.DatabaseBlockRetriever, error) {
- retriever, err := fs.NewBlockRetriever(retrieverOpts, setup.fsOpts)
- if err != nil {
- return nil, err
- }
-
- if err := retriever.Open(md); err != nil {
- return nil, err
- }
- return retriever, nil
- })
-
- opts = opts.SetDatabaseBlockRetrieverManager(blockRetrieverMgr)
-
- setup, err = newTestSetup(t, opts, nil)
+ setup, err = NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
-
- log := setup.logger
- fsOpts := setup.storageOpts.CommitLogOptions().FilesystemOptions()
+ defer setup.Close()
- persistMgr, err := fs.NewPersistManager(fsOpts)
- require.NoError(t, err)
-
- noOpAll := bootstrapper.NewNoOpAllBootstrapperProvider()
- bsOpts := result.NewOptions().
- SetSeriesCachePolicy(setup.storageOpts.SeriesCachePolicy())
- storageIdxOpts := setup.storageOpts.IndexOptions()
- bfsOpts := bfs.NewOptions().
- SetResultOptions(bsOpts).
- SetIndexOptions(storageIdxOpts).
- SetFilesystemOptions(fsOpts).
- SetDatabaseBlockRetrieverManager(blockRetrieverMgr).
- SetPersistManager(persistMgr).
- SetCompactor(newCompactor(t, storageIdxOpts))
- bs, err := bfs.NewFileSystemBootstrapperProvider(bfsOpts, noOpAll)
- require.NoError(t, err)
- processOpts := bootstrap.NewProcessOptions().
- SetTopologyMapProvider(setup).
- SetOrigin(setup.origin)
- processProvider, err := bootstrap.NewProcessProvider(bs, processOpts, bsOpts)
- require.NoError(t, err)
+ log := setup.StorageOpts().InstrumentOptions().Logger()
- setup.storageOpts = setup.storageOpts.
- SetBootstrapProcessProvider(processProvider)
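+	// The test setup now wires up the filesystem bootstrapper itself.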
+ require.NoError(t, setup.InitializeBootstrappers(InitializeBootstrappersOptions{
+ WithFileSystem: true,
+ }))
// Write test data
- now := setup.getNowFn()
+ now := setup.NowFn()()
seriesMaps := generate.BlocksByStart([]generate.BlockConfig{
{IDs: []string{"foo", "bar"}, NumPoints: 100, Start: now.Add(-blockSize)},
})
@@ -120,12 +74,12 @@ func TestFilesystemDataExpiryBootstrap(t *testing.T) {
// Start the server with filesystem bootstrapper
log.Debug("filesystem data expiry bootstrap test")
- require.NoError(t, setup.startServer())
+ require.NoError(t, setup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, setup.stopServer())
+ require.NoError(t, setup.StopServer())
log.Debug("server is now down")
}()
diff --git a/src/dbnode/integration/generate/generate.go b/src/dbnode/integration/generate/generate.go
index 8f80b309fb..993248ca94 100644
--- a/src/dbnode/integration/generate/generate.go
+++ b/src/dbnode/integration/generate/generate.go
@@ -54,15 +54,17 @@ func Block(conf BlockConfig) SeriesBlock {
if conf.AnnGen == nil {
datapoints = append(datapoints, TestValue{
Datapoint: ts.Datapoint{
- Timestamp: timestamp,
- Value: testgen.GenerateFloatVal(r, 3, 1),
+ Timestamp: timestamp,
+ TimestampNanos: xtime.ToUnixNano(timestamp),
+ Value: testgen.GenerateFloatVal(r, 3, 1),
},
})
} else {
datapoints = append(datapoints, TestValue{
Datapoint: ts.Datapoint{
- Timestamp: timestamp,
- Value: 0,
+ Timestamp: timestamp,
+ TimestampNanos: xtime.ToUnixNano(timestamp),
+ Value: 0,
},
Annotation: conf.AnnGen.Next(),
})
@@ -95,8 +97,8 @@ func ToPointsByTime(seriesMaps SeriesBlocksByStart) SeriesDataPointsByTime {
for _, blk := range blks {
for _, dp := range blk.Data {
pointsByTime = append(pointsByTime, SeriesDataPoint{
- ID: blk.ID,
- Value: dp,
+ ID: blk.ID,
+ Value: dp,
})
}
}
diff --git a/src/dbnode/integration/generate/writer.go b/src/dbnode/integration/generate/writer.go
index 3f023d0199..419f99e2f1 100644
--- a/src/dbnode/integration/generate/writer.go
+++ b/src/dbnode/integration/generate/writer.go
@@ -23,7 +23,6 @@ package generate
import (
"time"
- "github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/encoding"
ns "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
@@ -219,8 +218,10 @@ func writeToDiskWithPredicate(
}
data[0] = segment.Head
data[1] = segment.Tail
- checksum := digest.SegmentChecksum(segment)
- err = writer.WriteAll(series.ID, series.Tags, data, checksum)
+ checksum := segment.CalculateChecksum()
+ metadata := persist.NewMetadataFromIDAndTags(series.ID, series.Tags,
+ persist.MetadataOptions{})
+ err = writer.WriteAll(metadata, data, checksum)
if err != nil {
return err
}
diff --git a/src/dbnode/integration/index_block_flush_test.go b/src/dbnode/integration/index_block_flush_test.go
index 1ebd67b27e..723785d731 100644
--- a/src/dbnode/integration/index_block_flush_test.go
+++ b/src/dbnode/integration/index_block_flush_test.go
@@ -26,10 +26,10 @@ import (
"testing"
"time"
+ "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/storage/index"
- "github.com/m3db/m3/src/dbnode/namespace"
xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
"github.com/m3db/m3/src/m3ninx/idx"
xclock "github.com/m3db/m3/src/x/clock"
@@ -78,50 +78,50 @@ func TestIndexBlockFlush(t *testing.T) {
SetBlockSize(indexBlockSize).SetEnabled(true)))
require.NoError(t, err)
- testOpts := newTestOptions(t).
+ testOpts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{md}).
SetWriteNewSeriesAsync(true)
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
reporter := xmetrics.NewTestStatsReporter(xmetrics.NewTestStatsReporterOptions())
scope, closer := tally.NewRootScope(
tally.ScopeOptions{Reporter: reporter}, time.Millisecond)
defer closer.Close()
- testSetup.storageOpts = testSetup.storageOpts.SetInstrumentOptions(
- instrument.NewOptions().SetMetricsScope(scope))
+ testSetup.SetStorageOpts(testSetup.StorageOpts().SetInstrumentOptions(
+ instrument.NewOptions().SetMetricsScope(scope)))
t0 := time.Date(2018, time.May, 6, 13, 0, 0, 0, time.UTC)
assert.True(t, t0.Equal(t0.Truncate(indexBlockSize)))
t1 := t0.Add(20 * time.Minute)
t2 := t0.Add(2 * time.Hour)
- testSetup.setNowFn(t0)
+ testSetup.SetNowFn(t0)
- writesPeriod0 := generateTestIndexWrite(0, numWrites, numTags, t0, t1)
+ writesPeriod0 := GenerateTestIndexWrite(0, numWrites, numTags, t0, t1)
// Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
- require.NoError(t, testSetup.startServer())
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
+ require.NoError(t, testSetup.StartServer())
// Stop the server
defer func() {
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Debug("server is now down")
}()
- client := testSetup.m3dbClient
+ client := testSetup.M3DBClient()
session, err := client.DefaultSession()
require.NoError(t, err)
log.Info("starting data write")
start := time.Now()
- writesPeriod0.write(t, md.ID(), session)
+ writesPeriod0.Write(t, md.ID(), session)
log.Info("test data written", zap.Duration("took", time.Since(start)))
log.Info("waiting till data is indexed")
indexed := xclock.WaitUntil(func() bool {
- indexPeriod0 := writesPeriod0.numIndexed(t, md.ID(), session)
+ indexPeriod0 := writesPeriod0.NumIndexed(t, md.ID(), session)
return indexPeriod0 == len(writesPeriod0)
}, verifyTimeout)
require.True(t, indexed)
@@ -136,16 +136,16 @@ func TestIndexBlockFlush(t *testing.T) {
period0Results, _, err := session.FetchTagged(
md.ID(), query, index.QueryOptions{StartInclusive: t0, EndExclusive: t1})
require.NoError(t, err)
- writesPeriod0.matchesSeriesIters(t, period0Results)
+ writesPeriod0.MatchesSeriesIters(t, period0Results)
log.Info("found period0 results")
// move time to 3p
- testSetup.setNowFn(t2)
+ testSetup.SetNowFn(t2)
// waiting till filesets found on disk
log.Info("waiting till filesets found on disk")
found := xclock.WaitUntil(func() bool {
- filesets, err := fs.IndexFileSetsAt(testSetup.filePathPrefix, md.ID(), t0)
+ filesets, err := fs.IndexFileSetsAt(testSetup.FilePathPrefix(), md.ID(), t0)
require.NoError(t, err)
return len(filesets) == 1
}, verifyTimeout)
@@ -167,6 +167,6 @@ func TestIndexBlockFlush(t *testing.T) {
period0Results, _, err = session.FetchTagged(
md.ID(), query, index.QueryOptions{StartInclusive: t0, EndExclusive: t1})
require.NoError(t, err)
- writesPeriod0.matchesSeriesIters(t, period0Results)
+ writesPeriod0.MatchesSeriesIters(t, period0Results)
log.Info("found period0 results after flush")
}
diff --git a/src/dbnode/integration/index_block_rotation_test.go b/src/dbnode/integration/index_block_rotation_test.go
index a986ba229f..33438a4d58 100644
--- a/src/dbnode/integration/index_block_rotation_test.go
+++ b/src/dbnode/integration/index_block_rotation_test.go
@@ -26,9 +26,9 @@ import (
"testing"
"time"
+ "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/storage/index"
- "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/m3ninx/idx"
xclock "github.com/m3db/m3/src/x/clock"
@@ -72,42 +72,42 @@ func TestIndexBlockRotation(t *testing.T) {
SetBlockSize(indexBlockSize).SetEnabled(true)))
require.NoError(t, err)
- testOpts := newTestOptions(t).
+ testOpts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{md}).
SetWriteNewSeriesAsync(true)
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
t0 := time.Date(2018, time.May, 6, 13, 0, 0, 0, time.UTC)
t1 := t0.Add(20 * time.Minute)
t2 := t1.Add(3 * time.Hour)
- testSetup.setNowFn(t0)
+ testSetup.SetNowFn(t0)
- writesPeriod0 := generateTestIndexWrite(0, numWrites, numTags, t0, t1)
+ writesPeriod0 := GenerateTestIndexWrite(0, numWrites, numTags, t0, t1)
// Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
- require.NoError(t, testSetup.startServer())
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
+ require.NoError(t, testSetup.StartServer())
// Stop the server
defer func() {
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Debug("server is now down")
}()
- client := testSetup.m3dbClient
+ client := testSetup.M3DBClient()
session, err := client.DefaultSession()
require.NoError(t, err)
log.Info("starting data write")
start := time.Now()
- writesPeriod0.write(t, md.ID(), session)
+ writesPeriod0.Write(t, md.ID(), session)
log.Info("test data written", zap.Duration("took", time.Since(start)))
log.Info("waiting till data is indexed")
indexed := xclock.WaitUntil(func() bool {
- indexPeriod0 := writesPeriod0.numIndexed(t, md.ID(), session)
+ indexPeriod0 := writesPeriod0.NumIndexed(t, md.ID(), session)
return indexPeriod0 == len(writesPeriod0)
}, 5*time.Second)
require.True(t, indexed)
@@ -122,14 +122,14 @@ func TestIndexBlockRotation(t *testing.T) {
period0Results, _, err := session.FetchTagged(
md.ID(), query, index.QueryOptions{StartInclusive: t0, EndExclusive: t1})
require.NoError(t, err)
- writesPeriod0.matchesSeriesIters(t, period0Results)
+ writesPeriod0.MatchesSeriesIters(t, period0Results)
log.Info("found period0 results")
// move time to 4p
- testSetup.setNowFn(t2)
+ testSetup.SetNowFn(t2)
// give tick some time to evict the block
- testSetup.sleepFor10xTickMinimumInterval()
+ testSetup.SleepFor10xTickMinimumInterval()
// ensure all data is absent
log.Info("querying period0 results after expiry")
diff --git a/src/dbnode/integration/index_helpers.go b/src/dbnode/integration/index_helpers.go
index 44736f4af8..e66f380060 100644
--- a/src/dbnode/integration/index_helpers.go
+++ b/src/dbnode/integration/index_helpers.go
@@ -24,6 +24,7 @@ package integration
import (
"fmt"
+ "strconv"
"testing"
"time"
@@ -37,10 +38,12 @@ import (
"github.com/stretchr/testify/require"
)
-type testIndexWrites []testIndexWrite
+// TestIndexWrites holds index writes for testing.
+type TestIndexWrites []testIndexWrite
-func (w testIndexWrites) matchesSeriesIters(t *testing.T, seriesIters encoding.SeriesIterators) {
- writesByID := make(map[string]testIndexWrites)
+// MatchesSeriesIters verifies that the given series iterators match the index writes.
+func (w TestIndexWrites) MatchesSeriesIters(t *testing.T, seriesIters encoding.SeriesIterators) {
+ writesByID := make(map[string]TestIndexWrites)
for _, wi := range w {
writesByID[wi.id.String()] = append(writesByID[wi.id.String()], wi)
}
@@ -54,7 +57,7 @@ func (w testIndexWrites) matchesSeriesIters(t *testing.T, seriesIters encoding.S
}
}
-func (w testIndexWrites) matchesSeriesIter(t *testing.T, iter encoding.SeriesIterator) {
+func (w TestIndexWrites) matchesSeriesIter(t *testing.T, iter encoding.SeriesIterator) {
found := make([]bool, len(w))
count := 0
for iter.Next() {
@@ -81,14 +84,16 @@ func (w testIndexWrites) matchesSeriesIter(t *testing.T, iter encoding.SeriesIte
}
}
-func (w testIndexWrites) write(t *testing.T, ns ident.ID, s client.Session) {
+// Write writes the test data using the given session.
+func (w TestIndexWrites) Write(t *testing.T, ns ident.ID, s client.Session) {
for i := 0; i < len(w); i++ {
wi := w[i]
require.NoError(t, s.WriteTagged(ns, wi.id, wi.tags.Duplicate(), wi.ts, wi.value, xtime.Second, nil), "%v", wi)
}
}
-func (w testIndexWrites) numIndexed(t *testing.T, ns ident.ID, s client.Session) int {
+// NumIndexed returns the number of index writes visible in the index.
+func (w TestIndexWrites) NumIndexed(t *testing.T, ns ident.ID, s client.Session) int {
numFound := 0
for i := 0; i < len(w); i++ {
wi := w[i]
@@ -96,7 +101,7 @@ func (w testIndexWrites) numIndexed(t *testing.T, ns ident.ID, s client.Session)
iter, _, err := s.FetchTaggedIDs(ns, index.Query{Query: q}, index.QueryOptions{
StartInclusive: wi.ts.Add(-1 * time.Second),
EndExclusive: wi.ts.Add(1 * time.Second),
- Limit: 10})
+ SeriesLimit: 10})
if err != nil {
continue
}
@@ -125,10 +130,11 @@ type testIndexWrite struct {
value float64
}
-func generateTestIndexWrite(periodID, numWrites, numTags int, startTime, endTime time.Time) testIndexWrites {
+// GenerateTestIndexWrite generates test index writes.
+func GenerateTestIndexWrite(periodID, numWrites, numTags int, startTime, endTime time.Time) TestIndexWrites {
writes := make([]testIndexWrite, 0, numWrites)
step := endTime.Sub(startTime) / time.Duration(numWrites+1)
- for i := 1; i <= numWrites; i++ {
+ for i := 0; i < numWrites; i++ {
id, tags := genIDTags(periodID, i, numTags)
writes = append(writes, testIndexWrite{
id: id,
@@ -140,7 +146,9 @@ func generateTestIndexWrite(periodID, numWrites, numTags int, startTime, endTime
return writes
}
-func genIDTags(i int, j int, numTags int) (ident.ID, ident.TagIterator) {
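+// genIDTagsOption transforms the generated tags before they are returned,
+// letting tests append or rewrite tags per generated series.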
+type genIDTagsOption func(ident.Tags) ident.Tags
+
+func genIDTags(i int, j int, numTags int, opts ...genIDTagsOption) (ident.ID, ident.TagIterator) {
id := fmt.Sprintf("foo.%d.%d", i, j)
tags := make([]ident.Tag, 0, numTags)
for i := 0; i < numTags; i++ {
@@ -150,31 +158,58 @@ func genIDTags(i int, j int, numTags int) (ident.ID, ident.TagIterator) {
))
}
tags = append(tags,
- ident.StringTag("commoni", fmt.Sprintf("%d", i)),
+ ident.StringTag("common_i", strconv.Itoa(i)),
+ ident.StringTag("common_j", strconv.Itoa(j)),
ident.StringTag("shared", "shared"))
- return ident.StringID(id), ident.NewTagsIterator(ident.NewTags(tags...))
+
+ result := ident.NewTags(tags...)
+ for _, fn := range opts {
+ result = fn(result)
+ }
+
+ return ident.StringID(id), ident.NewTagsIterator(result)
}
func isIndexed(t *testing.T, s client.Session, ns ident.ID, id ident.ID, tags ident.TagIterator) bool {
+ result, err := isIndexedChecked(t, s, ns, id, tags)
+ if err != nil {
+ return false
+ }
+ return result
+}
+
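+// isIndexedChecked reports whether the given id and tags are indexed,
+// returning any error encountered while querying so callers can inspect it.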
+func isIndexedChecked(t *testing.T, s client.Session, ns ident.ID, id ident.ID, tags ident.TagIterator) (bool, error) {
q := newQuery(t, tags)
iter, _, err := s.FetchTaggedIDs(ns, index.Query{Query: q}, index.QueryOptions{
StartInclusive: time.Now(),
EndExclusive: time.Now(),
- Limit: 10})
+ SeriesLimit: 10})
if err != nil {
- return false
+ return false, err
}
+
+ defer iter.Finalize()
+
if !iter.Next() {
- return false
+ return false, nil
}
+
cuNs, cuID, cuTag := iter.Current()
+ if err := iter.Err(); err != nil {
+ return false, fmt.Errorf("iter err: %v", err)
+ }
+
if ns.String() != cuNs.String() {
- return false
+ return false, fmt.Errorf("namespace not matched")
}
if id.String() != cuID.String() {
- return false
+ return false, fmt.Errorf("id not matched")
}
- return ident.NewTagIterMatcher(tags).Matches(cuTag)
+ if !ident.NewTagIterMatcher(tags).Matches(cuTag) {
+ return false, fmt.Errorf("tags did not match")
+ }
+
+ return true, nil
}
func newQuery(t *testing.T, tags ident.TagIterator) idx.Query {
diff --git a/src/dbnode/integration/index_multiple_block_query_test.go b/src/dbnode/integration/index_multiple_block_query_test.go
index 23091e2801..b12c3bcf38 100644
--- a/src/dbnode/integration/index_multiple_block_query_test.go
+++ b/src/dbnode/integration/index_multiple_block_query_test.go
@@ -26,9 +26,9 @@ import (
"testing"
"time"
+ "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/storage/index"
- "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/m3ninx/idx"
xclock "github.com/m3db/m3/src/x/clock"
@@ -73,45 +73,45 @@ func TestIndexMultipleBlockQuery(t *testing.T) {
SetBlockSize(indexBlockSize).SetEnabled(true)))
require.NoError(t, err)
- testOpts := newTestOptions(t).
+ testOpts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{md}).
SetWriteNewSeriesAsync(true)
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
t0 := time.Date(2018, time.May, 6, 12, 50, 0, 0, time.UTC)
t1 := t0.Add(10 * time.Minute)
t2 := t1.Add(5 * time.Minute)
- testSetup.setNowFn(t1)
+ testSetup.SetNowFn(t1)
- writesPeriod0 := generateTestIndexWrite(0, numWrites, numTags, t0, t1)
- writesPeriod1 := generateTestIndexWrite(1, numWrites, numTags, t1, t2)
+ writesPeriod0 := GenerateTestIndexWrite(0, numWrites, numTags, t0, t1)
+ writesPeriod1 := GenerateTestIndexWrite(1, numWrites, numTags, t1, t2)
// Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
- require.NoError(t, testSetup.startServer())
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
+ require.NoError(t, testSetup.StartServer())
// Stop the server
defer func() {
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Debug("server is now down")
}()
- client := testSetup.m3dbClient
+ client := testSetup.M3DBClient()
session, err := client.DefaultSession()
require.NoError(t, err)
log.Info("starting data write")
start := time.Now()
- writesPeriod0.write(t, md.ID(), session)
- writesPeriod1.write(t, md.ID(), session)
+ writesPeriod0.Write(t, md.ID(), session)
+ writesPeriod1.Write(t, md.ID(), session)
log.Info("test data written", zap.Duration("took", time.Since(start)))
log.Info("waiting till data is indexed")
indexed := xclock.WaitUntil(func() bool {
- indexPeriod0 := writesPeriod0.numIndexed(t, md.ID(), session)
- indexPeriod1 := writesPeriod1.numIndexed(t, md.ID(), session)
+ indexPeriod0 := writesPeriod0.NumIndexed(t, md.ID(), session)
+ indexPeriod1 := writesPeriod1.NumIndexed(t, md.ID(), session)
return indexPeriod0 == len(writesPeriod0) &&
indexPeriod1 == len(writesPeriod1)
}, 5*time.Second)
@@ -126,14 +126,14 @@ func TestIndexMultipleBlockQuery(t *testing.T) {
period0Results, _, err := session.FetchTagged(
md.ID(), query, index.QueryOptions{StartInclusive: t0, EndExclusive: t1})
require.NoError(t, err)
- writesPeriod0.matchesSeriesIters(t, period0Results)
+ writesPeriod0.MatchesSeriesIters(t, period0Results)
log.Info("found period0 results")
log.Info("querying period1 results")
period1Results, _, err := session.FetchTagged(
md.ID(), query, index.QueryOptions{StartInclusive: t1, EndExclusive: t2})
require.NoError(t, err)
- writesPeriod1.matchesSeriesIters(t, period1Results)
+ writesPeriod1.MatchesSeriesIters(t, period1Results)
log.Info("found period1 results")
log.Info("querying period 0+1 results")
@@ -141,6 +141,6 @@ func TestIndexMultipleBlockQuery(t *testing.T) {
md.ID(), query, index.QueryOptions{StartInclusive: t0, EndExclusive: t2})
require.NoError(t, err)
writes := append(writesPeriod0, writesPeriod1...)
- writes.matchesSeriesIters(t, period01Results)
+ writes.MatchesSeriesIters(t, period01Results)
log.Info("found period 0+1 results")
}
diff --git a/src/dbnode/integration/index_multiple_node_high_concurrency_test.go b/src/dbnode/integration/index_multiple_node_high_concurrency_test.go
index b31f370380..876c03693f 100644
--- a/src/dbnode/integration/index_multiple_node_high_concurrency_test.go
+++ b/src/dbnode/integration/index_multiple_node_high_concurrency_test.go
@@ -74,10 +74,10 @@ func TestIndexMultipleNodeHighConcurrency(t *testing.T) {
clientopts = clientopts.SetReadConsistencyLevel(lvl)
defer closeFn()
- log := nodes[0].storageOpts.InstrumentOptions().Logger()
+ log := nodes[0].StorageOpts().InstrumentOptions().Logger()
// Start the nodes
for _, n := range nodes {
- require.NoError(t, n.startServer())
+ require.NoError(t, n.StartServer())
}
c, err := client.NewClient(clientopts)
@@ -90,7 +90,7 @@ func TestIndexMultipleNodeHighConcurrency(t *testing.T) {
insertWg sync.WaitGroup
numTotalErrors uint32
)
- now := nodes[0].db.Options().ClockOptions().NowFn()()
+ now := nodes[0].DB().Options().ClockOptions().NowFn()()
start := time.Now()
log.Info("starting data write")
diff --git a/src/dbnode/integration/index_single_node_high_concurrency_test.go b/src/dbnode/integration/index_single_node_high_concurrency_test.go
index 83c3e4d481..4bf7767bf7 100644
--- a/src/dbnode/integration/index_single_node_high_concurrency_test.go
+++ b/src/dbnode/integration/index_single_node_high_concurrency_test.go
@@ -24,121 +24,431 @@ package integration
import (
"fmt"
+ "math/rand"
+ "strconv"
"sync"
- "sync/atomic"
"testing"
"time"
"github.com/m3db/m3/src/dbnode/namespace"
- "github.com/m3db/m3/src/dbnode/topology"
+ "github.com/m3db/m3/src/dbnode/storage"
+ "github.com/m3db/m3/src/dbnode/storage/index"
+ "github.com/m3db/m3/src/m3ninx/idx"
xclock "github.com/m3db/m3/src/x/clock"
+ "github.com/m3db/m3/src/x/context"
+ "github.com/m3db/m3/src/x/ident"
+ xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "go.uber.org/atomic"
"go.uber.org/zap"
)
-func TestIndexSingleNodeHighConcurrency(t *testing.T) {
+func TestIndexSingleNodeHighConcurrencyManyTagsLowCardinality(t *testing.T) {
if testing.Short() {
t.SkipNow() // Just skip if we're doing a short run
}
+ testIndexSingleNodeHighConcurrency(t, testIndexHighConcurrencyOptions{
+ concurrencyEnqueueWorker: 8,
+ concurrencyWrites: 5000,
+ enqueuePerWorker: 100,
+ numTags: 10,
+ })
+}
+
+func TestIndexSingleNodeHighConcurrencyFewTagsHighCardinalityNoSkipWrites(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow() // Just skip if we're doing a short run
+ }
+
+ testIndexSingleNodeHighConcurrency(t, testIndexHighConcurrencyOptions{
+ concurrencyEnqueueWorker: 8,
+ concurrencyWrites: 5000,
+ enqueuePerWorker: 10000,
+ numTags: 2,
+ })
+}
+
+func TestIndexSingleNodeHighConcurrencyFewTagsHighCardinalitySkipWrites(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow() // Just skip if we're doing a short run
+ }
+
+ testIndexSingleNodeHighConcurrency(t, testIndexHighConcurrencyOptions{
+ concurrencyEnqueueWorker: 8,
+ concurrencyWrites: 5000,
+ enqueuePerWorker: 10000,
+ numTags: 2,
+ skipWrites: true,
+ })
+}
+
+func TestIndexSingleNodeHighConcurrencyFewTagsHighCardinalityQueryDuringWrites(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow() // Just skip if we're doing a short run
+ }
+
+ testIndexSingleNodeHighConcurrency(t, testIndexHighConcurrencyOptions{
+ concurrencyEnqueueWorker: 8,
+ concurrencyWrites: 5000,
+ enqueuePerWorker: 100000,
+ numTags: 2,
+ concurrencyQueryDuringWrites: 16,
+ concurrencyQueryDuringWritesType: indexQuery,
+ skipVerify: true,
+ })
+}
+
+func TestIndexSingleNodeHighConcurrencyFewTagsHighCardinalityAggregateQueryDuringWrites(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow() // Just skip if we're doing a short run
+ }
+
+ testIndexSingleNodeHighConcurrency(t, testIndexHighConcurrencyOptions{
+ concurrencyEnqueueWorker: 8,
+ concurrencyWrites: 5000,
+ enqueuePerWorker: 100000,
+ numTags: 2,
+ concurrencyQueryDuringWrites: 1,
+ concurrencyQueryDuringWritesType: indexAggregateQuery,
+ skipVerify: true,
+ })
+}
+
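+// queryType is the kind of query issued concurrently with the writes.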
+type queryType uint
+
+const (
+ indexQuery queryType = iota
+ indexAggregateQuery
+)
+
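+// testIndexHighConcurrencyOptions configures the high-concurrency index test.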
+type testIndexHighConcurrencyOptions struct {
+ concurrencyEnqueueWorker int
+ concurrencyWrites int
+ enqueuePerWorker int
+ numTags int
+
+	// skipWrites mixes in writes that the index is configured to skip, to
+	// make sure they don't interrupt the regular real-time ingestion pipeline.
+ skipWrites bool
+
+ // concurrencyQueryDuringWrites will issue queries while we
+ // are performing writes.
+ concurrencyQueryDuringWrites int
+
+	// concurrencyQueryDuringWritesType determines the type of queries
+	// to issue while performing writes.
+ concurrencyQueryDuringWritesType queryType
+
+	// skipVerify skips verifying that each written series was indexed, which
+	// is useful when just sanity checking that writes and reads can run
+	// concurrently without errors and that the stats look good.
+ skipVerify bool
+}
+
+func testIndexSingleNodeHighConcurrency(
+ t *testing.T,
+ opts testIndexHighConcurrencyOptions,
+) {
+ // Test setup
+ md, err := namespace.NewMetadata(testNamespaces[0],
+ namespace.NewOptions().
+ SetRetentionOptions(defaultIntegrationTestRetentionOpts).
+ SetCleanupEnabled(false).
+ SetSnapshotEnabled(false).
+ SetFlushEnabled(false).
+ SetColdWritesEnabled(true).
+ SetIndexOptions(namespace.NewIndexOptions().SetEnabled(true)))
+ require.NoError(t, err)
+
+ testOpts := NewTestOptions(t).
+ SetNamespaces([]namespace.Metadata{md}).
+ SetWriteNewSeriesAsync(true).
+ // Use default time functions (server time not frozen).
+ SetNowFn(time.Now)
+ testSetup, err := NewTestSetup(t, testOpts, nil,
+ func(s storage.Options) storage.Options {
+ if opts.skipWrites {
+ return s.SetDoNotIndexWithFieldsMap(map[string]string{"skip": "true"})
+ }
+ return s
+ })
+ require.NoError(t, err)
+ defer testSetup.Close()
+
+ // Start the server
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
+ require.NoError(t, testSetup.StartServer())
+
+ // Stop the server
+ defer func() {
+ require.NoError(t, testSetup.StopServer())
+ log.Debug("server is now down")
+ }()
+
+ client := testSetup.M3DBClient()
+ session, err := client.DefaultSession()
+ require.NoError(t, err)
+
var (
- concurrency = 10
- writeEach = 100
- numTags = 10
+ wg sync.WaitGroup
+ numTotalErrors = atomic.NewUint32(0)
+ numTotalSuccess = atomic.NewUint32(0)
)
+ nowFn := testSetup.DB().Options().ClockOptions().NowFn()
+ start := time.Now()
+ log.Info("starting data write",
+ zap.Time("serverTime", nowFn()))
+
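+	// Bound the number of in-flight writes with a worker pool so the enqueue
+	// workers cannot overwhelm the client session.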
+ workerPool := xsync.NewWorkerPool(opts.concurrencyWrites)
+ workerPool.Init()
+
+ for i := 0; i < opts.concurrencyEnqueueWorker; i++ {
+ i := i
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
- levels := []topology.ReadConsistencyLevel{
- topology.ReadConsistencyLevelOne,
- topology.ReadConsistencyLevelUnstrictMajority,
- topology.ReadConsistencyLevelMajority,
- topology.ReadConsistencyLevelAll,
+ for j := 0; j < opts.enqueuePerWorker; j++ {
+ j := j
+ wg.Add(1)
+ workerPool.Go(func() {
+ defer wg.Done()
+
+ var genOpts []genIDTagsOption
+ if opts.skipWrites && j%2 == 0 {
+ genOpts = append(genOpts, genIDTagsOption(func(t ident.Tags) ident.Tags {
+ t.Append(ident.Tag{
+ Name: ident.StringID("skip"),
+ Value: ident.StringID("true"),
+ })
+ return t
+ }))
+ }
+
+ id, tags := genIDTags(i, j, opts.numTags, genOpts...)
+ timestamp := time.Now()
+ err := session.WriteTagged(md.ID(), id, tags,
+ timestamp, float64(j), xtime.Second, nil)
+ if err != nil {
+ if n := numTotalErrors.Inc(); n < 10 {
+					// Log only the first few errors for visibility, without flooding the log.
+ log.Error("sampled write error", zap.Error(err))
+ }
+ } else {
+ numTotalSuccess.Inc()
+ }
+ })
+ }
+ }()
}
- for _, lvl := range levels {
- t.Run(
- fmt.Sprintf("running test for %v", lvl),
- func(t *testing.T) {
- // Test setup
- md, err := namespace.NewMetadata(testNamespaces[0],
- namespace.NewOptions().
- SetRetentionOptions(defaultIntegrationTestRetentionOpts).
- SetCleanupEnabled(false).
- SetSnapshotEnabled(false).
- SetFlushEnabled(false).
- SetIndexOptions(namespace.NewIndexOptions().SetEnabled(true)))
- require.NoError(t, err)
-
- testOpts := newTestOptions(t).
- SetNamespaces([]namespace.Metadata{md}).
- SetWriteNewSeriesAsync(true)
- testSetup, err := newTestSetup(t, testOpts, nil)
- require.NoError(t, err)
- defer testSetup.close()
-
- // Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
- require.NoError(t, testSetup.startServer())
-
- // Stop the server
- defer func() {
- require.NoError(t, testSetup.stopServer())
- log.Debug("server is now down")
- }()
-
- client := testSetup.m3dbClient
- session, err := client.DefaultSession()
- require.NoError(t, err)
-
- var (
- insertWg sync.WaitGroup
- numTotalErrors uint32
- )
- now := testSetup.db.Options().ClockOptions().NowFn()()
- start := time.Now()
- log.Info("starting data write")
-
- for i := 0; i < concurrency; i++ {
- insertWg.Add(1)
- idx := i
- go func() {
- numErrors := uint32(0)
- for j := 0; j < writeEach; j++ {
- id, tags := genIDTags(idx, j, numTags)
- err := session.WriteTagged(md.ID(), id, tags, now, float64(1.0), xtime.Second, nil)
- if err != nil {
- numErrors++
+
+	// If concurrent query load is enabled, also issue queries while writing.
+ queryConcDuringWritesCloseCh := make(chan struct{}, 1)
+ numTotalQueryMatches := atomic.NewUint32(0)
+ numTotalQueryErrors := atomic.NewUint32(0)
+ checkNumTotalQueryMatches := false
+ if opts.concurrencyQueryDuringWrites == 0 {
+ log.Info("no concurrent queries during writes configured")
+ } else {
+ log.Info("starting concurrent queries during writes",
+ zap.Int("concurrency", opts.concurrencyQueryDuringWrites))
+ checkNumTotalQueryMatches = true
+ for i := 0; i < opts.concurrencyQueryDuringWrites; i++ {
+			i := i // Capture the loop variable for use in the goroutine.
+			go func() {
+				src := rand.NewSource(int64(i))
+ rng := rand.New(src)
+ for {
+ select {
+ case <-queryConcDuringWritesCloseCh:
+ return
+ default:
+ }
+
+ switch opts.concurrencyQueryDuringWritesType {
+ case indexQuery:
+ randI := rng.Intn(opts.concurrencyEnqueueWorker)
+ randJ := rng.Intn(opts.enqueuePerWorker)
+ id, tags := genIDTags(randI, randJ, opts.numTags)
+ ok, err := isIndexedChecked(t, session, md.ID(), id, tags)
+ if err != nil {
+ if n := numTotalQueryErrors.Inc(); n < 10 {
+						// Log only the first few errors for visibility, without flooding the log.
+ log.Error("sampled query error", zap.Error(err))
}
}
- atomic.AddUint32(&numTotalErrors, numErrors)
- insertWg.Done()
- }()
+ if ok {
+ numTotalQueryMatches.Inc()
+ }
+ case indexAggregateQuery:
+ randI := rng.Intn(opts.concurrencyEnqueueWorker)
+ match := idx.NewTermQuery([]byte("common_i"), []byte(strconv.Itoa(randI)))
+ q := index.Query{Query: match}
+
+ now := time.Now()
+ qOpts := index.AggregationOptions{
+ QueryOptions: index.QueryOptions{
+ StartInclusive: now.Add(-md.Options().RetentionOptions().RetentionPeriod()),
+ EndExclusive: now,
+ DocsLimit: 1000,
+ },
+ }
+
+ ctx := context.NewContext()
+ r, err := testSetup.DB().AggregateQuery(ctx, md.ID(), q, qOpts)
+ if err != nil {
+ panic(err)
+ }
+
+ tagValues := 0
+ for _, entry := range r.Results.Map().Iter() {
+ values := entry.Value()
+ tagValues += values.Size()
+ }
+
+ // Done with resources, return to pool.
+ ctx.Close()
+
+ numTotalQueryMatches.Add(uint32(tagValues))
+ default:
+ panic("unknown query type")
+ }
}
+ }()
+ }
+ }
+
+ // Wait for writes to at least be enqueued.
+ wg.Wait()
+
+ // Check no write errors.
+ require.Equal(t, int(0), int(numTotalErrors.Load()))
+
+ if checkNumTotalQueryMatches {
+ // Check matches.
+ require.True(t, numTotalQueryMatches.Load() > 0, "no query matches")
+ }
+
+ log.Info("test data written",
+ zap.Duration("took", time.Since(start)),
+ zap.Int("written", int(numTotalSuccess.Load())),
+ zap.Time("serverTime", nowFn()),
+ zap.Uint32("queryMatches", numTotalQueryMatches.Load()))
+
+ log.Info("data indexing verify start")
- insertWg.Wait()
- require.Zero(t, numTotalErrors)
- log.Info("test data written", zap.Duration("took", time.Since(start)))
- log.Info("waiting to see if data is indexed")
+	// Wait until at least all writes have been enqueued for indexing.
+ expectStatPrefix := "dbindex.index-attempt+namespace=testNs1,"
+ expectStatProcess := expectStatPrefix + "stage=process"
+ numIndexTotal := opts.enqueuePerWorker
+ multiplyByConcurrency := multiplyBy(opts.concurrencyEnqueueWorker)
+ expectNumIndex := multiplyByConcurrency(numIndexTotal)
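+	// Poll the emitted metrics until every enqueued write has reached the
+	// index "process" stage, or the timeout elapses.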
+ indexProcess := xclock.WaitUntil(func() bool {
+ counters := testSetup.Scope().Snapshot().Counters()
+ counter, ok := counters[expectStatProcess]
+ if !ok {
+ return false
+ }
+ return int(counter.Value()) == expectNumIndex
+ }, time.Minute)
- var (
- fetchWg sync.WaitGroup
- )
- for i := 0; i < concurrency; i++ {
+ counters := testSetup.Scope().Snapshot().Counters()
+ counter, ok := counters[expectStatProcess]
+
+ var value int
+ if ok {
+ value = int(counter.Value())
+ }
+ assert.True(t, indexProcess,
+ fmt.Sprintf("expected to index %d but processed %d", expectNumIndex, value))
+
+ // Allow concurrent query during writes to finish.
+ close(queryConcDuringWritesCloseCh)
+
+ // Check no query errors.
+	require.Equal(t, int(0), int(numTotalQueryErrors.Load()))
+
+ if !opts.skipVerify {
+ log.Info("data indexing each series visible start")
+ // Now check all of them are individually indexed.
+ var (
+ fetchWg sync.WaitGroup
+ notIndexedErrs []error
+ notIndexedLock sync.Mutex
+ )
+ for i := 0; i < opts.concurrencyEnqueueWorker; i++ {
+ fetchWg.Add(1)
+ i := i
+ go func() {
+ defer fetchWg.Done()
+
+ for j := 0; j < opts.enqueuePerWorker; j++ {
+ if opts.skipWrites && j%2 == 0 {
+ continue // not meant to be indexed.
+ }
+
+ j := j
fetchWg.Add(1)
- idx := i
- go func() {
- id, tags := genIDTags(idx, writeEach-1, numTags)
+ workerPool.Go(func() {
+ defer fetchWg.Done()
+
+ id, tags := genIDTags(i, j, opts.numTags)
indexed := xclock.WaitUntil(func() bool {
found := isIndexed(t, session, md.ID(), id, tags)
return found
}, 30*time.Second)
- assert.True(t, indexed)
- fetchWg.Done()
- }()
+ if !indexed {
+ err := fmt.Errorf("not indexed series: i=%d, j=%d", i, j)
+ notIndexedLock.Lock()
+ notIndexedErrs = append(notIndexedErrs, err)
+ notIndexedLock.Unlock()
+ }
+ })
}
- fetchWg.Wait()
- log.Info("data is indexed", zap.Duration("took", time.Since(start)))
- })
+ }()
+ }
+ fetchWg.Wait()
+
+ require.Equal(t, 0, len(notIndexedErrs),
+ fmt.Sprintf("not indexed errors: %v", notIndexedErrs[:min(5, len(notIndexedErrs))]))
+ }
+
+ log.Info("data indexing verify done", zap.Duration("took", time.Since(start)))
+
+ // Make sure attempted total indexing = skipped + written.
+ counters = testSetup.Scope().Snapshot().Counters()
+ totalSkippedWritten := 0
+ for _, expectID := range []string{
+ expectStatPrefix + "stage=skip",
+ expectStatPrefix + "stage=write",
+ } {
+ actual, ok := counters[expectID]
+ assert.True(t, ok,
+ fmt.Sprintf("counter not found to test value: id=%s", expectID))
+ if ok {
+ totalSkippedWritten += int(actual.Value())
+ }
+ }
+
+ log.Info("check written + skipped",
+ zap.Int("expectedValue", multiplyByConcurrency(numIndexTotal)),
+ zap.Int("actualValue", totalSkippedWritten))
+ assert.Equal(t, multiplyByConcurrency(numIndexTotal), totalSkippedWritten,
+ "total written + skipped mismatch")
+}
+
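+// multiplyBy returns a function that multiplies its argument by n.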
+func multiplyBy(n int) func(int) int {
+ return func(x int) int {
+ return n * x
+ }
+}
+
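+// min returns the smaller of two ints.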
+func min(x, y int) int {
+ if x < y {
+ return x
}
+ return y
}
diff --git a/src/dbnode/integration/index_warm_write_gap_test.go b/src/dbnode/integration/index_warm_write_gap_test.go
new file mode 100644
index 0000000000..560acd55ef
--- /dev/null
+++ b/src/dbnode/integration/index_warm_write_gap_test.go
@@ -0,0 +1,129 @@
+// +build integration
+//
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package integration
+
+import (
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/retention"
+ "github.com/m3db/m3/src/dbnode/storage/index"
+ "github.com/m3db/m3/src/m3ninx/idx"
+ xclock "github.com/m3db/m3/src/x/clock"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+)
+
+/*
+ * This test exercises the following situation: now is 1p, data blockSize is 1h,
+ * index blockSize is 2h, retention period is 2h, buffer past is 10mins, and
+ * buffer future is 20mins. We write & index 50 metrics between (12p, 12:50p)
+ * and then ensure that index writes falling in the gap between the warm index
+ * start and the start of buffer past are indexed.
+ */
+func TestWarmIndexWriteGap(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow() // Just skip if we're doing a short run
+ }
+
+ var (
+ numWrites = 50
+ numTags = 10
+ retentionPeriod = 2 * time.Hour
+ dataBlockSize = time.Hour
+ indexBlockSize = 2 * time.Hour
+ bufferFuture = 20 * time.Minute
+ bufferPast = 10 * time.Minute
+ )
+
+ // Test setup
+ md, err := namespace.NewMetadata(testNamespaces[0],
+ namespace.NewOptions().
+ SetRetentionOptions(
+ retention.NewOptions().
+ SetRetentionPeriod(retentionPeriod).
+ SetBufferPast(bufferPast).
+ SetBufferFuture(bufferFuture).
+ SetBlockSize(dataBlockSize)).
+ SetIndexOptions(
+ namespace.NewIndexOptions().
+ SetBlockSize(indexBlockSize).SetEnabled(true)).
+ SetColdWritesEnabled(true))
+ require.NoError(t, err)
+
+ testOpts := NewTestOptions(t).
+ SetNamespaces([]namespace.Metadata{md}).
+ SetWriteNewSeriesAsync(true)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
+ require.NoError(t, err)
+ defer testSetup.Close()
+
+ t0 := time.Date(2018, time.May, 6, 13, 0, 0, 0, time.UTC)
+ // Issue writes in the gap between warm index start and start of buffer past.
+ t1 := t0.Truncate(indexBlockSize)
+ t2 := t0.Truncate(dataBlockSize).Add(-bufferPast)
+ testSetup.SetNowFn(t0)
+
+ writesPeriod0 := GenerateTestIndexWrite(0, numWrites, numTags, t1, t2)
+
+ // Start the server
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
+ require.NoError(t, testSetup.StartServer())
+
+ // Stop the server
+ defer func() {
+ require.NoError(t, testSetup.StopServer())
+ log.Debug("server is now down")
+ }()
+
+ client := testSetup.M3DBClient()
+ session, err := client.DefaultSession()
+ require.NoError(t, err)
+
+ log.Info("starting data write")
+ start := time.Now()
+ writesPeriod0.Write(t, md.ID(), session)
+ log.Info("test data written", zap.Duration("took", time.Since(start)))
+
+ log.Info("waiting till data is indexed")
+ indexed := xclock.WaitUntil(func() bool {
+ indexPeriod0 := writesPeriod0.NumIndexed(t, md.ID(), session)
+ return indexPeriod0 == len(writesPeriod0)
+ }, 5*time.Second)
+ require.True(t, indexed)
+	log.Info("verified data is indexed", zap.Duration("took", time.Since(start)))
+
+	// "shared":"shared" is a common tag across all written metrics.
+ query := index.Query{
+ Query: idx.NewTermQuery([]byte("shared"), []byte("shared"))}
+
+ // ensure all data is present
+ log.Info("querying period0 results")
+ period0Results, _, err := session.FetchTagged(
+ md.ID(), query, index.QueryOptions{StartInclusive: t1, EndExclusive: t2})
+ require.NoError(t, err)
+ writesPeriod0.MatchesSeriesIters(t, period0Results)
+ log.Info("found period0 results")
+}
diff --git a/src/dbnode/integration/integration.go b/src/dbnode/integration/integration.go
index ca6578a204..b0fdba6c39 100644
--- a/src/dbnode/integration/integration.go
+++ b/src/dbnode/integration/integration.go
@@ -30,6 +30,7 @@ import (
"github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/dbnode/integration/generate"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/persist/fs"
persistfs "github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/storage"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
@@ -43,7 +44,10 @@ import (
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/topology/testutil"
xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
+ "github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/m3ninx/index/segment/builder"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/instrument"
xretry "github.com/m3db/m3/src/x/retry"
@@ -71,7 +75,7 @@ func waitUntil(fn conditionFn, timeout time.Duration) bool {
return false
}
-func newMultiAddrTestOptions(opts testOptions, instance int) testOptions {
+func newMultiAddrTestOptions(opts TestOptions, instance int) TestOptions {
bind := "127.0.0.1"
start := multiAddrPortStart + (instance * multiAddrPortEach)
return opts.
@@ -133,12 +137,12 @@ func newDefaulTestResultOptions(
func newDefaultBootstrappableTestSetups(
t *testing.T,
- opts testOptions,
+ opts TestOptions,
setupOpts []bootstrappableTestSetupOptions,
) (testSetups, closeFn) {
var (
replicas = len(setupOpts)
- setups []*testSetup
+ setups []TestSetup
cleanupFns []func()
cleanupFnsMutex sync.RWMutex
@@ -202,11 +206,11 @@ func newDefaultBootstrappableTestSetups(
SetClusterDatabaseTopologyInitializer(topologyInitializer).
SetUseTChannelClientForWriting(useTChannelClientForWriting)
- setup, err := newTestSetup(t, instanceOpts, nil)
+ setup, err := NewTestSetup(t, instanceOpts, nil)
require.NoError(t, err)
- topologyInitializer = setup.topoInit
+ topologyInitializer = setup.TopologyInitializer()
- instrumentOpts := setup.storageOpts.InstrumentOptions()
+ instrumentOpts := setup.StorageOpts().InstrumentOptions()
logger := instrumentOpts.Logger()
logger = logger.With(zap.Int("instance", instance))
instrumentOpts = instrumentOpts.SetLogger(logger)
@@ -214,10 +218,10 @@ func newDefaultBootstrappableTestSetups(
scope, _ := tally.NewRootScope(tally.ScopeOptions{Reporter: testStatsReporter}, 100*time.Millisecond)
instrumentOpts = instrumentOpts.SetMetricsScope(scope)
}
- setup.storageOpts = setup.storageOpts.SetInstrumentOptions(instrumentOpts)
+ setup.SetStorageOpts(setup.StorageOpts().SetInstrumentOptions(instrumentOpts))
var (
- bsOpts = newDefaulTestResultOptions(setup.storageOpts)
+ bsOpts = newDefaulTestResultOptions(setup.StorageOpts())
finalBootstrapper bootstrap.BootstrapperProvider
adminOpts = client.NewAdminOptions().
@@ -253,11 +257,11 @@ func newDefaultBootstrappableTestSetups(
adminOpts = adminOpts.SetStreamBlocksRetrier(retrier)
adminClient := newMultiAddrAdminClient(
t, adminOpts, topologyInitializer, origin, instrumentOpts)
- storageIdxOpts := setup.storageOpts.IndexOptions()
- fsOpts := setup.storageOpts.CommitLogOptions().FilesystemOptions()
+ storageIdxOpts := setup.StorageOpts().IndexOptions()
+ fsOpts := setup.StorageOpts().CommitLogOptions().FilesystemOptions()
if usingPeersBootstrapper {
var (
- runtimeOptsMgr = setup.storageOpts.RuntimeOptionsManager()
+ runtimeOptsMgr = setup.StorageOpts().RuntimeOptionsManager()
runtimeOpts = runtimeOptsMgr.Get().
SetClientBootstrapConsistencyLevel(bootstrapConsistencyLevel)
)
@@ -270,11 +274,10 @@ func newDefaultBootstrappableTestSetups(
SetFilesystemOptions(fsOpts).
// DatabaseBlockRetrieverManager and PersistManager need to be set or we will never execute
// the persist bootstrapping path
- SetDatabaseBlockRetrieverManager(setup.storageOpts.DatabaseBlockRetrieverManager()).
- SetPersistManager(setup.storageOpts.PersistManager()).
+ SetPersistManager(setup.StorageOpts().PersistManager()).
SetCompactor(newCompactor(t, storageIdxOpts)).
SetRuntimeOptionsManager(runtimeOptsMgr).
- SetContextPool(setup.storageOpts.ContextPool())
+ SetContextPool(setup.StorageOpts().ContextPool())
finalBootstrapper, err = peers.NewPeersBootstrapperProvider(peersOpts, finalBootstrapper)
require.NoError(t, err)
@@ -287,7 +290,6 @@ func newDefaultBootstrappableTestSetups(
SetResultOptions(bsOpts).
SetFilesystemOptions(fsOpts).
SetIndexOptions(storageIdxOpts).
- SetDatabaseBlockRetrieverManager(setup.storageOpts.DatabaseBlockRetrieverManager()).
SetCompactor(newCompactor(t, storageIdxOpts)).
SetPersistManager(persistMgr)
@@ -296,28 +298,28 @@ func newDefaultBootstrappableTestSetups(
processOpts := bootstrap.NewProcessOptions().
SetTopologyMapProvider(setup).
- SetOrigin(setup.origin)
+ SetOrigin(setup.Origin())
provider, err := bootstrap.NewProcessProvider(fsBootstrapper, processOpts, bsOpts)
require.NoError(t, err)
- setup.storageOpts = setup.storageOpts.SetBootstrapProcessProvider(provider)
+ setup.SetStorageOpts(setup.StorageOpts().SetBootstrapProcessProvider(provider))
if enableRepairs {
- setup.storageOpts = setup.storageOpts.
+ setup.SetStorageOpts(setup.StorageOpts().
SetRepairEnabled(true).
SetRepairOptions(
- setup.storageOpts.RepairOptions().
+ setup.StorageOpts().RepairOptions().
SetRepairThrottle(time.Millisecond).
SetRepairCheckInterval(time.Millisecond).
SetAdminClients([]client.AdminClient{adminClient}).
SetDebugShadowComparisonsPercentage(1.0).
// Avoid log spam.
- SetDebugShadowComparisonsEnabled(false))
+ SetDebugShadowComparisonsEnabled(false)))
}
setups = append(setups, setup)
appendCleanupFn(func() {
- setup.close()
+ setup.Close()
})
}
@@ -332,27 +334,27 @@ func newDefaultBootstrappableTestSetups(
func writeTestDataToDisk(
metadata namespace.Metadata,
- setup *testSetup,
+ setup TestSetup,
seriesMaps generate.SeriesBlocksByStart,
volume int,
) error {
ropts := metadata.Options().RetentionOptions()
- writer := generate.NewWriter(setup.generatorOptions(ropts))
- return writer.WriteData(namespace.NewContextFrom(metadata), setup.shardSet, seriesMaps, volume)
+ writer := generate.NewWriter(setup.GeneratorOptions(ropts))
+ return writer.WriteData(namespace.NewContextFrom(metadata), setup.ShardSet(), seriesMaps, volume)
}
func writeTestSnapshotsToDiskWithPredicate(
metadata namespace.Metadata,
- setup *testSetup,
+ setup TestSetup,
seriesMaps generate.SeriesBlocksByStart,
volume int,
pred generate.WriteDatapointPredicate,
snapshotInterval time.Duration,
) error {
ropts := metadata.Options().RetentionOptions()
- writer := generate.NewWriter(setup.generatorOptions(ropts))
+ writer := generate.NewWriter(setup.GeneratorOptions(ropts))
return writer.WriteSnapshotWithPredicate(
- namespace.NewContextFrom(metadata), setup.shardSet, seriesMaps, volume, pred, snapshotInterval)
+ namespace.NewContextFrom(metadata), setup.ShardSet(), seriesMaps, volume, pred, snapshotInterval)
}
func concatShards(a, b shard.Shards) shard.Shards {
@@ -415,7 +417,13 @@ func newCompactor(
t *testing.T,
opts index.Options,
) *compaction.Compactor {
- compactor, err := compaction.NewCompactor(opts.DocumentArrayPool(),
+ compactor, err := newCompactorWithErr(opts)
+ require.NoError(t, err)
+ return compactor
+}
+
+func newCompactorWithErr(opts index.Options) (*compaction.Compactor, error) {
+ return compaction.NewCompactor(opts.DocumentArrayPool(),
index.DocumentArrayPoolCapacity,
opts.SegmentBuilderOptions(),
opts.FSTSegmentOptions(),
@@ -427,7 +435,69 @@ func newCompactor(
DisableRegistry: true,
},
})
- require.NoError(t, err)
- return compactor
+}
+
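+// writeTestIndexDataToDisk builds a segment from the given documents and
+// persists it to disk as an index fileset for the given block start.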
+func writeTestIndexDataToDisk(
+ md namespace.Metadata,
+ storageOpts storage.Options,
+ indexVolumeType idxpersist.IndexVolumeType,
+ blockStart time.Time,
+ shards []uint32,
+ docs []doc.Document,
+) error {
+ blockSize := md.Options().IndexOptions().BlockSize()
+ fsOpts := storageOpts.CommitLogOptions().FilesystemOptions()
+ writer, err := fs.NewIndexWriter(fsOpts)
+ if err != nil {
+ return err
+ }
+ segmentWriter, err := idxpersist.NewMutableSegmentFileSetWriter(fst.WriterOptions{})
+ if err != nil {
+ return err
+ }
+ shardsMap := make(map[uint32]struct{})
+ for _, shard := range shards {
+ shardsMap[shard] = struct{}{}
+ }
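+	// Use the next free volume index so repeated writes for the same block
+	// start create a new fileset instead of colliding.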
+ volumeIndex, err := fs.NextIndexFileSetVolumeIndex(
+ fsOpts.FilePathPrefix(),
+ md.ID(),
+ blockStart,
+ )
+ if err != nil {
+ return err
+ }
+ writerOpts := fs.IndexWriterOpenOptions{
+ Identifier: fs.FileSetFileIdentifier{
+ Namespace: md.ID(),
+ BlockStart: blockStart,
+ VolumeIndex: volumeIndex,
+ },
+ BlockSize: blockSize,
+ Shards: shardsMap,
+ IndexVolumeType: indexVolumeType,
+ }
+ if err := writer.Open(writerOpts); err != nil {
+ return err
+ }
+
+	builder, err := builder.NewBuilderFromDocuments(builder.NewOptions())
+	if err != nil {
+		return err
+	}
+ for _, doc := range docs {
+ _, err = builder.Insert(doc)
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := segmentWriter.Reset(builder); err != nil {
+ return err
+ }
+ if err := writer.WriteSegmentFileSet(segmentWriter); err != nil {
+ return err
+ }
+ if err := builder.Close(); err != nil {
+ return err
+ }
+ return writer.Close()
}
diff --git a/src/dbnode/integration/integration_data_verify.go b/src/dbnode/integration/integration_data_verify.go
index bfc94a66c6..e889b596c1 100644
--- a/src/dbnode/integration/integration_data_verify.go
+++ b/src/dbnode/integration/integration_data_verify.go
@@ -61,8 +61,9 @@ func toDatapoints(fetched *rpc.FetchResult_) []generate.TestValue {
for i, dp := range fetched.Datapoints {
converted[i] = generate.TestValue{
Datapoint: ts.Datapoint{
- Timestamp: xtime.FromNormalizedTime(dp.Timestamp, time.Second),
- Value: dp.Value,
+ Timestamp: xtime.FromNormalizedTime(dp.Timestamp, time.Second),
+ TimestampNanos: xtime.ToUnixNano(xtime.FromNormalizedTime(dp.Timestamp, time.Second)),
+ Value: dp.Value,
},
Annotation: dp.Annotation,
}
@@ -72,7 +73,7 @@ func toDatapoints(fetched *rpc.FetchResult_) []generate.TestValue {
func verifySeriesMapForRange(
t *testing.T,
- ts *testSetup,
+ ts TestSetup,
start, end time.Time,
namespace ident.ID,
input generate.SeriesBlock,
@@ -94,7 +95,7 @@ func verifySeriesMapForRange(
req.RangeStart = xtime.ToNormalizedTime(start, time.Second)
req.RangeEnd = xtime.ToNormalizedTime(end, time.Second)
req.ResultTimeType = rpc.TimeType_UNIX_SECONDS
- fetched, err := ts.fetch(req)
+ fetched, err := ts.Fetch(req)
if !assert.NoError(t, err) {
return false
@@ -127,13 +128,13 @@ func verifySeriesMapForRange(
return false
}
for i, series := range actual {
- if ts.assertEqual == nil {
+ if ts.ShouldBeEqual() {
if !assert.Equal(t, expected[i], series) {
return false
}
} else {
assert.Equal(t, expected[i].ID, series.ID)
- if !ts.assertEqual(t, expected[i].Data, series.Data) {
+ if !ts.AssertEqual(t, expected[i].Data, series.Data) {
return false
}
}
@@ -142,7 +143,7 @@ func verifySeriesMapForRange(
// Now check the metadata of all the series match
ctx := context.NewContext()
defer ctx.Close()
- for _, shard := range ts.db.ShardSet().AllIDs() {
+ for _, shard := range ts.DB().ShardSet().AllIDs() {
var (
opts block.FetchBlocksMetadataOptions
pageToken storage.PageToken
@@ -156,7 +157,7 @@ func verifySeriesMapForRange(
break
}
- results, nextPageToken, err := ts.db.FetchBlocksMetadataV2(ctx,
+ results, nextPageToken, err := ts.DB().FetchBlocksMetadataV2(ctx,
namespace, shard, start, end, 4096, pageToken, opts)
assert.NoError(t, err)
@@ -196,11 +197,14 @@ func verifySeriesMapForRange(
entry += tag.Name.String() + "=" + tag.Value.String()
actual += entry
}
- ts.logger.Error("series does not match expected tags",
- zap.String("id", id),
- zap.String("expectedTags", expected),
- zap.String("actualTags", actual),
- )
+ ts.StorageOpts().InstrumentOptions().Logger().
+ Error("series does not match expected tags",
+ zap.String("id", id),
+ zap.String("expectedTags", expected),
+ zap.String("actualTags", actual),
+ zap.Any("expectedTagsErr", expectedTagsIter.Err()),
+				zap.Any("actualTagsErr", actualTagsIter.Err()),
+ )
}
if !assert.True(t, tagMatcher.Matches(actualTagsIter)) {
@@ -213,14 +217,14 @@ func verifySeriesMapForRange(
return true
}
-func containsSeries(ts *testSetup, namespace, seriesID ident.ID, start, end time.Time) (bool, error) {
+func containsSeries(ts TestSetup, namespace, seriesID ident.ID, start, end time.Time) (bool, error) {
req := rpc.NewFetchRequest()
req.NameSpace = namespace.String()
req.ID = seriesID.String()
req.RangeStart = xtime.ToNormalizedTime(start, time.Second)
req.RangeEnd = xtime.ToNormalizedTime(end, time.Second)
req.ResultTimeType = rpc.TimeType_UNIX_SECONDS
- fetched, err := ts.fetch(req)
+ fetched, err := ts.Fetch(req)
return len(fetched) != 0, err
}
@@ -269,11 +273,11 @@ func writeVerifyDebugOutput(
func verifySeriesMaps(
t *testing.T,
- ts *testSetup,
+ ts TestSetup,
namespace ident.ID,
seriesMaps map[xtime.UnixNano]generate.SeriesBlock,
) bool {
- debugFilePathPrefix := ts.opts.VerifySeriesDebugFilePathPrefix()
+ debugFilePathPrefix := ts.Opts().VerifySeriesDebugFilePathPrefix()
expectedDebugFilePath, ok := createFileIfPrefixSet(t, debugFilePathPrefix, fmt.Sprintf("%s-expected.log", namespace.String()))
if !ok {
return false
@@ -283,7 +287,7 @@ func verifySeriesMaps(
return false
}
- nsMetadata, ok := ts.db.Namespace(namespace)
+ nsMetadata, ok := ts.DB().Namespace(namespace)
if !assert.True(t, ok) {
return false
}
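// A short sketch of how these verification helpers are typically driven from
// a test now that the setup type is exported (values are arbitrary; seriesMaps
// would come from generate.BlocksByStart, as elsewhere in this package):
//
//	require.NoError(t, setup.StartServer())
//	defer func() { require.NoError(t, setup.StopServer()) }()
//	verifySeriesMaps(t, setup, testNamespaces[0], seriesMaps)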
diff --git a/src/dbnode/integration/options.go b/src/dbnode/integration/options.go
index dec5eca56e..ad9dc3bcca 100644
--- a/src/dbnode/integration/options.go
+++ b/src/dbnode/integration/options.go
@@ -84,120 +84,121 @@ var (
defaultIntegrationTestRetentionOpts = retention.NewOptions().SetRetentionPeriod(6 * time.Hour)
)
-type testOptions interface {
+// TestOptions contains integration test options.
+type TestOptions interface {
// SetNamespaces sets the namespaces.
- SetNamespaces(value []namespace.Metadata) testOptions
+ SetNamespaces(value []namespace.Metadata) TestOptions
// Namespaces returns the namespaces.
Namespaces() []namespace.Metadata
// SetNamespaceInitializer sets the namespace initializer,
 	// if this is set, it supersedes Namespaces().
- SetNamespaceInitializer(value namespace.Initializer) testOptions
+ SetNamespaceInitializer(value namespace.Initializer) TestOptions
// NamespaceInitializer returns the namespace initializer
NamespaceInitializer() namespace.Initializer
// SetID sets the node ID.
- SetID(value string) testOptions
+ SetID(value string) TestOptions
// ID returns the node ID.
ID() string
// SetTickMinimumInterval sets the tick interval.
- SetTickMinimumInterval(value time.Duration) testOptions
+ SetTickMinimumInterval(value time.Duration) TestOptions
// TickMinimumInterval returns the tick interval.
TickMinimumInterval() time.Duration
// SetHTTPClusterAddr sets the http cluster address.
- SetHTTPClusterAddr(value string) testOptions
+ SetHTTPClusterAddr(value string) TestOptions
// HTTPClusterAddr returns the http cluster address.
HTTPClusterAddr() string
// SetTChannelClusterAddr sets the tchannel cluster address.
- SetTChannelClusterAddr(value string) testOptions
+ SetTChannelClusterAddr(value string) TestOptions
// TChannelClusterAddr returns the tchannel cluster address.
TChannelClusterAddr() string
// SetHTTPNodeAddr sets the http node address.
- SetHTTPNodeAddr(value string) testOptions
+ SetHTTPNodeAddr(value string) TestOptions
// HTTPNodeAddr returns the http node address.
HTTPNodeAddr() string
// SetTChannelNodeAddr sets the tchannel node address.
- SetTChannelNodeAddr(value string) testOptions
+ SetTChannelNodeAddr(value string) TestOptions
// TChannelNodeAddr returns the tchannel node address.
TChannelNodeAddr() string
// SetHTTPDebugAddr sets the http debug address.
- SetHTTPDebugAddr(value string) testOptions
+ SetHTTPDebugAddr(value string) TestOptions
// HTTPDebugAddr returns the http debug address.
HTTPDebugAddr() string
// SetServerStateChangeTimeout sets the server state change timeout.
- SetServerStateChangeTimeout(value time.Duration) testOptions
+ SetServerStateChangeTimeout(value time.Duration) TestOptions
// ServerStateChangeTimeout returns the server state change timeout.
ServerStateChangeTimeout() time.Duration
// SetClusterConnectionTimeout sets the cluster connection timeout.
- SetClusterConnectionTimeout(value time.Duration) testOptions
+ SetClusterConnectionTimeout(value time.Duration) TestOptions
// ClusterConnectionTimeout returns the cluster connection timeout.
ClusterConnectionTimeout() time.Duration
// SetReadRequestTimeout sets the read request timeout.
- SetReadRequestTimeout(value time.Duration) testOptions
+ SetReadRequestTimeout(value time.Duration) TestOptions
// ReadRequestTimeout returns the read request timeout.
ReadRequestTimeout() time.Duration
// SetWriteRequestTimeout sets the write request timeout.
- SetWriteRequestTimeout(value time.Duration) testOptions
+ SetWriteRequestTimeout(value time.Duration) TestOptions
// WriteRequestTimeout returns the write request timeout.
WriteRequestTimeout() time.Duration
// SetTruncateRequestTimeout sets the truncate request timeout.
- SetTruncateRequestTimeout(value time.Duration) testOptions
+ SetTruncateRequestTimeout(value time.Duration) TestOptions
// TruncateRequestTimeout returns the truncate request timeout.
TruncateRequestTimeout() time.Duration
// SetWorkerPoolSize sets the number of workers in the worker pool.
- SetWorkerPoolSize(value int) testOptions
+ SetWorkerPoolSize(value int) TestOptions
// WorkerPoolSize returns the number of workers in the worker pool.
WorkerPoolSize() int
// SetClusterDatabaseTopologyInitializer sets the topology initializer that
// is used when creating a cluster database
- SetClusterDatabaseTopologyInitializer(value topology.Initializer) testOptions
+ SetClusterDatabaseTopologyInitializer(value topology.Initializer) TestOptions
// ClusterDatabaseTopologyInitializer returns the topology initializer that
// is used when creating a cluster database
ClusterDatabaseTopologyInitializer() topology.Initializer
// SetUseTChannelClientForReading sets whether we use the tchannel client for reading.
- SetUseTChannelClientForReading(value bool) testOptions
+ SetUseTChannelClientForReading(value bool) TestOptions
// UseTChannelClientForReading returns whether we use the tchannel client for reading.
UseTChannelClientForReading() bool
// SetUseTChannelClientForWriting sets whether we use the tchannel client for writing.
- SetUseTChannelClientForWriting(value bool) testOptions
+ SetUseTChannelClientForWriting(value bool) TestOptions
// UseTChannelClientForWriting returns whether we use the tchannel client for writing.
UseTChannelClientForWriting() bool
// SetUseTChannelClientForTruncation sets whether we use the tchannel client for truncation.
- SetUseTChannelClientForTruncation(value bool) testOptions
+ SetUseTChannelClientForTruncation(value bool) TestOptions
// UseTChannelClientForTruncation returns whether we use the tchannel client for truncation.
UseTChannelClientForTruncation() bool
@@ -212,7 +213,7 @@ type testOptions interface {
// for a namespace from the manager.
SetDatabaseBlockRetrieverManager(
value block.DatabaseBlockRetrieverManager,
- ) testOptions
+ ) TestOptions
// NewBlockRetrieverFn returns the new block retriever constructor to
// use when bootstrapping retrievable blocks instead of blocks
@@ -220,7 +221,7 @@ type testOptions interface {
DatabaseBlockRetrieverManager() block.DatabaseBlockRetrieverManager
// SetVerifySeriesDebugFilePathPrefix sets the file path prefix for writing a debug file of series comparisons.
- SetVerifySeriesDebugFilePathPrefix(value string) testOptions
+ SetVerifySeriesDebugFilePathPrefix(value string) TestOptions
// VerifySeriesDebugFilePathPrefix returns the file path prefix for writing a debug file of series comparisons.
VerifySeriesDebugFilePathPrefix() string
@@ -229,47 +230,47 @@ type testOptions interface {
WriteConsistencyLevel() topology.ConsistencyLevel
// SetWriteConsistencyLevel sets the consistency level for writing with the m3db client.
- SetWriteConsistencyLevel(value topology.ConsistencyLevel) testOptions
+ SetWriteConsistencyLevel(value topology.ConsistencyLevel) TestOptions
// NumShards returns the number of shards to use.
NumShards() int
// SetNumShards sets the number of shards to use.
- SetNumShards(value int) testOptions
+ SetNumShards(value int) TestOptions
// MaxWiredBlocks returns the maximum number of wired blocks to keep in memory using the LRU cache.
MaxWiredBlocks() uint
// SetMaxWiredBlocks sets the maximum number of wired blocks to keep in memory using the LRU cache.
- SetMaxWiredBlocks(value uint) testOptions
+ SetMaxWiredBlocks(value uint) TestOptions
// SetWriteNewSeriesAsync sets whether we insert/index asynchronously.
- SetWriteNewSeriesAsync(bool) testOptions
+ SetWriteNewSeriesAsync(bool) TestOptions
// WriteNewSeriesAsync returns whether we insert/index asynchronously.
WriteNewSeriesAsync() bool
// SetFilePathPrefix sets the file path prefix.
- SetFilePathPrefix(value string) testOptions
+ SetFilePathPrefix(value string) TestOptions
// FilePathPrefix returns the file path prefix.
FilePathPrefix() string
// SetProtoEncoding turns on proto encoder.
- SetProtoEncoding(value bool) testOptions
+ SetProtoEncoding(value bool) TestOptions
// ProtoEncoding returns whether proto encoder is turned on.
ProtoEncoding() bool
// SetAssertTestDataEqual sets a comparator to compare two byte arrays,
// useful for proto-encoded annotations.
- SetAssertTestDataEqual(value assertTestDataEqual) testOptions
+ SetAssertTestDataEqual(value assertTestDataEqual) TestOptions
// AssertTestDataEqual returns a comparator to compare two byte arrays.
AssertTestDataEqual() assertTestDataEqual
// SetNowFn will set the now fn.
- SetNowFn(value func() time.Time) testOptions
+ SetNowFn(value func() time.Time) TestOptions
// NowFn returns the now fn.
NowFn() func() time.Time
@@ -307,7 +308,8 @@ type options struct {
nowFn func() time.Time
}
-func newTestOptions(t *testing.T) testOptions {
+// NewTestOptions returns a new set of integration test options.
+func NewTestOptions(t *testing.T) TestOptions {
var namespaces []namespace.Metadata
nsOpts := namespace.NewOptions().
SetRepairEnabled(false).
@@ -339,7 +341,7 @@ func newTestOptions(t *testing.T) testOptions {
}
}
-func (o *options) SetNamespaces(value []namespace.Metadata) testOptions {
+func (o *options) SetNamespaces(value []namespace.Metadata) TestOptions {
opts := *o
opts.namespaces = opts.namespaces[:0]
opts.namespaces = value
@@ -350,7 +352,7 @@ func (o *options) Namespaces() []namespace.Metadata {
return o.namespaces
}
-func (o *options) SetNamespaceInitializer(value namespace.Initializer) testOptions {
+func (o *options) SetNamespaceInitializer(value namespace.Initializer) TestOptions {
opts := *o
opts.nsInitializer = value
return &opts
@@ -360,7 +362,7 @@ func (o *options) NamespaceInitializer() namespace.Initializer {
return o.nsInitializer
}
-func (o *options) SetID(value string) testOptions {
+func (o *options) SetID(value string) TestOptions {
opts := *o
opts.id = value
return &opts
@@ -369,7 +371,7 @@ func (o *options) SetID(value string) testOptions {
func (o *options) ID() string {
return o.id
}
-func (o *options) SetTickMinimumInterval(value time.Duration) testOptions {
+func (o *options) SetTickMinimumInterval(value time.Duration) TestOptions {
opts := *o
opts.tickMinimumInterval = value
return &opts
@@ -379,7 +381,7 @@ func (o *options) TickMinimumInterval() time.Duration {
return o.tickMinimumInterval
}
-func (o *options) SetHTTPClusterAddr(value string) testOptions {
+func (o *options) SetHTTPClusterAddr(value string) TestOptions {
opts := *o
opts.httpClusterAddr = value
return &opts
@@ -389,7 +391,7 @@ func (o *options) HTTPClusterAddr() string {
return o.httpClusterAddr
}
-func (o *options) SetTChannelClusterAddr(value string) testOptions {
+func (o *options) SetTChannelClusterAddr(value string) TestOptions {
opts := *o
opts.tchannelClusterAddr = value
return &opts
@@ -399,7 +401,7 @@ func (o *options) TChannelClusterAddr() string {
return o.tchannelClusterAddr
}
-func (o *options) SetHTTPNodeAddr(value string) testOptions {
+func (o *options) SetHTTPNodeAddr(value string) TestOptions {
opts := *o
opts.httpNodeAddr = value
return &opts
@@ -409,7 +411,7 @@ func (o *options) HTTPNodeAddr() string {
return o.httpNodeAddr
}
-func (o *options) SetTChannelNodeAddr(value string) testOptions {
+func (o *options) SetTChannelNodeAddr(value string) TestOptions {
opts := *o
opts.tchannelNodeAddr = value
return &opts
@@ -419,7 +421,7 @@ func (o *options) TChannelNodeAddr() string {
return o.tchannelNodeAddr
}
-func (o *options) SetHTTPDebugAddr(value string) testOptions {
+func (o *options) SetHTTPDebugAddr(value string) TestOptions {
opts := *o
opts.httpDebugAddr = value
return &opts
@@ -429,7 +431,7 @@ func (o *options) HTTPDebugAddr() string {
return o.httpDebugAddr
}
-func (o *options) SetServerStateChangeTimeout(value time.Duration) testOptions {
+func (o *options) SetServerStateChangeTimeout(value time.Duration) TestOptions {
opts := *o
opts.serverStateChangeTimeout = value
return &opts
@@ -439,7 +441,7 @@ func (o *options) ServerStateChangeTimeout() time.Duration {
return o.serverStateChangeTimeout
}
-func (o *options) SetClusterConnectionTimeout(value time.Duration) testOptions {
+func (o *options) SetClusterConnectionTimeout(value time.Duration) TestOptions {
opts := *o
opts.clusterConnectionTimeout = value
return &opts
@@ -449,7 +451,7 @@ func (o *options) ClusterConnectionTimeout() time.Duration {
return o.clusterConnectionTimeout
}
-func (o *options) SetReadRequestTimeout(value time.Duration) testOptions {
+func (o *options) SetReadRequestTimeout(value time.Duration) TestOptions {
opts := *o
opts.readRequestTimeout = value
return &opts
@@ -459,7 +461,7 @@ func (o *options) ReadRequestTimeout() time.Duration {
return o.readRequestTimeout
}
-func (o *options) SetWriteRequestTimeout(value time.Duration) testOptions {
+func (o *options) SetWriteRequestTimeout(value time.Duration) TestOptions {
opts := *o
opts.writeRequestTimeout = value
return &opts
@@ -469,7 +471,7 @@ func (o *options) WriteRequestTimeout() time.Duration {
return o.writeRequestTimeout
}
-func (o *options) SetTruncateRequestTimeout(value time.Duration) testOptions {
+func (o *options) SetTruncateRequestTimeout(value time.Duration) TestOptions {
opts := *o
opts.truncateRequestTimeout = value
return &opts
@@ -479,7 +481,7 @@ func (o *options) TruncateRequestTimeout() time.Duration {
return o.truncateRequestTimeout
}
-func (o *options) SetWorkerPoolSize(value int) testOptions {
+func (o *options) SetWorkerPoolSize(value int) TestOptions {
opts := *o
opts.workerPoolSize = value
return &opts
@@ -489,7 +491,7 @@ func (o *options) WorkerPoolSize() int {
return o.workerPoolSize
}
-func (o *options) SetClusterDatabaseTopologyInitializer(value topology.Initializer) testOptions {
+func (o *options) SetClusterDatabaseTopologyInitializer(value topology.Initializer) TestOptions {
opts := *o
opts.clusterDatabaseTopologyInitializer = value
return &opts
@@ -499,7 +501,7 @@ func (o *options) ClusterDatabaseTopologyInitializer() topology.Initializer {
return o.clusterDatabaseTopologyInitializer
}
-func (o *options) SetUseTChannelClientForReading(value bool) testOptions {
+func (o *options) SetUseTChannelClientForReading(value bool) TestOptions {
opts := *o
opts.useTChannelClientForReading = value
return &opts
@@ -509,7 +511,7 @@ func (o *options) UseTChannelClientForReading() bool {
return o.useTChannelClientForReading
}
-func (o *options) SetUseTChannelClientForWriting(value bool) testOptions {
+func (o *options) SetUseTChannelClientForWriting(value bool) TestOptions {
opts := *o
opts.useTChannelClientForWriting = value
return &opts
@@ -519,7 +521,7 @@ func (o *options) UseTChannelClientForWriting() bool {
return o.useTChannelClientForWriting
}
-func (o *options) SetUseTChannelClientForTruncation(value bool) testOptions {
+func (o *options) SetUseTChannelClientForTruncation(value bool) TestOptions {
opts := *o
opts.useTChannelClientForTruncation = value
return &opts
@@ -531,7 +533,7 @@ func (o *options) UseTChannelClientForTruncation() bool {
func (o *options) SetDatabaseBlockRetrieverManager(
value block.DatabaseBlockRetrieverManager,
-) testOptions {
+) TestOptions {
opts := *o
opts.blockRetrieverManager = value
return &opts
@@ -541,7 +543,7 @@ func (o *options) DatabaseBlockRetrieverManager() block.DatabaseBlockRetrieverMa
return o.blockRetrieverManager
}
-func (o *options) SetVerifySeriesDebugFilePathPrefix(value string) testOptions {
+func (o *options) SetVerifySeriesDebugFilePathPrefix(value string) TestOptions {
opts := *o
opts.verifySeriesDebugFilePathPrefix = value
return &opts
@@ -555,7 +557,7 @@ func (o *options) WriteConsistencyLevel() topology.ConsistencyLevel {
return o.writeConsistencyLevel
}
-func (o *options) SetWriteConsistencyLevel(cLevel topology.ConsistencyLevel) testOptions {
+func (o *options) SetWriteConsistencyLevel(cLevel topology.ConsistencyLevel) TestOptions {
opts := *o
opts.writeConsistencyLevel = cLevel
return &opts
@@ -565,7 +567,7 @@ func (o *options) NumShards() int {
return o.numShards
}
-func (o *options) SetNumShards(value int) testOptions {
+func (o *options) SetNumShards(value int) TestOptions {
opts := *o
opts.numShards = value
return &opts
@@ -575,13 +577,13 @@ func (o *options) MaxWiredBlocks() uint {
return o.maxWiredBlocks
}
-func (o *options) SetMaxWiredBlocks(value uint) testOptions {
+func (o *options) SetMaxWiredBlocks(value uint) TestOptions {
opts := *o
opts.maxWiredBlocks = value
return &opts
}
-func (o *options) SetWriteNewSeriesAsync(value bool) testOptions {
+func (o *options) SetWriteNewSeriesAsync(value bool) TestOptions {
opts := *o
opts.writeNewSeriesAsync = value
return &opts
@@ -591,7 +593,7 @@ func (o *options) WriteNewSeriesAsync() bool {
return o.writeNewSeriesAsync
}
-func (o *options) SetFilePathPrefix(value string) testOptions {
+func (o *options) SetFilePathPrefix(value string) TestOptions {
opts := *o
opts.filePathPrefix = value
return &opts
@@ -601,7 +603,7 @@ func (o *options) FilePathPrefix() string {
return o.filePathPrefix
}
-func (o *options) SetProtoEncoding(value bool) testOptions {
+func (o *options) SetProtoEncoding(value bool) TestOptions {
opts := *o
opts.protoEncoding = value
return &opts
@@ -611,7 +613,7 @@ func (o *options) ProtoEncoding() bool {
return o.protoEncoding
}
-func (o *options) SetAssertTestDataEqual(value assertTestDataEqual) testOptions {
+func (o *options) SetAssertTestDataEqual(value assertTestDataEqual) TestOptions {
opts := *o
opts.assertEqual = value
return &opts
@@ -621,7 +623,7 @@ func (o *options) AssertTestDataEqual() assertTestDataEqual {
return o.assertEqual
}
-func (o *options) SetNowFn(value func() time.Time) testOptions {
+func (o *options) SetNowFn(value func() time.Time) TestOptions {
opts := *o
opts.nowFn = value
return &opts
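// With testOptions exported as TestOptions, packages outside integration can
// configure setups fluently; a small sketch using only setters introduced
// above (the namespace value ns is assumed constructed earlier):
//
//	opts := NewTestOptions(t).
//		SetNamespaces([]namespace.Metadata{ns}).
//		SetWriteNewSeriesAsync(true).
//		SetTickMinimumInterval(time.Second)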
diff --git a/src/dbnode/integration/peers_bootstrap_high_concurrency_test.go b/src/dbnode/integration/peers_bootstrap_high_concurrency_test.go
index c7e6540928..ee60e9d78d 100644
--- a/src/dbnode/integration/peers_bootstrap_high_concurrency_test.go
+++ b/src/dbnode/integration/peers_bootstrap_high_concurrency_test.go
@@ -50,7 +50,7 @@ func TestPeersBootstrapHighConcurrency(t *testing.T) {
namesp, err := namespace.NewMetadata(testNamespaces[0],
namespace.NewOptions().SetRetentionOptions(retentionOpts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{namesp}).
// Use TChannel clients for writing / reading because we want to target individual nodes at a time
// and not write/read all nodes in the cluster.
@@ -81,7 +81,7 @@ func TestPeersBootstrapHighConcurrency(t *testing.T) {
shardIDs = append(shardIDs, id)
}
- now := setups[0].getNowFn()
+ now := setups[0].NowFn()()
blockSize := retentionOpts.BlockSize()
seriesMaps := generate.BlocksByStart([]generate.BlockConfig{
{IDs: shardIDs, NumPoints: 3, Start: now.Add(-3 * blockSize)},
@@ -93,16 +93,16 @@ func TestPeersBootstrapHighConcurrency(t *testing.T) {
require.NoError(t, err)
// Start the first server with filesystem bootstrapper
- require.NoError(t, setups[0].startServer())
+ require.NoError(t, setups[0].StartServer())
// Start the last server with peers and filesystem bootstrappers
- require.NoError(t, setups[1].startServer())
+ require.NoError(t, setups[1].StartServer())
log.Debug("servers are now up")
// Stop the servers
defer func() {
- setups.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ setups.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
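// Note on the NowFn()() call sites above: NowFn() returns the clock.NowFn
// itself rather than the current time, hence the double invocation. A local
// alias keeps repeated call sites readable (illustrative only):
//
//	nowFn := setups[0].NowFn()
//	now := nowFn()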
diff --git a/src/dbnode/integration/peers_bootstrap_index_aggregate_test.go b/src/dbnode/integration/peers_bootstrap_index_aggregate_test.go
index 88830e2a58..10a2d2890b 100644
--- a/src/dbnode/integration/peers_bootstrap_index_aggregate_test.go
+++ b/src/dbnode/integration/peers_bootstrap_index_aggregate_test.go
@@ -58,7 +58,7 @@ func TestPeersBootstrapIndexAggregateQuery(t *testing.T) {
SetIndexOptions(idxOpts)
ns1, err := namespace.NewMetadata(testNamespaces[0], nOpts)
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1}).
// Use TChannel clients for writing / reading because we want to target individual nodes at a time
// and not write/read all nodes in the cluster.
@@ -74,7 +74,7 @@ func TestPeersBootstrapIndexAggregateQuery(t *testing.T) {
// Write test data for first node
// Write test data
- now := setups[0].getNowFn()
+ now := setups[0].NowFn()()
fooSeries := generate.Series{
ID: ident.StringID("foo"),
@@ -120,18 +120,18 @@ func TestPeersBootstrapIndexAggregateQuery(t *testing.T) {
require.NoError(t, writeTestDataToDisk(ns1, setups[0], seriesMaps, 0))
// Start the first server with filesystem bootstrapper
- require.NoError(t, setups[0].startServer())
+ require.NoError(t, setups[0].StartServer())
// Start the remaining servers with peers and filesystem bootstrappers
- setups[1:].parallel(func(s *testSetup) {
- require.NoError(t, s.startServer())
+ setups[1:].parallel(func(s TestSetup) {
+ require.NoError(t, s.StartServer())
})
log.Debug("servers are now up")
// Stop the servers
defer func() {
- setups.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ setups.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
@@ -142,7 +142,7 @@ func TestPeersBootstrapIndexAggregateQuery(t *testing.T) {
}
// Issue aggregate index queries to the second node which bootstrapped the metadata
- session, err := setups[1].m3dbClient.DefaultSession()
+ session, err := setups[1].M3DBClient().DefaultSession()
require.NoError(t, err)
start := now.Add(-rOpts.RetentionPeriod())
diff --git a/src/dbnode/integration/peers_bootstrap_index_test.go b/src/dbnode/integration/peers_bootstrap_index_test.go
index 1865b75a9b..6083bfc0f0 100644
--- a/src/dbnode/integration/peers_bootstrap_index_test.go
+++ b/src/dbnode/integration/peers_bootstrap_index_test.go
@@ -59,7 +59,7 @@ func TestPeersBootstrapIndexWithIndexingEnabled(t *testing.T) {
SetIndexOptions(idxOpts)
ns1, err := namespace.NewMetadata(testNamespaces[0], nOpts)
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{ns1}).
// Use TChannel clients for writing / reading because we want to target individual nodes at a time
// and not write/read all nodes in the cluster.
@@ -75,7 +75,7 @@ func TestPeersBootstrapIndexWithIndexingEnabled(t *testing.T) {
// Write test data for first node
// Write test data
- now := setups[0].getNowFn()
+ now := setups[0].NowFn()()
fooSeries := generate.Series{
ID: ident.StringID("foo"),
@@ -121,16 +121,16 @@ func TestPeersBootstrapIndexWithIndexingEnabled(t *testing.T) {
require.NoError(t, writeTestDataToDisk(ns1, setups[0], seriesMaps, 0))
// Start the first server with filesystem bootstrapper
- require.NoError(t, setups[0].startServer())
+ require.NoError(t, setups[0].StartServer())
// Start the last server with peers and filesystem bootstrappers
- require.NoError(t, setups[1].startServer())
+ require.NoError(t, setups[1].StartServer())
log.Debug("servers are now up")
// Stop the servers
defer func() {
- setups.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ setups.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
@@ -141,7 +141,7 @@ func TestPeersBootstrapIndexWithIndexingEnabled(t *testing.T) {
}
// Issue some index queries to the second node which bootstrapped the metadata
- session, err := setups[1].m3dbClient.DefaultSession()
+ session, err := setups[1].M3DBClient().DefaultSession()
require.NoError(t, err)
start := now.Add(-rOpts.RetentionPeriod())
diff --git a/src/dbnode/integration/peers_bootstrap_merge_local_test.go b/src/dbnode/integration/peers_bootstrap_merge_local_test.go
index 88610e236a..b6638d7476 100644
--- a/src/dbnode/integration/peers_bootstrap_merge_local_test.go
+++ b/src/dbnode/integration/peers_bootstrap_merge_local_test.go
@@ -61,7 +61,7 @@ func testPeersBootstrapMergeLocal(t *testing.T, setTestOpts setTestOptions, upda
require.NoError(t, err)
var (
- opts = newTestOptions(t).
+ opts = NewTestOptions(t).
SetNamespaces([]namespace.Metadata{namesp}).
// Use TChannel clients for writing / reading because we want to target individual nodes at a time
// and not write/read all nodes in the cluster.
@@ -95,7 +95,7 @@ func testPeersBootstrapMergeLocal(t *testing.T, setTestOpts setTestOptions, upda
defer closeFn()
// Write test data for first node, ensure to overflow past
- now := setups[0].getNowFn()
+ now := setups[0].NowFn()()
cutoverAt := now.Add(retentionOpts.BufferFuture())
completeAt := now.Add(180 * time.Second)
blockSize := retentionOpts.BlockSize()
@@ -168,7 +168,7 @@ func testPeersBootstrapMergeLocal(t *testing.T, setTestOpts setTestOptions, upda
require.NoError(t, err)
// Start the first server with filesystem bootstrapper
- require.NoError(t, setups[0].startServer())
+ require.NoError(t, setups[0].StartServer())
secondNodeIsUp := make(chan struct{})
doneWriting := make(chan struct{})
@@ -181,10 +181,10 @@ func testPeersBootstrapMergeLocal(t *testing.T, setTestOpts setTestOptions, upda
<-secondNodeIsUp
// Progress time before writing data directly to second node
- setups[1].setNowFn(completeAt)
+ setups[1].SetNowFn(completeAt)
// Write data that "arrives" at the second node directly
- err := setups[1].writeBatch(namesp.ID(),
+ err := setups[1].WriteBatch(namesp.ID(),
directWritesSeriesMaps[xtime.ToUnixNano(now)])
if err != nil {
panic(err)
@@ -194,7 +194,7 @@ func testPeersBootstrapMergeLocal(t *testing.T, setTestOpts setTestOptions, upda
}()
// Start the last server with peers and filesystem bootstrappers
- require.NoError(t, setups[1].startServer())
+ require.NoError(t, setups[1].StartServer())
log.Debug("servers are now up")
secondNodeIsUp <- struct{}{}
@@ -202,8 +202,8 @@ func testPeersBootstrapMergeLocal(t *testing.T, setTestOpts setTestOptions, upda
// Stop the servers
defer func() {
- setups.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ setups.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
diff --git a/src/dbnode/integration/peers_bootstrap_merge_peer_blocks_test.go b/src/dbnode/integration/peers_bootstrap_merge_peer_blocks_test.go
index 88a2623efd..40f1daca66 100644
--- a/src/dbnode/integration/peers_bootstrap_merge_peer_blocks_test.go
+++ b/src/dbnode/integration/peers_bootstrap_merge_peer_blocks_test.go
@@ -59,7 +59,7 @@ func testPeersBootstrapMergePeerBlocks(t *testing.T, setTestOpts setTestOptions,
namesp, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().
SetRetentionOptions(retentionOpts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{namesp}).
// Use TChannel clients for writing / reading because we want to target individual nodes at a time
// and not write/read all nodes in the cluster.
@@ -78,7 +78,7 @@ func testPeersBootstrapMergePeerBlocks(t *testing.T, setTestOpts setTestOptions,
defer closeFn()
// Write test data alternating missing data for left/right nodes
- now := setups[0].getNowFn()
+ now := setups[0].NowFn()()
blockSize := retentionOpts.BlockSize()
// Make sure we have multiple blocks of data for multiple series to exercise
// the grouping and aggregating logic in the client peer bootstrapping process
@@ -120,18 +120,18 @@ func testPeersBootstrapMergePeerBlocks(t *testing.T, setTestOpts setTestOptions,
require.NoError(t, writeTestDataToDisk(namesp, setups[1], right, 0))
// Start the first two servers with filesystem bootstrappers
- setups[:2].parallel(func(s *testSetup) {
- require.NoError(t, s.startServer())
+ setups[:2].parallel(func(s TestSetup) {
+ require.NoError(t, s.StartServer())
})
// Start the last server with peers and filesystem bootstrappers
- require.NoError(t, setups[2].startServer())
+ require.NoError(t, setups[2].StartServer())
log.Debug("servers are now up")
// Stop the servers
defer func() {
- setups.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ setups.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
diff --git a/src/dbnode/integration/peers_bootstrap_node_down_test.go b/src/dbnode/integration/peers_bootstrap_node_down_test.go
index 40cf70d3e1..5518b1bb44 100644
--- a/src/dbnode/integration/peers_bootstrap_node_down_test.go
+++ b/src/dbnode/integration/peers_bootstrap_node_down_test.go
@@ -50,7 +50,7 @@ func TestPeersBootstrapNodeDown(t *testing.T) {
namesp, err := namespace.NewMetadata(testNamespaces[0],
namespace.NewOptions().SetRetentionOptions(retentionOpts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{namesp}).
// Use TChannel clients for writing / reading because we want to target individual nodes at a time
// and not write/read all nodes in the cluster.
@@ -66,7 +66,7 @@ func TestPeersBootstrapNodeDown(t *testing.T) {
defer closeFn()
// Write test data for first node
- now := setups[0].getNowFn()
+ now := setups[0].NowFn()()
blockSize := retentionOpts.BlockSize()
// Make sure we have multiple blocks of data for multiple series to exercise
// the grouping and aggregating logic in the client peer bootstrapping process
@@ -81,16 +81,16 @@ func TestPeersBootstrapNodeDown(t *testing.T) {
require.NoError(t, err)
// Start the first server with filesystem bootstrapper
- require.NoError(t, setups[0].startServer())
+ require.NoError(t, setups[0].StartServer())
// Leave second node down, start the last server with peers and filesystem bootstrappers
- require.NoError(t, setups[2].startServer())
+ require.NoError(t, setups[2].StartServer())
log.Debug("first and third servers are now up")
// Stop the servers
defer func() {
- testSetups{setups[0], setups[2]}.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ testSetups{setups[0], setups[2]}.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
diff --git a/src/dbnode/integration/peers_bootstrap_none_available_test.go b/src/dbnode/integration/peers_bootstrap_none_available_test.go
index 83ff7bef5f..b4d929c391 100644
--- a/src/dbnode/integration/peers_bootstrap_none_available_test.go
+++ b/src/dbnode/integration/peers_bootstrap_none_available_test.go
@@ -55,7 +55,7 @@ func TestPeersBootstrapNoneAvailable(t *testing.T) {
SetBufferFuture(2 * time.Minute)
namesp, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().SetRetentionOptions(retentionOpts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{namesp}).
// Use TChannel clients for writing / reading because we want to target individual nodes at a time
// and not write/read all nodes in the cluster.
@@ -107,13 +107,13 @@ func TestPeersBootstrapNoneAvailable(t *testing.T) {
// Start both servers "simultaneously"
go func() {
- if err := setups[0].startServer(); err != nil {
+ if err := setups[0].StartServer(); err != nil {
panic(err)
}
serversAreUp.Done()
}()
go func() {
- if err := setups[1].startServer(); err != nil {
+ if err := setups[1].StartServer(); err != nil {
panic(err)
}
serversAreUp.Done()
@@ -124,8 +124,8 @@ func TestPeersBootstrapNoneAvailable(t *testing.T) {
// Stop the servers
defer func() {
- setups.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ setups.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
diff --git a/src/dbnode/integration/peers_bootstrap_select_best_test.go b/src/dbnode/integration/peers_bootstrap_select_best_test.go
index 7ed9a764f7..62efd41497 100644
--- a/src/dbnode/integration/peers_bootstrap_select_best_test.go
+++ b/src/dbnode/integration/peers_bootstrap_select_best_test.go
@@ -49,7 +49,7 @@ func TestPeersBootstrapSelectBest(t *testing.T) {
SetBufferFuture(2 * time.Minute)
namesp, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().SetRetentionOptions(retentionOpts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{namesp}).
// Use TChannel clients for writing / reading because we want to target individual nodes at a time
// and not write/read all nodes in the cluster.
@@ -65,7 +65,7 @@ func TestPeersBootstrapSelectBest(t *testing.T) {
defer closeFn()
// Write test data alternating missing data for left/right nodes
- now := setups[0].getNowFn()
+ now := setups[0].NowFn()()
blockSize := retentionOpts.BlockSize()
// Make sure we have multiple blocks of data for multiple series to exercise
// the grouping and aggregating logic in the client peer bootstrapping process
@@ -105,18 +105,18 @@ func TestPeersBootstrapSelectBest(t *testing.T) {
require.NoError(t, writeTestDataToDisk(namesp, setups[1], right, 0))
// Start the first two servers with filesystem bootstrappers
- setups[:2].parallel(func(s *testSetup) {
- require.NoError(t, s.startServer())
+ setups[:2].parallel(func(s TestSetup) {
+ require.NoError(t, s.StartServer())
})
// Start the last server with peers and filesystem bootstrappers
- require.NoError(t, setups[2].startServer())
+ require.NoError(t, setups[2].StartServer())
log.Debug("servers are now up")
// Stop the servers
defer func() {
- setups.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ setups.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
diff --git a/src/dbnode/integration/peers_bootstrap_simple_test.go b/src/dbnode/integration/peers_bootstrap_simple_test.go
index f52dce2f08..620bdc0b15 100644
--- a/src/dbnode/integration/peers_bootstrap_simple_test.go
+++ b/src/dbnode/integration/peers_bootstrap_simple_test.go
@@ -56,7 +56,7 @@ func testPeersBootstrapSimple(t *testing.T, setTestOpts setTestOptions, updateIn
SetBufferFuture(2 * time.Minute)
namesp, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().SetRetentionOptions(retentionOpts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{namesp}).
// Use TChannel clients for writing / reading because we want to target individual nodes at a time
// and not write/read all nodes in the cluster.
@@ -75,7 +75,7 @@ func testPeersBootstrapSimple(t *testing.T, setTestOpts setTestOptions, updateIn
defer closeFn()
// Write test data for first node
- now := setups[0].getNowFn()
+ now := setups[0].NowFn()()
blockSize := retentionOpts.BlockSize()
// Make sure we have multiple blocks of data for multiple series to exercise
// the grouping and aggregating logic in the client peer bootstrapping process
@@ -93,16 +93,16 @@ func testPeersBootstrapSimple(t *testing.T, setTestOpts setTestOptions, updateIn
require.NoError(t, writeTestDataToDisk(namesp, setups[0], seriesMaps, 0))
// Start the first server with filesystem bootstrapper
- require.NoError(t, setups[0].startServer())
+ require.NoError(t, setups[0].StartServer())
// Start the last server with peers and filesystem bootstrappers
- require.NoError(t, setups[1].startServer())
+ require.NoError(t, setups[1].StartServer())
log.Debug("servers are now up")
// Stop the servers
defer func() {
- setups.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ setups.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
diff --git a/src/dbnode/integration/peers_bootstrap_single_node_test.go b/src/dbnode/integration/peers_bootstrap_single_node_test.go
index b62b768a33..288d3c293c 100644
--- a/src/dbnode/integration/peers_bootstrap_single_node_test.go
+++ b/src/dbnode/integration/peers_bootstrap_single_node_test.go
@@ -50,7 +50,7 @@ func TestPeersBootstrapSingleNode(t *testing.T) {
SetBufferFuture(2 * time.Minute)
namesp, err := namespace.NewMetadata(testNamespaces[0], namespace.NewOptions().SetRetentionOptions(retentionOpts))
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{namesp}).
// Use TChannel clients for writing / reading because we want to target individual nodes at a time
// and not write/read all nodes in the cluster.
@@ -64,7 +64,7 @@ func TestPeersBootstrapSingleNode(t *testing.T) {
defer closeFn()
// Write test data
- now := setups[0].getNowFn()
+ now := setups[0].NowFn()()
blockSize := retentionOpts.BlockSize()
seriesMaps := generate.BlocksByStart([]generate.BlockConfig{
{IDs: []string{"foo", "baz"}, NumPoints: 90, Start: now.Add(-4 * blockSize)},
@@ -77,16 +77,16 @@ func TestPeersBootstrapSingleNode(t *testing.T) {
// Set the time to one blockSize in the future (for which we do not have
// a fileset file) to ensure we try and use the peer bootstrapper.
- setups[0].setNowFn(now.Add(blockSize))
+ setups[0].SetNowFn(now.Add(blockSize))
// Start the server with peers and filesystem bootstrappers
- require.NoError(t, setups[0].startServer())
+ require.NoError(t, setups[0].StartServer())
log.Debug("servers are now up")
// Stop the servers
defer func() {
- setups.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ setups.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
diff --git a/src/dbnode/integration/repair_test.go b/src/dbnode/integration/repair_test.go
index ff0364b1f7..b3a93e7bca 100644
--- a/src/dbnode/integration/repair_test.go
+++ b/src/dbnode/integration/repair_test.go
@@ -189,7 +189,7 @@ func testRepair(
SetRetentionOptions(retentionOpts)
namesp, err := namespace.NewMetadata(testNamespaces[0], nsOpts)
require.NoError(t, err)
- opts := newTestOptions(t).
+ opts := NewTestOptions(t).
SetNamespaces([]namespace.Metadata{namesp}).
// Use TChannel clients for writing / reading because we want to target individual nodes at a time
// and not write/read all nodes in the cluster.
@@ -206,9 +206,9 @@ func testRepair(
// Ensure that the current time is set such that the previous block is flushable.
blockSize := retentionOpts.BlockSize()
- now := setups[0].getNowFn().Truncate(blockSize).Add(retentionOpts.BufferPast()).Add(time.Second)
+ now := setups[0].NowFn()().Truncate(blockSize).Add(retentionOpts.BufferPast()).Add(time.Second)
for _, setup := range setups {
- setup.setNowFn(now)
+ setup.SetNowFn(now)
}
node0Data, node1Data, node2Data, allData := genRepairData(now, blockSize)
@@ -223,8 +223,8 @@ func testRepair(
}
// Start the servers with filesystem bootstrappers.
- setups.parallel(func(s *testSetup) {
- if err := s.startServer(); err != nil {
+ setups.parallel(func(s TestSetup) {
+ if err := s.StartServer(); err != nil {
panic(err)
}
})
@@ -232,18 +232,18 @@ func testRepair(
// Stop the servers.
defer func() {
- setups.parallel(func(s *testSetup) {
- require.NoError(t, s.stopServer())
+ setups.parallel(func(s TestSetup) {
+ require.NoError(t, s.StopServer())
})
log.Debug("servers are now down")
}()
require.True(t, waitUntil(func() bool {
for _, setup := range setups {
- if err := checkFlushedDataFiles(setup.shardSet, setup.storageOpts, namesp.ID(), allData); err != nil {
+ if err := checkFlushedDataFiles(setup.ShardSet(), setup.StorageOpts(), namesp.ID(), allData); err != nil {
// Increment the time each time it fails to make sure background processes are able to proceed.
for _, s := range setups {
- s.setNowFn(s.getNowFn().Add(time.Millisecond))
+ s.SetNowFn(s.NowFn()().Add(time.Millisecond))
}
return false
}
diff --git a/src/dbnode/integration/roundtrip_test.go b/src/dbnode/integration/roundtrip_test.go
index 18e9270acb..ddc2044e8d 100644
--- a/src/dbnode/integration/roundtrip_test.go
+++ b/src/dbnode/integration/roundtrip_test.go
@@ -1,5 +1,3 @@
-// +build integration
-
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -35,7 +33,7 @@ import (
"github.com/stretchr/testify/require"
)
-type setTestOptions func(t *testing.T, testOpts testOptions) testOptions
+type setTestOptions func(t *testing.T, testOpts TestOptions) TestOptions
func TestRoundtrip(t *testing.T) {
testRoundtrip(t, nil, nil)
@@ -51,7 +49,7 @@ func setProtoTestInputConfig(inputData []generate.BlockConfig) {
}
}
-func setProtoTestOptions(t *testing.T, testOpts testOptions) testOptions {
+func setProtoTestOptions(t *testing.T, testOpts TestOptions) TestOptions {
var namespaces []namespace.Metadata
for _, nsMeta := range testOpts.Namespaces() {
nsOpts := nsMeta.Options().SetSchemaHistory(testSchemaHistory)
@@ -83,24 +81,24 @@ func assertProtoDataEqual(t *testing.T, expected, actual []generate.TestValue) b
}
func testRoundtrip(t *testing.T, setTestOpts setTestOptions, updateInputConfig generate.UpdateBlockConfig) {
 	if testing.Short() {
 		t.SkipNow() // Just skip if we're doing a short run
 	}
// Test setup
- testOpts := newTestOptions(t).
+ testOpts := NewTestOptions(t).
SetTickMinimumInterval(time.Second).
SetUseTChannelClientForReading(false).
SetUseTChannelClientForWriting(false)
if setTestOpts != nil {
testOpts = setTestOpts(t, testOpts)
}
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
// Input data setup
blockSize := namespace.NewOptions().RetentionOptions().BlockSize()
- now := testSetup.getNowFn()
+ now := testSetup.NowFn()()
inputData := []generate.BlockConfig{
{IDs: []string{"foo", "bar"}, NumPoints: 100, Start: now},
{IDs: []string{"foo", "baz"}, NumPoints: 50, Start: now.Add(blockSize)},
@@ -110,30 +108,30 @@ func testRoundtrip(t *testing.T, setTestOpts setTestOptions, updateInputConfig g
}
// Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
log.Debug("round trip test")
- require.NoError(t, testSetup.startServer())
+ require.NoError(t, testSetup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Debug("server is now down")
}()
// Write test data
seriesMaps := make(map[xtime.UnixNano]generate.SeriesBlock)
for _, input := range inputData {
- testSetup.setNowFn(input.Start)
+ testSetup.SetNowFn(input.Start)
testData := generate.Block(input)
seriesMaps[xtime.ToUnixNano(input.Start)] = testData
- require.NoError(t, testSetup.writeBatch(testNamespaces[0], testData))
+ require.NoError(t, testSetup.WriteBatch(testNamespaces[0], testData))
}
log.Debug("test data is now written")
// Advance time and sleep for a long enough time so data blocks are sealed during ticking
- testSetup.setNowFn(testSetup.getNowFn().Add(blockSize * 2))
- testSetup.sleepFor10xTickMinimumInterval()
+ testSetup.SetNowFn(testSetup.NowFn()().Add(blockSize * 2))
+ testSetup.SleepFor10xTickMinimumInterval()
// Verify in-memory data match what we've written
verifySeriesMaps(t, testSetup, testNamespaces[0], seriesMaps)
diff --git a/src/dbnode/integration/serve.go b/src/dbnode/integration/serve.go
index 53ab353bb3..2b56026f2f 100644
--- a/src/dbnode/integration/serve.go
+++ b/src/dbnode/integration/serve.go
@@ -105,7 +105,8 @@ func openAndServe(
contextPool := opts.ContextPool()
ttopts := tchannelthrift.NewOptions()
service := ttnode.NewService(db, ttopts)
- nativeNodeClose, err := ttnode.NewServer(service, tchannelNodeAddr, contextPool, nil).ListenAndServe()
+ nodeOpts := ttnode.NewOptions(nil)
+ nativeNodeClose, err := ttnode.NewServer(service, tchannelNodeAddr, contextPool, nodeOpts).ListenAndServe()
if err != nil {
return fmt.Errorf("could not open tchannelthrift interface %s: %v", tchannelNodeAddr, err)
}
diff --git a/src/dbnode/integration/setup.go b/src/dbnode/integration/setup.go
index 9ee3eb9aca..9186052bc7 100644
--- a/src/dbnode/integration/setup.go
+++ b/src/dbnode/integration/setup.go
@@ -41,11 +41,16 @@ import (
"github.com/m3db/m3/src/dbnode/integration/generate"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
"github.com/m3db/m3/src/dbnode/retention"
+ "github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/storage"
"github.com/m3db/m3/src/dbnode/storage/block"
+ "github.com/m3db/m3/src/dbnode/storage/bootstrap"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
+ bcl "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/commitlog"
+ bfs "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/fs"
"github.com/m3db/m3/src/dbnode/storage/cluster"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/series"
@@ -56,6 +61,7 @@ import (
xsync "github.com/m3db/m3/src/x/sync"
"github.com/stretchr/testify/require"
+ "github.com/uber-go/tally"
tchannel "github.com/uber/tchannel-go"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
@@ -94,10 +100,11 @@ var _ topology.MapProvider = &testSetup{}
type testSetup struct {
t *testing.T
- opts testOptions
+ opts TestOptions
schemaReg namespace.SchemaRegistry
logger *zap.Logger
+ scope tally.TestScope
db cluster.Database
storageOpts storage.Options
@@ -109,7 +116,7 @@ type testSetup struct {
shardSet sharding.ShardSet
getNowFn clock.NowFn
setNowFn nowSetterFn
- tchannelClient rpc.TChanNode
+ tchannelClient *TestTChannelClient
m3dbClient client.Client
 	// We need two distinct clients where one has the origin set to the same ID as
 	// the node itself, i.e. the client will behave exactly as if it is the node itself.
@@ -133,9 +140,64 @@ type testSetup struct {
closedCh chan struct{}
}
-func newTestSetup(t *testing.T, opts testOptions, fsOpts fs.Options) (*testSetup, error) {
+// TestSetup is a test setup.
+type TestSetup interface {
+ topology.MapProvider
+
+ Opts() TestOptions
+ SetOpts(TestOptions)
+ FilesystemOpts() fs.Options
+ AssertEqual(*testing.T, []generate.TestValue, []generate.TestValue) bool
+ DB() cluster.Database
+ Scope() tally.TestScope
+ M3DBClient() client.Client
+ M3DBVerificationAdminClient() client.AdminClient
+ Namespaces() []namespace.Metadata
+ TopologyInitializer() topology.Initializer
+ SetTopologyInitializer(topology.Initializer)
+ Fetch(req *rpc.FetchRequest) ([]generate.TestValue, error)
+ FilePathPrefix() string
+ StorageOpts() storage.Options
+ SetStorageOpts(storage.Options)
+ Origin() topology.Host
+ ServerIsBootstrapped() bool
+ StopServer() error
+ StartServer() error
+ StartServerDontWaitBootstrap() error
+ NowFn() clock.NowFn
+ SetNowFn(time.Time)
+ Close()
+ WriteBatch(ident.ID, generate.SeriesBlock) error
+ ShouldBeEqual() bool
+	// NOTE: This method is deprecated and should not be used in future tests.
+	// Existing tests should be migrated away from it as they are touched.
+ SleepFor10xTickMinimumInterval()
+ BlockLeaseManager() block.LeaseManager
+ ShardSet() sharding.ShardSet
+ SetShardSet(sharding.ShardSet)
+ GeneratorOptions(retention.Options) generate.Options
+ MaybeResetClients() error
+ SchemaRegistry() namespace.SchemaRegistry
+ NamespaceMetadataOrFail(ident.ID) namespace.Metadata
+ MustSetTickMinimumInterval(time.Duration)
+ WaitUntilServerIsBootstrapped() error
+ WaitUntilServerIsUp() error
+ WaitUntilServerIsDown() error
+ Truncate(*rpc.TruncateRequest) (int64, error)
+ InitializeBootstrappers(opts InitializeBootstrappersOptions) error
+}
+
+type storageOption func(storage.Options) storage.Options
+
+// NewTestSetup returns a new test setup for non-dockerized integration tests.
+func NewTestSetup(
+ t *testing.T,
+ opts TestOptions,
+ fsOpts fs.Options,
+ storageOptFns ...storageOption,
+) (TestSetup, error) {
if opts == nil {
- opts = newTestOptions(t)
+ opts = NewTestOptions(t)
}
nsInit := opts.NamespaceInitializer()
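// The variadic storageOptFns parameter added above lets callers override
// storage options at construction time; a sketch using the
// SetInstrumentOptions pattern seen later in this file (zap.NewNop is an
// arbitrary stand-in logger):
//
//	setup, err := NewTestSetup(t, opts, nil,
//		func(s storage.Options) storage.Options {
//			return s.SetInstrumentOptions(
//				s.InstrumentOptions().SetLogger(zap.NewNop()))
//		})
//	require.NoError(t, err)
//	defer setup.Close()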
@@ -189,6 +251,10 @@ func newTestSetup(t *testing.T, opts testOptions, fsOpts fs.Options) (*testSetup
storageOpts.InstrumentOptions().SetLogger(logger))
}
+ scope := tally.NewTestScope("", nil)
+ storageOpts = storageOpts.SetInstrumentOptions(
+ storageOpts.InstrumentOptions().SetMetricsScope(scope))
+
// Use specified series cache policy from environment if set.
seriesCachePolicy := strings.ToLower(os.Getenv("TEST_SERIES_CACHE_POLICY"))
if seriesCachePolicy != "" {
@@ -265,7 +331,7 @@ func newTestSetup(t *testing.T, opts testOptions, fsOpts fs.Options) (*testSetup
}
// Set up tchannel client
- channel, tc, err := tchannelClient(tchannelNodeAddr)
+ tchanClient, err := NewTChannelClient("integration-test", tchannelNodeAddr)
if err != nil {
return nil, err
}
@@ -346,7 +412,7 @@ func newTestSetup(t *testing.T, opts testOptions, fsOpts fs.Options) (*testSetup
// Do not need a block retriever for CacheAll policy
default:
blockRetrieverMgr := block.NewDatabaseBlockRetrieverManager(
- func(md namespace.Metadata) (block.DatabaseBlockRetriever, error) {
+ func(md namespace.Metadata, shardSet sharding.ShardSet) (block.DatabaseBlockRetriever, error) {
retrieverOpts := fs.NewBlockRetrieverOptions().
SetBlockLeaseManager(blockLeaseManager)
retriever, err := fs.NewBlockRetriever(retrieverOpts, fsOpts)
@@ -354,7 +420,7 @@ func newTestSetup(t *testing.T, opts testOptions, fsOpts fs.Options) (*testSetup
return nil, err
}
- if err := retriever.Open(md); err != nil {
+ if err := retriever.Open(md, shardSet); err != nil {
return nil, err
}
return retriever, nil
@@ -389,11 +455,16 @@ func newTestSetup(t *testing.T, opts testOptions, fsOpts fs.Options) (*testSetup
opts = opts.SetVerifySeriesDebugFilePathPrefix(debugFilePrefix)
}
+ for _, fn := range storageOptFns {
+ storageOpts = fn(storageOpts)
+ }
+
return &testSetup{
t: t,
opts: opts,
schemaReg: schemaReg,
logger: logger,
+ scope: scope,
storageOpts: storageOpts,
blockLeaseManager: blockLeaseManager,
fsOpts: fsOpts,
@@ -403,12 +474,11 @@ func newTestSetup(t *testing.T, opts testOptions, fsOpts fs.Options) (*testSetup
shardSet: shardSet,
getNowFn: getNowFn,
setNowFn: setNowFn,
- tchannelClient: tc,
+ tchannelClient: tchanClient,
m3dbClient: adminClient.(client.Client),
m3dbAdminClient: adminClient,
m3dbVerificationAdminClient: verificationAdminClient,
workerPool: workerPool,
- channel: channel,
filePathPrefix: filePathPrefix,
namespaces: opts.Namespaces(),
doneCh: make(chan struct{}),
@@ -462,8 +532,91 @@ func guessBestTruncateBlockSize(mds []namespace.Metadata) (time.Duration, bool)
// otherwise, we are guessing
return guess, true
}
+func (ts *testSetup) ShouldBeEqual() bool {
+ return ts.assertEqual == nil
+}
+
+func (ts *testSetup) AssertEqual(t *testing.T, a, b []generate.TestValue) bool {
+ return ts.assertEqual(t, a, b)
+}
+
+func (ts *testSetup) DB() cluster.Database {
+ return ts.db
+}
+
+func (ts *testSetup) Scope() tally.TestScope {
+ return ts.scope
+}
+
+func (ts *testSetup) M3DBClient() client.Client {
+ return ts.m3dbClient
+}
+
+func (ts *testSetup) M3DBVerificationAdminClient() client.AdminClient {
+ return ts.m3dbVerificationAdminClient
+}
+
+func (ts *testSetup) Namespaces() []namespace.Metadata {
+ return ts.namespaces
+}
+
+func (ts *testSetup) NowFn() clock.NowFn {
+ return ts.getNowFn
+}
+
+func (ts *testSetup) SetNowFn(t time.Time) {
+ ts.setNowFn(t)
+}
+
+func (ts *testSetup) FilesystemOpts() fs.Options {
+ return ts.fsOpts
+}
+
+func (ts *testSetup) Opts() TestOptions {
+ return ts.opts
+}
+
+func (ts *testSetup) SetOpts(opts TestOptions) {
+ ts.opts = opts
+}
+
+func (ts *testSetup) Origin() topology.Host {
+ return ts.origin
+}
+
+func (ts *testSetup) FilePathPrefix() string {
+ return ts.filePathPrefix
+}
+
+func (ts *testSetup) StorageOpts() storage.Options {
+ return ts.storageOpts
+}
+
+func (ts *testSetup) SetStorageOpts(opts storage.Options) {
+ ts.storageOpts = opts
+}
+
+func (ts *testSetup) TopologyInitializer() topology.Initializer {
+ return ts.topoInit
+}
+
+func (ts *testSetup) SetTopologyInitializer(init topology.Initializer) {
+ ts.topoInit = init
+}
+
+func (ts *testSetup) BlockLeaseManager() block.LeaseManager {
+ return ts.blockLeaseManager
+}
+
+func (ts *testSetup) ShardSet() sharding.ShardSet {
+ return ts.shardSet
+}
+
+func (ts *testSetup) SetShardSet(shardSet sharding.ShardSet) {
+ ts.shardSet = shardSet
+}
-func (ts *testSetup) namespaceMetadataOrFail(id ident.ID) namespace.Metadata {
+func (ts *testSetup) NamespaceMetadataOrFail(id ident.ID) namespace.Metadata {
for _, md := range ts.namespaces {
if md.ID().Equal(id) {
return md
@@ -473,7 +626,7 @@ func (ts *testSetup) namespaceMetadataOrFail(id ident.ID) namespace.Metadata {
return nil
}
-func (ts *testSetup) generatorOptions(ropts retention.Options) generate.Options {
+func (ts *testSetup) GeneratorOptions(ropts retention.Options) generate.Options {
var (
storageOpts = ts.storageOpts
fsOpts = storageOpts.CommitLogOptions().FilesystemOptions()
@@ -492,46 +645,46 @@ func (ts *testSetup) generatorOptions(ropts retention.Options) generate.Options
SetEncoderPool(storageOpts.EncoderPool())
}
-func (ts *testSetup) serverIsBootstrapped() bool {
+func (ts *testSetup) ServerIsBootstrapped() bool {
resp, err := ts.health()
return err == nil && resp.Bootstrapped
}
-func (ts *testSetup) serverIsUp() bool {
+func (ts *testSetup) ServerIsUp() bool {
_, err := ts.health()
return err == nil
}
-func (ts *testSetup) serverIsDown() bool {
- return !ts.serverIsUp()
+func (ts *testSetup) ServerIsDown() bool {
+ return !ts.ServerIsUp()
}
-func (ts *testSetup) waitUntilServerIsBootstrapped() error {
- if waitUntil(ts.serverIsBootstrapped, ts.opts.ServerStateChangeTimeout()) {
+func (ts *testSetup) WaitUntilServerIsBootstrapped() error {
+ if waitUntil(ts.ServerIsBootstrapped, ts.opts.ServerStateChangeTimeout()) {
return nil
}
return errServerStartTimedOut
}
-func (ts *testSetup) waitUntilServerIsUp() error {
- if waitUntil(ts.serverIsUp, ts.opts.ServerStateChangeTimeout()) {
+func (ts *testSetup) WaitUntilServerIsUp() error {
+ if waitUntil(ts.ServerIsUp, ts.opts.ServerStateChangeTimeout()) {
return nil
}
return errServerStopTimedOut
}
-func (ts *testSetup) waitUntilServerIsDown() error {
- if waitUntil(ts.serverIsDown, ts.opts.ServerStateChangeTimeout()) {
+func (ts *testSetup) WaitUntilServerIsDown() error {
+ if waitUntil(ts.ServerIsDown, ts.opts.ServerStateChangeTimeout()) {
return nil
}
return errServerStopTimedOut
}
-func (ts *testSetup) startServerDontWaitBootstrap() error {
+func (ts *testSetup) StartServerDontWaitBootstrap() error {
return ts.startServerBase(false)
}
-func (ts *testSetup) startServer() error {
+func (ts *testSetup) StartServer() error {
return ts.startServerBase(true)
}
@@ -563,8 +716,8 @@ func (ts *testSetup) startServerBase(waitForBootstrap bool) error {
return err
}
- // Check if clients were closed by stopServer and need to be re-created.
- ts.maybeResetClients()
+ // Check if clients were closed by StopServer and need to be re-created.
+ ts.MaybeResetClients()
go func() {
if err := openAndServe(
@@ -581,9 +734,9 @@ func (ts *testSetup) startServerBase(waitForBootstrap bool) error {
ts.closedCh <- struct{}{}
}()
- waitFn := ts.waitUntilServerIsUp
+ waitFn := ts.WaitUntilServerIsUp
if waitForBootstrap {
- waitFn = ts.waitUntilServerIsBootstrapped
+ waitFn = ts.WaitUntilServerIsBootstrapped
}
go func() {
select {
@@ -601,7 +754,7 @@ func (ts *testSetup) startServerBase(waitForBootstrap bool) error {
return err
}
-func (ts *testSetup) stopServer() error {
+func (ts *testSetup) StopServer() error {
ts.doneCh <- struct{}{}
if ts.m3dbClient.DefaultSessionActive() {
@@ -615,7 +768,7 @@ func (ts *testSetup) stopServer() error {
defer session.Close()
}
- if err := ts.waitUntilServerIsDown(); err != nil {
+ if err := ts.WaitUntilServerIsDown(); err != nil {
return err
}
@@ -624,32 +777,38 @@ func (ts *testSetup) stopServer() error {
return nil
}
-func (ts *testSetup) writeBatch(namespace ident.ID, seriesList generate.SeriesBlock) error {
+func (ts *testSetup) WriteBatch(namespace ident.ID, seriesList generate.SeriesBlock) error {
if ts.opts.UseTChannelClientForWriting() {
- return tchannelClientWriteBatch(ts.tchannelClient, ts.opts.WriteRequestTimeout(), namespace, seriesList)
+ return ts.tchannelClient.TChannelClientWriteBatch(
+ ts.opts.WriteRequestTimeout(), namespace, seriesList)
}
return m3dbClientWriteBatch(ts.m3dbClient, ts.workerPool, namespace, seriesList)
}
-func (ts *testSetup) fetch(req *rpc.FetchRequest) ([]generate.TestValue, error) {
+func (ts *testSetup) Fetch(req *rpc.FetchRequest) ([]generate.TestValue, error) {
if ts.opts.UseTChannelClientForReading() {
- return tchannelClientFetch(ts.tchannelClient, ts.opts.ReadRequestTimeout(), req)
+ fetched, err := ts.tchannelClient.TChannelClientFetch(ts.opts.ReadRequestTimeout(), req)
+ if err != nil {
+ return nil, err
+ }
+ dp := toDatapoints(fetched)
+ return dp, nil
}
return m3dbClientFetch(ts.m3dbClient, req)
}
-func (ts *testSetup) truncate(req *rpc.TruncateRequest) (int64, error) {
+func (ts *testSetup) Truncate(req *rpc.TruncateRequest) (int64, error) {
if ts.opts.UseTChannelClientForTruncation() {
- return tchannelClientTruncate(ts.tchannelClient, ts.opts.TruncateRequestTimeout(), req)
+ return ts.tchannelClient.TChannelClientTruncate(ts.opts.TruncateRequestTimeout(), req)
}
return m3dbClientTruncate(ts.m3dbClient, req)
}
func (ts *testSetup) health() (*rpc.NodeHealthResult_, error) {
- return tchannelClientHealth(ts.tchannelClient)
+ return ts.tchannelClient.TChannelClientHealth(5 * time.Second)
}
-func (ts *testSetup) close() {
+func (ts *testSetup) Close() {
if ts.channel != nil {
ts.channel.Close()
}
@@ -658,7 +817,7 @@ func (ts *testSetup) close() {
}
}
-func (ts *testSetup) mustSetTickMinimumInterval(tickMinInterval time.Duration) {
+func (ts *testSetup) MustSetTickMinimumInterval(tickMinInterval time.Duration) {
runtimeMgr := ts.storageOpts.RuntimeOptionsManager()
existingOptions := runtimeMgr.Get()
newOptions := existingOptions.SetTickMinimumInterval(tickMinInterval)
@@ -669,7 +828,7 @@ func (ts *testSetup) mustSetTickMinimumInterval(tickMinInterval time.Duration) {
}
// convenience wrapper used to ensure a tick occurs
-func (ts *testSetup) sleepFor10xTickMinimumInterval() {
+func (ts *testSetup) SleepFor10xTickMinimumInterval() {
// Check the runtime options manager instead of relying on ts.opts
// because the tick interval can change at runtime.
runtimeMgr := ts.storageOpts.RuntimeOptionsManager()
@@ -712,9 +871,9 @@ func (ts *testSetup) httpDebugAddr() string {
return *httpDebugAddr
}
-func (ts *testSetup) maybeResetClients() error {
+func (ts *testSetup) MaybeResetClients() error {
if ts.m3dbClient == nil {
- // Recreate the clients as their session was destroyed by stopServer()
+ // Recreate the clients as their session was destroyed by StopServer()
adminClient, verificationAdminClient, err := newClients(
ts.topoInit, ts.opts, ts.schemaReg, ts.hostID, ts.tchannelNodeAddr())
if err != nil {
@@ -728,11 +887,85 @@ func (ts *testSetup) maybeResetClients() error {
return nil
}
+func (ts *testSetup) SchemaRegistry() namespace.SchemaRegistry {
+ return ts.schemaReg
+}
+
+// InitializeBootstrappersOptions supplies options for bootstrapper initialization.
+type InitializeBootstrappersOptions struct {
+ CommitLogOptions commitlog.Options
+ WithCommitLog bool
+ WithFileSystem bool
+}
+
+func (o InitializeBootstrappersOptions) validate() error {
+ if o.WithCommitLog && o.CommitLogOptions == nil {
+ return errors.New("commit log options required when initializing a commit log bootstrapper")
+ }
+ return nil
+}
+
+func (ts *testSetup) InitializeBootstrappers(opts InitializeBootstrappersOptions) error {
+ var err error
+ if err := opts.validate(); err != nil {
+ return err
+ }
+
+ bs := bootstrapper.NewNoOpAllBootstrapperProvider()
+ storageOpts := ts.StorageOpts()
+ bsOpts := newDefaulTestResultOptions(storageOpts)
+ fsOpts := storageOpts.CommitLogOptions().FilesystemOptions()
+ if opts.WithCommitLog {
+ bclOpts := bcl.NewOptions().
+ SetResultOptions(bsOpts).
+ SetCommitLogOptions(opts.CommitLogOptions).
+ SetRuntimeOptionsManager(runtime.NewOptionsManager())
+ bs, err = bcl.NewCommitLogBootstrapperProvider(
+ bclOpts, mustInspectFilesystem(fsOpts), bs)
+ if err != nil {
+ return err
+ }
+ }
+
+ if opts.WithFileSystem {
+ persistMgr, err := fs.NewPersistManager(fsOpts)
+ if err != nil {
+ return err
+ }
+ storageIdxOpts := storageOpts.IndexOptions()
+ compactor, err := newCompactorWithErr(storageIdxOpts)
+ if err != nil {
+ return err
+ }
+ bfsOpts := bfs.NewOptions().
+ SetResultOptions(bsOpts).
+ SetFilesystemOptions(fsOpts).
+ SetIndexOptions(storageIdxOpts).
+ SetPersistManager(persistMgr).
+ SetCompactor(compactor)
+ bs, err = bfs.NewFileSystemBootstrapperProvider(bfsOpts, bs)
+ if err != nil {
+ return err
+ }
+ }
+
+ processOpts := bootstrap.NewProcessOptions().
+ SetTopologyMapProvider(ts).
+ SetOrigin(ts.Origin())
+ process, err := bootstrap.NewProcessProvider(bs, processOpts, bsOpts)
+ if err != nil {
+ return err
+ }
+ ts.SetStorageOpts(storageOpts.SetBootstrapProcessProvider(process))
+
+ return nil
+}
+
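For orientation, a minimal sketch (not part of this diff) of how an integration test in this package might drive the new helper; the test name and option values are illustrative assumptions:

func TestBootstrapWiringSketch(t *testing.T) {
    setup, err := NewTestSetup(t, NewTestOptions(t), nil)
    require.NoError(t, err)
    defer setup.Close()

    // WithCommitLog without CommitLogOptions would fail validate() above.
    err = setup.InitializeBootstrappers(InitializeBootstrappersOptions{
        CommitLogOptions: setup.StorageOpts().CommitLogOptions(),
        WithCommitLog:    true,
        WithFileSystem:   true,
    })
    require.NoError(t, err)
}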
// Implements topology.MapProvider, and makes sure that the topology
// map provided always comes from the most recent database in the testSetup
-// since they get\ recreated everytime startServer/stopServer is called and
+// since they get recreated every time StartServer/StopServer is called and
// are not available (nil value) after creation but before the first call
-// to startServer.
+// to StartServer.
func (ts *testSetup) TopologyMap() (topology.Map, error) {
return ts.db.TopologyMap()
}
@@ -743,7 +976,7 @@ func newOrigin(id string, tchannelNodeAddr string) topology.Host {
func newClients(
topoInit topology.Initializer,
- opts testOptions,
+ opts TestOptions,
schemaReg namespace.SchemaRegistry,
id,
tchannelNodeAddr string,
@@ -786,9 +1019,9 @@ func newClients(
return adminClient, verificationAdminClient, nil
}
-type testSetups []*testSetup
+type testSetups []TestSetup
-func (ts testSetups) parallel(fn func(s *testSetup)) {
+func (ts testSetups) parallel(fn func(s TestSetup)) {
var wg sync.WaitGroup
for _, setup := range ts {
s := setup
@@ -820,7 +1053,7 @@ func newNodes(
) (testSetups, topology.Initializer, closeFn) {
var (
log = zap.L()
- opts = newTestOptions(t).
+ opts = NewTestOptions(t).
SetNamespaces(nspaces).
SetTickMinimumInterval(3 * time.Second).
SetWriteNewSeriesAsync(asyncInserts).
@@ -855,9 +1088,9 @@ func newNodes(
nodeClose := func() { // Clean up running servers at end of test
log.Debug("servers closing")
- nodes.parallel(func(s *testSetup) {
- if s.serverIsBootstrapped() {
- require.NoError(t, s.stopServer())
+ nodes.parallel(func(s TestSetup) {
+ if s.ServerIsBootstrapped() {
+ require.NoError(t, s.StopServer())
}
})
closeFn()
@@ -866,3 +1099,12 @@ func newNodes(
return nodes, topoInit, nodeClose
}
+
+func mustInspectFilesystem(fsOpts fs.Options) fs.Inspection {
+ inspection, err := fs.InspectFilesystem(fsOpts)
+ if err != nil {
+ panic(err)
+ }
+
+ return inspection
+}
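Since newNodes still returns the package-private testSetups slice, multi-node tests can fan start/stop work across the now-exported interface; a hedged sketch assuming this package (the helper name is hypothetical):

func startAllNodes(t *testing.T, nodes testSetups) (stop func()) {
    // parallel runs the closure against every setup concurrently and
    // waits for all of them before returning.
    nodes.parallel(func(s TestSetup) {
        require.NoError(t, s.StartServer())
    })
    // Mirror nodeClose above: only stop servers that actually bootstrapped.
    return func() {
        nodes.parallel(func(s TestSetup) {
            if s.ServerIsBootstrapped() {
                require.NoError(t, s.StopServer())
            }
        })
    }
}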
diff --git a/src/dbnode/integration/truncate_namespace_test.go b/src/dbnode/integration/truncate_namespace_test.go
index 8c20a07fde..0d90f0edf6 100644
--- a/src/dbnode/integration/truncate_namespace_test.go
+++ b/src/dbnode/integration/truncate_namespace_test.go
@@ -40,27 +40,27 @@ func TestTruncateNamespace(t *testing.T) {
t.SkipNow() // Just skip if we're doing a short run
}
// Test setup
- testOpts := newTestOptions(t)
- testSetup, err := newTestSetup(t, testOpts, nil)
+ testOpts := NewTestOptions(t)
+ testSetup, err := NewTestSetup(t, testOpts, nil)
require.NoError(t, err)
- defer testSetup.close()
+ defer testSetup.Close()
blockSize := namespace.NewOptions().RetentionOptions().BlockSize()
// Start the server
- log := testSetup.storageOpts.InstrumentOptions().Logger()
+ log := testSetup.StorageOpts().InstrumentOptions().Logger()
log.Debug("truncate namespace test")
- require.NoError(t, testSetup.startServer())
+ require.NoError(t, testSetup.StartServer())
log.Debug("server is now up")
// Stop the server
defer func() {
- require.NoError(t, testSetup.stopServer())
+ require.NoError(t, testSetup.StopServer())
log.Debug("server is now down")
}()
// Write test data
- now := testSetup.getNowFn()
+ now := testSetup.NowFn()()
seriesMaps := make(map[xtime.UnixNano]generate.SeriesBlock)
inputData := []struct {
namespace ident.ID
@@ -74,10 +74,10 @@ func TestTruncateNamespace(t *testing.T) {
},
}
for _, input := range inputData {
- testSetup.setNowFn(input.conf.Start)
+ testSetup.SetNowFn(input.conf.Start)
testData := generate.Block(input.conf)
seriesMaps[xtime.ToUnixNano(input.conf.Start)] = testData
- require.NoError(t, testSetup.writeBatch(input.namespace, testData))
+ require.NoError(t, testSetup.WriteBatch(input.namespace, testData))
}
log.Debug("test data is now written")
@@ -90,30 +90,30 @@ func TestTruncateNamespace(t *testing.T) {
log.Debug("fetching data from nonexistent namespace")
fetchReq.NameSpace = "nonexistent"
- _, err = testSetup.fetch(fetchReq)
+ _, err = testSetup.Fetch(fetchReq)
require.Error(t, err)
log.Debug("fetching data from wrong namespace")
fetchReq.NameSpace = testNamespaces[1].String()
- res, err := testSetup.fetch(fetchReq)
+ res, err := testSetup.Fetch(fetchReq)
require.NoError(t, err)
require.Equal(t, 0, len(res))
log.Sugar().Debugf("fetching data from namespace %s", testNamespaces[0])
fetchReq.NameSpace = testNamespaces[0].String()
- res, err = testSetup.fetch(fetchReq)
+ res, err = testSetup.Fetch(fetchReq)
require.NoError(t, err)
require.Equal(t, 100, len(res))
log.Sugar().Debugf("truncate namespace %s", testNamespaces[0])
truncateReq := rpc.NewTruncateRequest()
truncateReq.NameSpace = testNamespaces[0].Bytes()
- truncated, err := testSetup.truncate(truncateReq)
+ truncated, err := testSetup.Truncate(truncateReq)
require.NoError(t, err)
require.Equal(t, int64(1), truncated)
log.Sugar().Debugf("fetching data from namespace %s again", testNamespaces[0])
- res, err = testSetup.fetch(fetchReq)
+ res, err = testSetup.Fetch(fetchReq)
require.Error(t, err)
log.Sugar().Debugf("fetching data from a different namespace %s", testNamespaces[1])
@@ -121,7 +121,7 @@ func TestTruncateNamespace(t *testing.T) {
fetchReq.NameSpace = testNamespaces[1].String()
fetchReq.RangeStart = xtime.ToNormalizedTime(now.Add(blockSize), time.Second)
fetchReq.RangeEnd = xtime.ToNormalizedTime(now.Add(blockSize*2), time.Second)
- res, err = testSetup.fetch(fetchReq)
+ res, err = testSetup.Fetch(fetchReq)
require.NoError(t, err)
require.Equal(t, 50, len(res))
}
diff --git a/src/dbnode/integration/write_quorum_test.go b/src/dbnode/integration/write_quorum_test.go
index 29ef32ca47..82f7cd96ec 100644
--- a/src/dbnode/integration/write_quorum_test.go
+++ b/src/dbnode/integration/write_quorum_test.go
@@ -55,8 +55,8 @@ func TestNormalQuorumOnlyOneUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- defer func() { require.NoError(t, nodes[0].stopServer()) }()
+ require.NoError(t, nodes[0].StartServer())
+ defer func() { require.NoError(t, nodes[0].StopServer()) }()
// Writes succeed to one node
assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
@@ -81,10 +81,10 @@ func TestNormalQuorumOnlyTwoUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- defer func() { require.NoError(t, nodes[0].stopServer()) }()
- require.NoError(t, nodes[1].startServer())
- defer func() { require.NoError(t, nodes[1].stopServer()) }()
+ require.NoError(t, nodes[0].StartServer())
+ defer func() { require.NoError(t, nodes[0].StopServer()) }()
+ require.NoError(t, nodes[1].StartServer())
+ defer func() { require.NoError(t, nodes[1].StopServer()) }()
// Writes succeed to two nodes
assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
@@ -109,12 +109,12 @@ func TestNormalQuorumAllUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- defer func() { require.NoError(t, nodes[0].stopServer()) }()
- require.NoError(t, nodes[1].startServer())
- defer func() { require.NoError(t, nodes[1].stopServer()) }()
- require.NoError(t, nodes[2].startServer())
- defer func() { require.NoError(t, nodes[2].stopServer()) }()
+ require.NoError(t, nodes[0].StartServer())
+ defer func() { require.NoError(t, nodes[0].StopServer()) }()
+ require.NoError(t, nodes[1].StartServer())
+ defer func() { require.NoError(t, nodes[1].StopServer()) }()
+ require.NoError(t, nodes[2].StartServer())
+ defer func() { require.NoError(t, nodes[2].StopServer()) }()
// Writes succeed to all nodes
assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
@@ -140,11 +140,11 @@ func TestAddNodeQuorumOnlyLeavingInitializingUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- defer func() { require.NoError(t, nodes[0].stopServer()) }()
+ require.NoError(t, nodes[0].StartServer())
+ defer func() { require.NoError(t, nodes[0].StopServer()) }()
- require.NoError(t, nodes[3].startServerDontWaitBootstrap())
- defer func() { require.NoError(t, nodes[3].stopServer()) }()
+ require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
+ defer func() { require.NoError(t, nodes[3].StopServer()) }()
// No writes succeed to available nodes
assert.Error(t, testWrite(topology.ConsistencyLevelOne))
@@ -170,12 +170,12 @@ func TestAddNodeQuorumOnlyOneNormalAndLeavingInitializingUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- defer func() { require.NoError(t, nodes[0].stopServer()) }()
- require.NoError(t, nodes[1].startServer())
- defer func() { require.NoError(t, nodes[1].stopServer()) }()
- require.NoError(t, nodes[3].startServerDontWaitBootstrap())
- defer func() { require.NoError(t, nodes[3].stopServer()) }()
+ require.NoError(t, nodes[0].StartServer())
+ defer func() { require.NoError(t, nodes[0].StopServer()) }()
+ require.NoError(t, nodes[1].StartServer())
+ defer func() { require.NoError(t, nodes[1].StopServer()) }()
+ require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
+ defer func() { require.NoError(t, nodes[3].StopServer()) }()
// Writes succeed to one available node
assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
@@ -201,14 +201,14 @@ func TestAddNodeQuorumAllUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- defer func() { require.NoError(t, nodes[0].stopServer()) }()
- require.NoError(t, nodes[1].startServer())
- defer func() { require.NoError(t, nodes[1].stopServer()) }()
- require.NoError(t, nodes[2].startServer())
- defer func() { require.NoError(t, nodes[2].stopServer()) }()
- require.NoError(t, nodes[3].startServerDontWaitBootstrap())
- defer func() { require.NoError(t, nodes[3].stopServer()) }()
+ require.NoError(t, nodes[0].StartServer())
+ defer func() { require.NoError(t, nodes[0].StopServer()) }()
+ require.NoError(t, nodes[1].StartServer())
+ defer func() { require.NoError(t, nodes[1].StopServer()) }()
+ require.NoError(t, nodes[2].StartServer())
+ defer func() { require.NoError(t, nodes[2].StopServer()) }()
+ require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
+ defer func() { require.NoError(t, nodes[3].StopServer()) }()
// Writes succeed to two available nodes
assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
@@ -231,10 +231,10 @@ func makeTestWrite(
nspaces := []namespace.Metadata{md}
nodes, topoInit, closeFn := newNodes(t, numShards, instances, nspaces, false)
- now := nodes[0].getNowFn()
+ now := nodes[0].NowFn()()
for _, node := range nodes {
- node.opts = node.opts.SetNumShards(numShards)
+ node.SetOpts(node.Opts().SetNumShards(numShards))
}
clientopts := client.NewOptions().
diff --git a/src/dbnode/integration/write_read_high_concurrency_test.go b/src/dbnode/integration/write_read_high_concurrency_test.go
index d23e8524eb..dddb1b79fa 100644
--- a/src/dbnode/integration/write_read_high_concurrency_test.go
+++ b/src/dbnode/integration/write_read_high_concurrency_test.go
@@ -65,9 +65,9 @@ func TestWriteReadHighConcurrencyTestMultiNS(t *testing.T) {
SetReadConsistencyLevel(topology.ReadConsistencyLevelAll)
defer closeFn()
- log := nodes[0].storageOpts.InstrumentOptions().Logger()
+ log := nodes[0].StorageOpts().InstrumentOptions().Logger()
for _, n := range nodes {
- require.NoError(t, n.startServer())
+ require.NoError(t, n.StartServer())
}
c, err := client.NewClient(clientopts)
@@ -79,7 +79,7 @@ func TestWriteReadHighConcurrencyTestMultiNS(t *testing.T) {
var (
insertWg sync.WaitGroup
)
- now := nodes[0].db.Options().ClockOptions().NowFn()()
+ now := nodes[0].DB().Options().ClockOptions().NowFn()()
start := time.Now()
log.Info("starting data write")
diff --git a/src/dbnode/integration/write_read_timezone_test.go b/src/dbnode/integration/write_read_timezone_test.go
index 94bd240cdd..76403cf19b 100644
--- a/src/dbnode/integration/write_read_timezone_test.go
+++ b/src/dbnode/integration/write_read_timezone_test.go
@@ -76,19 +76,19 @@ func TestWriteReadTimezone(t *testing.T) {
require.NoError(t, err)
// Setup / start server
- opts := newTestOptions(t)
- setup, err := newTestSetup(t, opts, nil)
+ opts := NewTestOptions(t)
+ setup, err := NewTestSetup(t, opts, nil)
require.NoError(t, err)
- defer setup.close()
- require.NoError(t, setup.startServer())
- require.NoError(t, setup.waitUntilServerIsBootstrapped())
+ defer setup.Close()
+ require.NoError(t, setup.StartServer())
+ require.NoError(t, setup.WaitUntilServerIsBootstrapped())
// Make sure that the server's internal clock function returns pacific timezone
- start := setup.getNowFn()
- setup.setNowFn(start.In(pacificLocation))
+ start := setup.NowFn()()
+ setup.SetNowFn(start.In(pacificLocation))
// Instantiate a client
- client := setup.m3dbClient
+ client := setup.M3DBClient()
session, err := client.DefaultSession()
require.NoError(t, err)
defer session.Close()
diff --git a/src/dbnode/integration/write_tagged_quorum_test.go b/src/dbnode/integration/write_tagged_quorum_test.go
index d6b5f0f2a7..5a95d8729b 100644
--- a/src/dbnode/integration/write_tagged_quorum_test.go
+++ b/src/dbnode/integration/write_tagged_quorum_test.go
@@ -60,8 +60,8 @@ func TestWriteTaggedNormalQuorumOnlyOneUp(t *testing.T) {
defer closeFn()
// Writes succeed to one node
- require.NoError(t, nodes[0].startServer())
- defer func() { require.NoError(t, nodes[0].stopServer()) }()
+ require.NoError(t, nodes[0].StartServer())
+ defer func() { require.NoError(t, nodes[0].StopServer()) }()
assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
assert.Equal(t, 1, numNodesWithTaggedWrite(t, nodes))
@@ -88,10 +88,10 @@ func TestWriteTaggedNormalQuorumOnlyTwoUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- defer func() { require.NoError(t, nodes[0].stopServer()) }()
- require.NoError(t, nodes[1].startServer())
- defer func() { require.NoError(t, nodes[1].stopServer()) }()
+ require.NoError(t, nodes[0].StartServer())
+ defer func() { require.NoError(t, nodes[0].StopServer()) }()
+ require.NoError(t, nodes[1].StartServer())
+ defer func() { require.NoError(t, nodes[1].StopServer()) }()
// Writes succeed to two nodes
assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
@@ -118,12 +118,12 @@ func TestWriteTaggedNormalQuorumAllUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- defer func() { require.NoError(t, nodes[0].stopServer()) }()
- require.NoError(t, nodes[1].startServer())
- defer func() { require.NoError(t, nodes[1].stopServer()) }()
- require.NoError(t, nodes[2].startServer())
- defer func() { require.NoError(t, nodes[2].stopServer()) }()
+ require.NoError(t, nodes[0].StartServer())
+ defer func() { require.NoError(t, nodes[0].StopServer()) }()
+ require.NoError(t, nodes[1].StartServer())
+ defer func() { require.NoError(t, nodes[1].StopServer()) }()
+ require.NoError(t, nodes[2].StartServer())
+ defer func() { require.NoError(t, nodes[2].StopServer()) }()
// Writes succeed to all nodes
assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
@@ -152,22 +152,22 @@ func TestWriteTaggedAddNodeQuorumOnlyLeavingInitializingUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- defer func() { require.NoError(t, nodes[0].stopServer()) }()
- require.NoError(t, nodes[3].startServerDontWaitBootstrap())
- defer func() { require.NoError(t, nodes[3].stopServer()) }()
+ require.NoError(t, nodes[0].StartServer())
+ defer func() { require.NoError(t, nodes[0].StopServer()) }()
+ require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
+ defer func() { require.NoError(t, nodes[3].StopServer()) }()
// No writes succeed to available nodes
assert.Error(t, testWrite(topology.ConsistencyLevelOne))
- numWrites := numNodesWithTaggedWrite(t, []*testSetup{nodes[1], nodes[2]})
+ numWrites := numNodesWithTaggedWrite(t, []TestSetup{nodes[1], nodes[2]})
assert.True(t, numWrites == 0)
assert.Error(t, testWrite(topology.ConsistencyLevelMajority))
- numWrites = numNodesWithTaggedWrite(t, []*testSetup{nodes[1], nodes[2]})
+ numWrites = numNodesWithTaggedWrite(t, []TestSetup{nodes[1], nodes[2]})
assert.True(t, numWrites == 0)
assert.Error(t, testWrite(topology.ConsistencyLevelAll))
- numWrites = numNodesWithTaggedWrite(t, []*testSetup{nodes[1], nodes[2]})
+ numWrites = numNodesWithTaggedWrite(t, []TestSetup{nodes[1], nodes[2]})
assert.True(t, numWrites == 0)
}
@@ -189,24 +189,24 @@ func TestWriteTaggedAddNodeQuorumOnlyOneNormalAndLeavingInitializingUp(t *testin
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- defer func() { require.NoError(t, nodes[0].stopServer()) }()
- require.NoError(t, nodes[1].startServer())
- defer func() { require.NoError(t, nodes[1].stopServer()) }()
- require.NoError(t, nodes[3].startServerDontWaitBootstrap())
- defer func() { require.NoError(t, nodes[3].stopServer()) }()
+ require.NoError(t, nodes[0].StartServer())
+ defer func() { require.NoError(t, nodes[0].StopServer()) }()
+ require.NoError(t, nodes[1].StartServer())
+ defer func() { require.NoError(t, nodes[1].StopServer()) }()
+ require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
+ defer func() { require.NoError(t, nodes[3].StopServer()) }()
// Writes succeed to one available node
assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
- numWrites := numNodesWithTaggedWrite(t, []*testSetup{nodes[1], nodes[2]})
+ numWrites := numNodesWithTaggedWrite(t, []TestSetup{nodes[1], nodes[2]})
assert.True(t, numWrites == 1)
assert.Error(t, testWrite(topology.ConsistencyLevelMajority))
- numWrites = numNodesWithTaggedWrite(t, []*testSetup{nodes[1], nodes[2]})
+ numWrites = numNodesWithTaggedWrite(t, []TestSetup{nodes[1], nodes[2]})
assert.True(t, numWrites == 1)
assert.Error(t, testWrite(topology.ConsistencyLevelAll))
- numWrites = numNodesWithTaggedWrite(t, []*testSetup{nodes[1], nodes[2]})
+ numWrites = numNodesWithTaggedWrite(t, []TestSetup{nodes[1], nodes[2]})
assert.True(t, numWrites == 1)
}
@@ -228,22 +228,22 @@ func TestWriteTaggedAddNodeQuorumAllUp(t *testing.T) {
})
defer closeFn()
- require.NoError(t, nodes[0].startServer())
- defer func() { require.NoError(t, nodes[0].stopServer()) }()
- require.NoError(t, nodes[1].startServer())
- defer func() { require.NoError(t, nodes[1].stopServer()) }()
- require.NoError(t, nodes[2].startServer())
- defer func() { require.NoError(t, nodes[2].stopServer()) }()
- require.NoError(t, nodes[3].startServerDontWaitBootstrap())
- defer func() { require.NoError(t, nodes[3].stopServer()) }()
+ require.NoError(t, nodes[0].StartServer())
+ defer func() { require.NoError(t, nodes[0].StopServer()) }()
+ require.NoError(t, nodes[1].StartServer())
+ defer func() { require.NoError(t, nodes[1].StopServer()) }()
+ require.NoError(t, nodes[2].StartServer())
+ defer func() { require.NoError(t, nodes[2].StopServer()) }()
+ require.NoError(t, nodes[3].StartServerDontWaitBootstrap())
+ defer func() { require.NoError(t, nodes[3].StopServer()) }()
// Writes succeed to two available nodes
assert.NoError(t, testWrite(topology.ConsistencyLevelOne))
- numWrites := numNodesWithTaggedWrite(t, []*testSetup{nodes[1], nodes[2]})
+ numWrites := numNodesWithTaggedWrite(t, []TestSetup{nodes[1], nodes[2]})
assert.True(t, numWrites >= 1, numWrites)
assert.NoError(t, testWrite(topology.ConsistencyLevelMajority))
- numWrites = numNodesWithTaggedWrite(t, []*testSetup{nodes[1], nodes[2]})
+ numWrites = numNodesWithTaggedWrite(t, []TestSetup{nodes[1], nodes[2]})
assert.Equal(t, 2, numWrites)
assert.Error(t, testWrite(topology.ConsistencyLevelAll))
@@ -264,7 +264,7 @@ func makeTestWriteTagged(
s, err := c.NewSession()
require.NoError(t, err)
- now := nodes[0].getNowFn().Add(time.Minute)
+ now := nodes[0].NowFn()().Add(time.Minute)
return s.WriteTagged(testNamespaces[0], ident.StringID("quorumTest"),
ident.NewTagsIterator(ident.NewTags(ident.StringTag("foo", "bar"), ident.StringTag("boo", "baz"))),
now, 42, xtime.Second, nil)
@@ -283,20 +283,20 @@ func numNodesWithTaggedWrite(t *testing.T, setups testSetups) int {
return n
}
-func nodeHasTaggedWrite(t *testing.T, s *testSetup) bool {
- if s.db == nil {
+func nodeHasTaggedWrite(t *testing.T, s TestSetup) bool {
+ if s.DB() == nil {
return false
}
ctx := context.NewContext()
defer ctx.BlockingClose()
- nsCtx := namespace.NewContextFor(testNamespaces[0], s.schemaReg)
+ nsCtx := namespace.NewContextFor(testNamespaces[0], s.SchemaRegistry())
reQuery, err := m3ninxidx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
assert.NoError(t, err)
- now := s.getNowFn()
- res, err := s.db.QueryIDs(ctx, nsCtx.ID, index.Query{Query: reQuery}, index.QueryOptions{
+ now := s.NowFn()()
+ res, err := s.DB().QueryIDs(ctx, nsCtx.ID, index.Query{Query: reQuery}, index.QueryOptions{
StartInclusive: now.Add(-2 * time.Minute),
EndExclusive: now.Add(2 * time.Minute),
})
@@ -315,12 +315,12 @@ func nodeHasTaggedWrite(t *testing.T, s *testSetup) bool {
dpFound := false
id := ident.StringID("quorumTest")
- start := s.getNowFn()
- end := s.getNowFn().Add(5 * time.Minute)
- readers, err := s.db.ReadEncoded(ctx, nsCtx.ID, id, start, end)
+ start := s.NowFn()()
+ end := s.NowFn()().Add(5 * time.Minute)
+ readers, err := s.DB().ReadEncoded(ctx, nsCtx.ID, id, start, end)
require.NoError(t, err)
- mIter := s.db.Options().MultiReaderIteratorPool().Get()
+ mIter := s.DB().Options().MultiReaderIteratorPool().Get()
mIter.ResetSliceOfSlices(xio.NewReaderSliceOfSlicesFromBlockReadersIterator(readers), nsCtx.Schema)
defer mIter.Close()
for mIter.Next() {
diff --git a/src/dbnode/kvconfig/keys.go b/src/dbnode/kvconfig/keys.go
index 526cf9363a..e607be8d38 100644
--- a/src/dbnode/kvconfig/keys.go
+++ b/src/dbnode/kvconfig/keys.go
@@ -33,6 +33,11 @@ const (
// configuration specifying a hard limit for a cluster new series insertions.
ClusterNewSeriesInsertLimitKey = "m3db.node.cluster-new-series-insert-limit"
+ // EncodersPerBlockLimitKey is the KV config key for the runtime
+ // configuration specifying a hard limit on the number of active encoders
+ // per block.
+ EncodersPerBlockLimitKey = "m3db.node.encoders-per-block-limit"
+
// ClientBootstrapConsistencyLevel is the KV config key for the runtime
// configuration specifying the client bootstrap consistency level
ClientBootstrapConsistencyLevel = "m3db.client.bootstrap-consistency-level"
diff --git a/src/dbnode/namespace/convert.go b/src/dbnode/namespace/convert.go
index 8b7eafcbb4..853485a8bc 100644
--- a/src/dbnode/namespace/convert.go
+++ b/src/dbnode/namespace/convert.go
@@ -28,6 +28,8 @@ import (
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
+
+ protobuftypes "github.com/gogo/protobuf/types"
)
var (
@@ -35,7 +37,8 @@ var (
errNamespaceNil = errors.New("namespace options must be set")
)
-func fromNanos(n int64) time.Duration {
+// FromNanos converts nanoseconds to a namespace-compatible duration.
+func FromNanos(n int64) time.Duration {
return xtime.FromNormalizedDuration(n, time.Nanosecond)
}
@@ -48,14 +51,14 @@ func ToRetention(
}
ropts := retention.NewOptions().
- SetRetentionPeriod(fromNanos(ro.RetentionPeriodNanos)).
- SetFutureRetentionPeriod(fromNanos(ro.FutureRetentionPeriodNanos)).
- SetBlockSize(fromNanos(ro.BlockSizeNanos)).
- SetBufferFuture(fromNanos(ro.BufferFutureNanos)).
- SetBufferPast(fromNanos(ro.BufferPastNanos)).
+ SetRetentionPeriod(FromNanos(ro.RetentionPeriodNanos)).
+ SetFutureRetentionPeriod(FromNanos(ro.FutureRetentionPeriodNanos)).
+ SetBlockSize(FromNanos(ro.BlockSizeNanos)).
+ SetBufferFuture(FromNanos(ro.BufferFutureNanos)).
+ SetBufferPast(FromNanos(ro.BufferPastNanos)).
SetBlockDataExpiry(ro.BlockDataExpiry).
SetBlockDataExpiryAfterNotAccessedPeriod(
- fromNanos(ro.BlockDataExpiryAfterNotAccessPeriodNanos))
+ FromNanos(ro.BlockDataExpiryAfterNotAccessPeriodNanos))
if err := ropts.Validate(); err != nil {
return nil, err
@@ -74,11 +77,30 @@ func ToIndexOptions(
}
iopts = iopts.SetEnabled(io.Enabled).
- SetBlockSize(fromNanos(io.BlockSizeNanos))
+ SetBlockSize(FromNanos(io.BlockSizeNanos))
return iopts, nil
}
+// ToRuntimeOptions converts nsproto.NamespaceRuntimeOptions to RuntimeOptions.
+func ToRuntimeOptions(
+ opts *nsproto.NamespaceRuntimeOptions,
+) (RuntimeOptions, error) {
+ runtimeOpts := NewRuntimeOptions()
+ if opts == nil {
+ return runtimeOpts, nil
+ }
+ if v := opts.WriteIndexingPerCPUConcurrency; v != nil {
+ newValue := v.Value
+ runtimeOpts = runtimeOpts.SetWriteIndexingPerCPUConcurrency(&newValue)
+ }
+ if v := opts.FlushIndexingPerCPUConcurrency; v != nil {
+ newValue := v.Value
+ runtimeOpts = runtimeOpts.SetFlushIndexingPerCPUConcurrency(&newValue)
+ }
+ return runtimeOpts, nil
+}
+
// ToMetadata converts nsproto.Options to Metadata
func ToMetadata(
id string,
@@ -103,6 +125,11 @@ func ToMetadata(
return nil, err
}
+ runtimeOpts, err := ToRuntimeOptions(opts.RuntimeOptions)
+ if err != nil {
+ return nil, err
+ }
+
mopts := NewOptions().
SetBootstrapEnabled(opts.BootstrapEnabled).
SetFlushEnabled(opts.FlushEnabled).
@@ -113,7 +140,12 @@ func ToMetadata(
SetSchemaHistory(sr).
SetRetentionOptions(ropts).
SetIndexOptions(iopts).
- SetColdWritesEnabled(opts.ColdWritesEnabled)
+ SetColdWritesEnabled(opts.ColdWritesEnabled).
+ SetRuntimeOptions(runtimeOpts)
+
+ if err := mopts.Validate(); err != nil {
+ return nil, err
+ }
return NewMetadata(ident.StringID(id), mopts)
}
@@ -171,5 +203,31 @@ func OptionsToProto(opts Options) *nsproto.NamespaceOptions {
BlockSizeNanos: iopts.BlockSize().Nanoseconds(),
},
ColdWritesEnabled: opts.ColdWritesEnabled(),
+ RuntimeOptions: toRuntimeOptions(opts.RuntimeOptions()),
+ }
+}
+
+// toRuntimeOptions returns the corresponding NamespaceRuntimeOptions proto,
+// or nil if the options are all defaults.
+func toRuntimeOptions(opts RuntimeOptions) *nsproto.NamespaceRuntimeOptions {
+ if opts == nil || opts.IsDefault() {
+ return nil
+ }
+ var (
+ writeIndexingPerCPUConcurrency *protobuftypes.DoubleValue
+ flushIndexingPerCPUConcurrency *protobuftypes.DoubleValue
+ )
+ if v := opts.WriteIndexingPerCPUConcurrency(); v != nil {
+ writeIndexingPerCPUConcurrency = &protobuftypes.DoubleValue{
+ Value: *v,
+ }
+ }
+ if v := opts.FlushIndexingPerCPUConcurrency(); v != nil {
+ flushIndexingPerCPUConcurrency = &protobuftypes.DoubleValue{
+ Value: *v,
+ }
+ }
+ return &nsproto.NamespaceRuntimeOptions{
+ WriteIndexingPerCPUConcurrency: writeIndexingPerCPUConcurrency,
+ FlushIndexingPerCPUConcurrency: flushIndexingPerCPUConcurrency,
}
}
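A hedged round-trip sketch of the new conversion path (the 1.5 concurrency value is an illustrative assumption): options set on a namespace survive OptionsToProto/ToRuntimeOptions, while unset fields fall back to the package defaults.

package main

import (
    "fmt"

    "github.com/m3db/m3/src/dbnode/namespace"
)

func main() {
    writeConcurrency := 1.5 // illustrative value
    runtimeOpts := namespace.NewRuntimeOptions().
        SetWriteIndexingPerCPUConcurrency(&writeConcurrency)

    nsOpts := namespace.NewOptions().SetRuntimeOptions(runtimeOpts)
    proto := namespace.OptionsToProto(nsOpts)

    decoded, err := namespace.ToRuntimeOptions(proto.RuntimeOptions)
    if err != nil {
        panic(err)
    }

    // The explicitly set field survives the round trip; the unset flush
    // concurrency falls back to its default (0.25).
    fmt.Println(decoded.WriteIndexingPerCPUConcurrencyOrDefault()) // 1.5
    fmt.Println(decoded.FlushIndexingPerCPUConcurrencyOrDefault()) // 0.25
}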
diff --git a/src/dbnode/namespace/dynamic.go b/src/dbnode/namespace/dynamic.go
index c6b388044e..d76a9f7a6d 100644
--- a/src/dbnode/namespace/dynamic.go
+++ b/src/dbnode/namespace/dynamic.go
@@ -113,7 +113,7 @@ func newDynamicRegistry(opts DynamicOptions) (Registry, error) {
logger.Info("initial namespace value received")
initValue := watch.Get()
- m, err := getMapFromUpdate(initValue)
+ m, err := getMapFromUpdate(initValue, opts.ForceColdWritesEnabled())
if err != nil {
logger.Error("dynamic namespace registry received invalid initial value", zap.Error(err))
return nil, err
@@ -189,7 +189,7 @@ func (r *dynamicRegistry) run() {
continue
}
- m, err := getMapFromUpdate(val)
+ m, err := getMapFromUpdate(val, r.opts.ForceColdWritesEnabled())
if err != nil {
r.metrics.numInvalidUpdates.Inc(1)
r.logger.Warn("dynamic namespace registry received invalid update, skipping",
@@ -199,11 +199,13 @@ func (r *dynamicRegistry) run() {
if m.Equal(r.maps()) {
r.metrics.numInvalidUpdates.Inc(1)
- r.logger.Warn("dynamic namespace registry received identical update, skipping")
+ r.logger.Warn("dynamic namespace registry received identical update, skipping",
+ zap.Int("version", val.Version()))
continue
}
- r.logger.Info("dynamic namespace registry updated to version", zap.Int("version", val.Version()))
+ r.logger.Info("dynamic namespace registry updated to version",
+ zap.Int("version", val.Version()))
r.Lock()
r.currentValue = val
r.currentMap = m
@@ -235,7 +237,7 @@ func (r *dynamicRegistry) Close() error {
return nil
}
-func getMapFromUpdate(val kv.Value) (Map, error) {
+func getMapFromUpdate(val kv.Value, forceColdWritesEnabled bool) (Map, error) {
if val == nil {
return nil, errInvalidRegistry
}
@@ -245,5 +247,18 @@ func getMapFromUpdate(val kv.Value) (Map, error) {
return nil, errInvalidRegistry
}
- return FromProto(protoRegistry)
+ m, err := FromProto(protoRegistry)
+ if err != nil {
+ return nil, err
+ }
+
+ // NB(bodu): Force cold writes to be enabled for all ns if specified.
+ if forceColdWritesEnabled {
+ m, err = NewMap(ForceColdWritesEnabledForMetadatas(m.Metadatas()))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return m, nil
}
diff --git a/src/dbnode/namespace/dynamic_options.go b/src/dbnode/namespace/dynamic_options.go
index cfe2613782..fdd427b98f 100644
--- a/src/dbnode/namespace/dynamic_options.go
+++ b/src/dbnode/namespace/dynamic_options.go
@@ -40,10 +40,11 @@ var (
)
type dynamicOpts struct {
- iopts instrument.Options
- csClient client.Client
- nsRegistryKey string
- initTimeout time.Duration
+ iopts instrument.Options
+ csClient client.Client
+ nsRegistryKey string
+ initTimeout time.Duration
+ forceColdWritesEnabled bool
}
// NewDynamicOptions creates a new DynamicOptions
@@ -97,3 +98,13 @@ func (o *dynamicOpts) SetNamespaceRegistryKey(k string) DynamicOptions {
func (o *dynamicOpts) NamespaceRegistryKey() string {
return o.nsRegistryKey
}
+
+func (o *dynamicOpts) SetForceColdWritesEnabled(enabled bool) DynamicOptions {
+ opts := *o
+ opts.forceColdWritesEnabled = enabled
+ return &opts
+}
+
+func (o *dynamicOpts) ForceColdWritesEnabled() bool {
+ return o.forceColdWritesEnabled
+}
diff --git a/src/dbnode/namespace/metadata.go b/src/dbnode/namespace/metadata.go
index 7181f94415..7cce34e5be 100644
--- a/src/dbnode/namespace/metadata.go
+++ b/src/dbnode/namespace/metadata.go
@@ -71,3 +71,15 @@ func (m *metadata) Options() Options {
func (m *metadata) Equal(value Metadata) bool {
return m.id.Equal(value.ID()) && m.Options().Equal(value.Options())
}
+
+// ForceColdWritesEnabledForMetadatas forces cold writes to be enabled for all namespaces.
+func ForceColdWritesEnabledForMetadatas(metadatas []Metadata) []Metadata {
+ mds := make([]Metadata, 0, len(metadatas))
+ for _, md := range metadatas {
+ mds = append(mds, &metadata{
+ id: md.ID(),
+ opts: md.Options().SetColdWritesEnabled(true),
+ })
+ }
+ return mds
+}
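A small sketch of what the force-cold-writes plumbing amounts to: the dynamic registry (see dynamic.go above) rewrites every metadata with cold writes on and rebuilds the map. This uses only the exported helpers in this diff:

package main

import "github.com/m3db/m3/src/dbnode/namespace"

// forceColdWrites mirrors what getMapFromUpdate does when
// ForceColdWritesEnabled is set on the dynamic options.
func forceColdWrites(m namespace.Map) (namespace.Map, error) {
    return namespace.NewMap(
        namespace.ForceColdWritesEnabledForMetadatas(m.Metadatas()))
}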
diff --git a/src/dbnode/namespace/metadata_new_map_gen.go b/src/dbnode/namespace/metadata_new_map_gen.go
index 46efb9fc35..91ddfa6d07 100644
--- a/src/dbnode/namespace/metadata_new_map_gen.go
+++ b/src/dbnode/namespace/metadata_new_map_gen.go
@@ -28,7 +28,7 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/dbnode/namespace/namespace_mock.go b/src/dbnode/namespace/namespace_mock.go
index 33434633fd..e8d41e1dce 100644
--- a/src/dbnode/namespace/namespace_mock.go
+++ b/src/dbnode/namespace/namespace_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/dbnode/namespace/types.go
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -368,6 +368,34 @@ func (mr *MockOptionsMockRecorder) SchemaHistory() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SchemaHistory", reflect.TypeOf((*MockOptions)(nil).SchemaHistory))
}
+// SetRuntimeOptions mocks base method
+func (m *MockOptions) SetRuntimeOptions(value RuntimeOptions) Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetRuntimeOptions", value)
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// SetRuntimeOptions indicates an expected call of SetRuntimeOptions
+func (mr *MockOptionsMockRecorder) SetRuntimeOptions(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRuntimeOptions", reflect.TypeOf((*MockOptions)(nil).SetRuntimeOptions), value)
+}
+
+// RuntimeOptions mocks base method
+func (m *MockOptions) RuntimeOptions() RuntimeOptions {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RuntimeOptions")
+ ret0, _ := ret[0].(RuntimeOptions)
+ return ret0
+}
+
+// RuntimeOptions indicates an expected call of RuntimeOptions
+func (mr *MockOptionsMockRecorder) RuntimeOptions() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RuntimeOptions", reflect.TypeOf((*MockOptions)(nil).RuntimeOptions))
+}
+
// MockIndexOptions is a mock of IndexOptions interface
type MockIndexOptions struct {
ctrl *gomock.Controller
@@ -709,18 +737,18 @@ func (mr *MockSchemaRegistryMockRecorder) GetLatestSchema(id interface{}) *gomoc
}
// GetSchema mocks base method
-func (m *MockSchemaRegistry) GetSchema(id ident.ID, schemaId string) (SchemaDescr, error) {
+func (m *MockSchemaRegistry) GetSchema(id ident.ID, schemaID string) (SchemaDescr, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetSchema", id, schemaId)
+ ret := m.ctrl.Call(m, "GetSchema", id, schemaID)
ret0, _ := ret[0].(SchemaDescr)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetSchema indicates an expected call of GetSchema
-func (mr *MockSchemaRegistryMockRecorder) GetSchema(id, schemaId interface{}) *gomock.Call {
+func (mr *MockSchemaRegistryMockRecorder) GetSchema(id, schemaID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchema", reflect.TypeOf((*MockSchemaRegistry)(nil).GetSchema), id, schemaId)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchema", reflect.TypeOf((*MockSchemaRegistry)(nil).GetSchema), id, schemaID)
}
// SetSchemaHistory mocks base method
@@ -1185,6 +1213,34 @@ func (mr *MockDynamicOptionsMockRecorder) NamespaceRegistryKey() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NamespaceRegistryKey", reflect.TypeOf((*MockDynamicOptions)(nil).NamespaceRegistryKey))
}
+// SetForceColdWritesEnabled mocks base method
+func (m *MockDynamicOptions) SetForceColdWritesEnabled(enabled bool) DynamicOptions {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetForceColdWritesEnabled", enabled)
+ ret0, _ := ret[0].(DynamicOptions)
+ return ret0
+}
+
+// SetForceColdWritesEnabled indicates an expected call of SetForceColdWritesEnabled
+func (mr *MockDynamicOptionsMockRecorder) SetForceColdWritesEnabled(enabled interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetForceColdWritesEnabled", reflect.TypeOf((*MockDynamicOptions)(nil).SetForceColdWritesEnabled), enabled)
+}
+
+// ForceColdWritesEnabled mocks base method
+func (m *MockDynamicOptions) ForceColdWritesEnabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ForceColdWritesEnabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// ForceColdWritesEnabled indicates an expected call of ForceColdWritesEnabled
+func (mr *MockDynamicOptionsMockRecorder) ForceColdWritesEnabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceColdWritesEnabled", reflect.TypeOf((*MockDynamicOptions)(nil).ForceColdWritesEnabled))
+}
+
// MockNamespaceWatch is a mock of NamespaceWatch interface
type MockNamespaceWatch struct {
ctrl *gomock.Controller
diff --git a/src/dbnode/namespace/namespace_runtime_options.go b/src/dbnode/namespace/namespace_runtime_options.go
new file mode 100644
index 0000000000..8531cc6eaa
--- /dev/null
+++ b/src/dbnode/namespace/namespace_runtime_options.go
@@ -0,0 +1,254 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package namespace
+
+import (
+ "sync"
+
+ xclose "github.com/m3db/m3/src/x/close"
+ "github.com/m3db/m3/src/x/watch"
+)
+
+const (
+ defaultWriteIndexingPerCPUConcurrency = 0.75
+ defaultFlushIndexingPerCPUConcurrency = 0.25
+)
+
+// RuntimeOptions is a set of runtime options that can
+// be set per namespace.
+type RuntimeOptions interface {
+ // IsDefault returns whether the runtime options are purely defaults
+ // with no values explicitly set.
+ IsDefault() bool
+
+ // Equal returns whether these runtime options equal another set of runtime options.
+ Equal(other RuntimeOptions) bool
+
+ // SetWriteIndexingPerCPUConcurrency sets the write
+ // indexing per CPU concurrency.
+ SetWriteIndexingPerCPUConcurrency(value *float64) RuntimeOptions
+
+ // WriteIndexingPerCPUConcurrency returns the write
+ // indexing per CPU concurrency.
+ WriteIndexingPerCPUConcurrency() *float64
+
+ // WriteIndexingPerCPUConcurrencyOrDefault returns the write
+ // indexing per CPU concurrency or default.
+ WriteIndexingPerCPUConcurrencyOrDefault() float64
+
+ // SetFlushIndexingPerCPUConcurrency sets the flush
+ // indexing per CPU concurrency.
+ SetFlushIndexingPerCPUConcurrency(value *float64) RuntimeOptions
+
+ // FlushIndexingPerCPUConcurrency returns the flush
+ // indexing per CPU concurrency.
+ FlushIndexingPerCPUConcurrency() *float64
+
+ // FlushIndexingPerCPUConcurrencyOrDefault returns the flush
+ // indexing per CPU concurrency or default.
+ FlushIndexingPerCPUConcurrencyOrDefault() float64
+}
+
+// RuntimeOptionsManagerRegistry is a registry of runtime options managers.
+type RuntimeOptionsManagerRegistry interface {
+ // RuntimeOptionsManager returns a namespace runtime options manager
+ // for the given namespace.
+ RuntimeOptionsManager(namespace string) RuntimeOptionsManager
+
+ // Close closes the watcher and all descendant watches.
+ Close()
+}
+
+// RuntimeOptionsManager is a runtime options manager.
+type RuntimeOptionsManager interface {
+ // Update updates the current runtime options.
+ Update(value RuntimeOptions) error
+
+ // Get returns the current values.
+ Get() RuntimeOptions
+
+ // RegisterListener registers a listener for updates to runtime options;
+ // it synchronously calls back the listener at registration time to
+ // deliver the current set of runtime options.
+ RegisterListener(l RuntimeOptionsListener) xclose.SimpleCloser
+
+ // Close closes the watcher and all descendant watches.
+ Close()
+}
+
+// RuntimeOptionsListener listens for updates to runtime options.
+type RuntimeOptionsListener interface {
+ // SetNamespaceRuntimeOptions is called when the listener is registered
+ // and when any updates occurred passing the new runtime options.
+ SetNamespaceRuntimeOptions(value RuntimeOptions)
+}
+
+// runtimeOptions should always use pointer value types for its options
+// and provide a "...OrDefault()" method for each, so that the "IsDefault"
+// method can reliably tell whether the options are all defaults or not.
+type runtimeOptions struct {
+ writeIndexingPerCPUConcurrency *float64
+ flushIndexingPerCPUConcurrency *float64
+}
+
+// NewRuntimeOptions returns a new namespace runtime options.
+func NewRuntimeOptions() RuntimeOptions {
+ return newRuntimeOptions()
+}
+
+func newRuntimeOptions() *runtimeOptions {
+ return &runtimeOptions{}
+}
+
+func (o *runtimeOptions) IsDefault() bool {
+ defaults := newRuntimeOptions()
+ return *o == *defaults
+}
+
+func (o *runtimeOptions) Equal(other RuntimeOptions) bool {
+ return o.writeIndexingPerCPUConcurrency == other.WriteIndexingPerCPUConcurrency() &&
+ o.flushIndexingPerCPUConcurrency == other.FlushIndexingPerCPUConcurrency()
+}
+
+func (o *runtimeOptions) SetWriteIndexingPerCPUConcurrency(value *float64) RuntimeOptions {
+ opts := *o
+ opts.writeIndexingPerCPUConcurrency = value
+ return &opts
+}
+
+func (o *runtimeOptions) WriteIndexingPerCPUConcurrency() *float64 {
+ return o.writeIndexingPerCPUConcurrency
+}
+
+func (o *runtimeOptions) WriteIndexingPerCPUConcurrencyOrDefault() float64 {
+ value := o.writeIndexingPerCPUConcurrency
+ if value == nil {
+ return defaultWriteIndexingPerCPUConcurrency
+ }
+ return *value
+}
+
+func (o *runtimeOptions) SetFlushIndexingPerCPUConcurrency(value *float64) RuntimeOptions {
+ opts := *o
+ opts.flushIndexingPerCPUConcurrency = value
+ return &opts
+}
+
+func (o *runtimeOptions) FlushIndexingPerCPUConcurrency() *float64 {
+ return o.flushIndexingPerCPUConcurrency
+}
+
+func (o *runtimeOptions) FlushIndexingPerCPUConcurrencyOrDefault() float64 {
+ value := o.flushIndexingPerCPUConcurrency
+ if value == nil {
+ return defaultFlushIndexingPerCPUConcurrency
+ }
+ return *value
+}
+
+type runtimeOptionsManagerRegistry struct {
+ sync.RWMutex
+ managers map[string]RuntimeOptionsManager
+}
+
+// NewRuntimeOptionsManagerRegistry returns a new runtime options
+// manager registry.
+func NewRuntimeOptionsManagerRegistry() RuntimeOptionsManagerRegistry {
+ return &runtimeOptionsManagerRegistry{
+ managers: make(map[string]RuntimeOptionsManager),
+ }
+}
+
+func (r *runtimeOptionsManagerRegistry) RuntimeOptionsManager(
+ namespace string,
+) RuntimeOptionsManager {
+ r.Lock()
+ defer r.Unlock()
+
+ manager, ok := r.managers[namespace]
+ if !ok {
+ manager = NewRuntimeOptionsManager(namespace)
+ r.managers[namespace] = manager
+ }
+
+ return manager
+}
+
+func (r *runtimeOptionsManagerRegistry) Close() {
+ r.Lock()
+ defer r.Unlock()
+
+ for k, v := range r.managers {
+ v.Close()
+ delete(r.managers, k)
+ }
+}
+
+type runtimeOptionsManager struct {
+ namespace string
+ watchable watch.Watchable
+}
+
+// NewRuntimeOptionsManager returns a new runtime options manager.
+func NewRuntimeOptionsManager(namespace string) RuntimeOptionsManager {
+ watchable := watch.NewWatchable()
+ watchable.Update(NewRuntimeOptions())
+ return &runtimeOptionsManager{
+ namespace: namespace,
+ watchable: watchable,
+ }
+}
+
+func (w *runtimeOptionsManager) Update(value RuntimeOptions) error {
+ w.watchable.Update(value)
+ return nil
+}
+
+func (w *runtimeOptionsManager) Get() RuntimeOptions {
+ return w.watchable.Get().(RuntimeOptions)
+}
+
+func (w *runtimeOptionsManager) RegisterListener(
+ listener RuntimeOptionsListener,
+) xclose.SimpleCloser {
+ _, watch, _ := w.watchable.Watch()
+
+ // We always initialize the watchable, so always read
+ // the first notification value.
+ <-watch.C()
+
+ // Deliver the current runtime options
+ listener.SetNamespaceRuntimeOptions(watch.Get().(RuntimeOptions))
+
+ // Spawn a new goroutine that will terminate when the
+ // watchable terminates on the close of the runtime options manager
+ go func() {
+ for range watch.C() {
+ listener.SetNamespaceRuntimeOptions(watch.Get().(RuntimeOptions))
+ }
+ }()
+
+ return watch
+}
+
+func (w *runtimeOptionsManager) Close() {
+ w.watchable.Close()
+}
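To show the intended consumption pattern, a hedged end-to-end sketch; concurrencyLogger and the "metrics" namespace name are hypothetical:

package main

import (
    "fmt"

    "github.com/m3db/m3/src/dbnode/namespace"
)

// concurrencyLogger is a hypothetical RuntimeOptionsListener.
type concurrencyLogger struct{}

func (l *concurrencyLogger) SetNamespaceRuntimeOptions(value namespace.RuntimeOptions) {
    fmt.Println("write indexing per-CPU concurrency:",
        value.WriteIndexingPerCPUConcurrencyOrDefault())
}

func main() {
    registry := namespace.NewRuntimeOptionsManagerRegistry()
    defer registry.Close()

    mgr := registry.RuntimeOptionsManager("metrics")

    // RegisterListener synchronously delivers the current options (all
    // defaults here, so 0.75), then streams later updates asynchronously.
    closer := mgr.RegisterListener(&concurrencyLogger{})
    defer closer.Close()

    v := 2.0
    _ = mgr.Update(namespace.NewRuntimeOptions().
        SetWriteIndexingPerCPUConcurrency(&v))
}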
diff --git a/src/dbnode/namespace/options.go b/src/dbnode/namespace/options.go
index 6f146bc88c..f8cc25f312 100644
--- a/src/dbnode/namespace/options.go
+++ b/src/dbnode/namespace/options.go
@@ -53,6 +53,7 @@ var (
errIndexBlockSizePositive = errors.New("index block size must be positive")
errIndexBlockSizeTooLarge = errors.New("index block size needs to be <= namespace retention period")
errIndexBlockSizeMustBeAMultipleOfDataBlockSize = errors.New("index block size must be a multiple of data block size")
+ errNamespaceRuntimeOptionsNotSet = errors.New("namespace runtime options is not set")
)
type options struct {
@@ -66,6 +67,7 @@ type options struct {
retentionOpts retention.Options
indexOpts IndexOptions
schemaHis SchemaHistory
+ runtimeOpts RuntimeOptions
}
// NewSchemaHistory returns an empty schema history.
@@ -86,6 +88,7 @@ func NewOptions() Options {
retentionOpts: retention.NewOptions(),
indexOpts: NewIndexOptions(),
schemaHis: NewSchemaHistory(),
+ runtimeOpts: NewRuntimeOptions(),
}
}
@@ -111,6 +114,9 @@ func (o *options) Validate() error {
if indexBlockSize%dataBlockSize != 0 {
return errIndexBlockSizeMustBeAMultipleOfDataBlockSize
}
+ if o.runtimeOpts == nil {
+ return errNamespaceRuntimeOptionsNotSet
+ }
return nil
}
@@ -124,7 +130,8 @@ func (o *options) Equal(value Options) bool {
o.coldWritesEnabled == value.ColdWritesEnabled() &&
o.retentionOpts.Equal(value.RetentionOptions()) &&
o.indexOpts.Equal(value.IndexOptions()) &&
- o.schemaHis.Equal(value.SchemaHistory())
+ o.schemaHis.Equal(value.SchemaHistory()) &&
+ o.runtimeOpts.Equal(value.RuntimeOptions())
}
func (o *options) SetBootstrapEnabled(value bool) Options {
@@ -226,3 +233,13 @@ func (o *options) SetSchemaHistory(value SchemaHistory) Options {
func (o *options) SchemaHistory() SchemaHistory {
return o.schemaHis
}
+
+func (o *options) SetRuntimeOptions(value RuntimeOptions) Options {
+ opts := *o
+ opts.runtimeOpts = value
+ return &opts
+}
+
+func (o *options) RuntimeOptions() RuntimeOptions {
+ return o.runtimeOpts
+}
diff --git a/src/dbnode/namespace/types.go b/src/dbnode/namespace/types.go
index 7e04675e06..acd61360fe 100644
--- a/src/dbnode/namespace/types.go
+++ b/src/dbnode/namespace/types.go
@@ -25,9 +25,9 @@ import (
"github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/dbnode/retention"
+ xclose "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
- xclose "github.com/m3db/m3/src/x/close"
)
// Options controls namespace behavior
@@ -97,6 +97,12 @@ type Options interface {
// SchemaHistory returns the schema registry for this namespace.
SchemaHistory() SchemaHistory
+
+ // SetRuntimeOptions sets the RuntimeOptions.
+ SetRuntimeOptions(value RuntimeOptions) Options
+
+ // RuntimeOptions returns the RuntimeOptions.
+ RuntimeOptions() RuntimeOptions
}
// IndexOptions controls the indexing options for a namespace.
@@ -163,7 +169,7 @@ type SchemaRegistry interface {
// GetSchema gets the latest schema for the namespace.
// If proto is not enabled, nil, nil is returned
- GetSchema(id ident.ID, schemaId string) (SchemaDescr, error)
+ GetSchema(id ident.ID, schemaID string) (SchemaDescr, error)
// SetSchemaHistory sets the schema history for the namespace.
// If proto is not enabled, nil is returned
@@ -255,6 +261,14 @@ type DynamicOptions interface {
// NamespaceRegistryKey returns the kv-store key used for the
// NamespaceRegistry
NamespaceRegistryKey() string
+
+ // SetForceColdWritesEnabled sets whether to force-enable cold writes
+ // for all namespaces.
+ SetForceColdWritesEnabled(enabled bool) DynamicOptions
+
+ // ForceColdWritesEnabled returns whether cold writes are force-enabled
+ // for all namespaces.
+ ForceColdWritesEnabled() bool
}
// NamespaceWatch watches for namespace updates.
@@ -269,4 +283,5 @@ type NamespaceWatch interface {
Close() error
}
+// NamespaceUpdater is a function invoked with the latest namespace Map
+// whenever it changes.
type NamespaceUpdater func(Map) error
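As a usage note, a NamespaceUpdater is just a callback over the latest Map; a minimal, purely illustrative example:

package main

import (
    "log"

    "github.com/m3db/m3/src/dbnode/namespace"
)

// newLoggingUpdater returns a NamespaceUpdater that logs the IDs present
// in each map it receives.
func newLoggingUpdater() namespace.NamespaceUpdater {
    return func(m namespace.Map) error {
        for _, md := range m.Metadatas() {
            log.Println("namespace updated:", md.ID().String())
        }
        return nil
    }
}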
diff --git a/src/dbnode/network/server/httpjson/cluster/server.go b/src/dbnode/network/server/httpjson/cluster/server.go
index 51bf05af39..2ccd905580 100644
--- a/src/dbnode/network/server/httpjson/cluster/server.go
+++ b/src/dbnode/network/server/httpjson/cluster/server.go
@@ -38,7 +38,7 @@ type server struct {
opts httpjson.ServerOptions
}
-// NewServer creates a cluster HTTP network service
+// NewServer creates a cluster HTTP network service.
func NewServer(
client client.Client,
address string,
diff --git a/src/dbnode/network/server/httpjson/handlers.go b/src/dbnode/network/server/httpjson/handlers.go
index c9b117cfae..fd3c49ea9f 100644
--- a/src/dbnode/network/server/httpjson/handlers.go
+++ b/src/dbnode/network/server/httpjson/handlers.go
@@ -27,18 +27,19 @@ import (
"fmt"
"net/http"
"reflect"
+ "strconv"
"strings"
xerrors "github.com/m3db/m3/src/x/errors"
+ "github.com/m3db/m3/src/x/headers"
apachethrift "github.com/apache/thrift/lib/go/thrift"
"github.com/uber/tchannel-go/thrift"
)
var (
- errRequestMustBeGet = xerrors.NewInvalidParamsError(errors.New("request without request params must be GET"))
- errRequestMustBePost = xerrors.NewInvalidParamsError(errors.New("request with request params must be POST"))
- errInvalidRequestBody = xerrors.NewInvalidParamsError(errors.New("request contains an invalid request body"))
+ errRequestMustBeGet = xerrors.NewInvalidParamsError(errors.New("request without request params must be GET"))
+ errRequestMustBePost = xerrors.NewInvalidParamsError(errors.New("request with request params must be POST"))
)
// Error is an HTTP JSON error that also sets a return status code.
@@ -154,18 +155,25 @@ func RegisterHandlers(mux *http.ServeMux, service interface{}, opts ServerOption
return
}
- headers := make(map[string]string)
+ httpHeaders := make(map[string]string)
for key, values := range r.Header {
if len(values) > 0 {
- headers[key] = values[0]
+ httpHeaders[key] = values[0]
}
}
var in interface{}
if reqIn != nil {
in = reflect.New(reqIn.Elem()).Interface()
- if err := json.NewDecoder(r.Body).Decode(in); err != nil {
- writeError(w, errInvalidRequestBody)
+ decoder := json.NewDecoder(r.Body)
+ disableDisallowUnknownFields, err := strconv.ParseBool(
+ r.Header.Get(headers.JSONDisableDisallowUnknownFields))
+ if err != nil || !disableDisallowUnknownFields {
+ decoder.DisallowUnknownFields()
+ }
+ if err := decoder.Decode(in); err != nil {
+ err := fmt.Errorf("invalid request body: %v", err)
+ writeError(w, xerrors.NewInvalidParamsError(err))
return
}
}
@@ -174,10 +182,10 @@ func RegisterHandlers(mux *http.ServeMux, service interface{}, opts ServerOption
callContext, _ := thrift.NewContext(opts.RequestTimeout())
if contextFn != nil {
// Allow derivation of context if context fn is set
- callContext = contextFn(callContext, method.Name, headers)
+ callContext = contextFn(callContext, method.Name, httpHeaders)
}
// Always set headers finally
- callContext = thrift.WithHeaders(callContext, headers)
+ callContext = thrift.WithHeaders(callContext, httpHeaders)
var (
svc = reflect.ValueOf(service)
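With this hunk the JSON decoder rejects unknown fields by default, turning silently ignored payload typos into invalid-params errors. Callers needing the old lenient behavior opt out per request via the new header constant; a hedged client sketch (the endpoint, port, and body are illustrative assumptions):

package main

import (
    "net/http"
    "strings"

    "github.com/m3db/m3/src/x/headers"
)

func main() {
    body := strings.NewReader(`{"nameSpace":"metrics","extraField":true}`)
    req, err := http.NewRequest(http.MethodPost,
        "http://localhost:9003/writebatchraw", body)
    if err != nil {
        panic(err)
    }
    // Without this header the decoder's DisallowUnknownFields() rejects
    // the unknown "extraField" with an invalid params error.
    req.Header.Set(headers.JSONDisableDisallowUnknownFields, "true")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
}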
diff --git a/src/dbnode/network/server/httpjson/handlers_test.go b/src/dbnode/network/server/httpjson/handlers_test.go
new file mode 100644
index 0000000000..0cfd80b4bd
--- /dev/null
+++ b/src/dbnode/network/server/httpjson/handlers_test.go
@@ -0,0 +1,176 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package httpjson
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+
+ "github.com/m3db/m3/src/dbnode/client"
+ "github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/cluster"
+ "github.com/m3db/m3/src/x/headers"
+ xjson "github.com/m3db/m3/src/x/json"
+ xtest "github.com/m3db/m3/src/x/test"
+
+ apachethrift "github.com/apache/thrift/lib/go/thrift"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/net/context"
+)
+
+var (
+	// testClientOptions is shared so it is not allocated multiple times.
+ testClientOptions = client.NewOptions()
+
+ errTestClientSessionError = errors.New("session test error")
+)
+
+func newTestClient(ctrl *gomock.Controller) *client.MockClient {
+ client := client.NewMockClient(ctrl)
+ client.EXPECT().Options().Return(testClientOptions).AnyTimes()
+ client.EXPECT().DefaultSession().Return(nil, errTestClientSessionError).AnyTimes()
+ return client
+}
+
+func TestRegisterHandlersRequestSimple(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ mux := http.NewServeMux()
+
+ client := newTestClient(ctrl)
+ service := cluster.NewService(client)
+
+ opts := NewServerOptions()
+
+ err := RegisterHandlers(mux, service, opts)
+ require.NoError(t, err)
+
+ recorder := httptest.NewRecorder()
+ req := httptest.NewRequest(http.MethodGet, "/health", nil)
+ mux.ServeHTTP(recorder, req)
+
+ require.Equal(t, http.StatusOK, recorder.Code)
+}
+
+func TestRegisterHandlersRequestPostRequestFn(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ mux := http.NewServeMux()
+
+ client := newTestClient(ctrl)
+ service := cluster.NewService(client)
+
+ calledPostRequestFn := 0
+ opts := NewServerOptions().
+ SetPostResponseFn(func(
+ _ context.Context,
+ method string,
+ _ apachethrift.TStruct,
+ ) {
+ require.Equal(t, "Health", method)
+ calledPostRequestFn++
+ })
+
+ err := RegisterHandlers(mux, service, opts)
+ require.NoError(t, err)
+
+ recorder := httptest.NewRecorder()
+ req := httptest.NewRequest(http.MethodGet, "/health", nil)
+ mux.ServeHTTP(recorder, req)
+
+ require.Equal(t, http.StatusOK, recorder.Code)
+ require.Equal(t, 1, calledPostRequestFn)
+}
+
+func TestRegisterHandlersRequestUnknownFieldsBadRequestError(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ mux := http.NewServeMux()
+
+ client := newTestClient(ctrl)
+ service := cluster.NewService(client)
+
+ opts := NewServerOptions()
+
+ err := RegisterHandlers(mux, service, opts)
+ require.NoError(t, err)
+
+	// Create a payload with an unknown field.
+ payload := xjson.Map{"unknownFieldName": "unknownFieldValue"}
+
+ body := bytes.NewBuffer(nil)
+ require.NoError(t, json.NewEncoder(body).Encode(payload))
+
+ recorder := httptest.NewRecorder()
+ req := httptest.NewRequest(http.MethodPost, "/query", body)
+ mux.ServeHTTP(recorder, req)
+
+ require.Equal(t, http.StatusBadRequest, recorder.Code)
+
+ out, err := ioutil.ReadAll(recorder.Result().Body)
+ require.NoError(t, err)
+ str := string(out)
+ require.True(t, strings.Contains(str, "invalid request body:"))
+ require.True(t, strings.Contains(str, "unknown field"))
+}
+
+func TestRegisterHandlersRequestDisableUnknownFields(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ mux := http.NewServeMux()
+
+ client := newTestClient(ctrl)
+ service := cluster.NewService(client)
+
+ opts := NewServerOptions()
+
+ err := RegisterHandlers(mux, service, opts)
+ require.NoError(t, err)
+
+	// Create a payload with an unknown field.
+ payload := xjson.Map{"unknownFieldName": "unknownFieldValue"}
+
+ body := bytes.NewBuffer(nil)
+ require.NoError(t, json.NewEncoder(body).Encode(payload))
+
+ recorder := httptest.NewRecorder()
+ req := httptest.NewRequest(http.MethodPost, "/query", body)
+ req.Header.Set(headers.JSONDisableDisallowUnknownFields, "true")
+ mux.ServeHTTP(recorder, req)
+
+	// Make sure this is not a bad request, but the expected session error.
+ require.Equal(t, http.StatusInternalServerError, recorder.Code)
+
+ out, err := ioutil.ReadAll(recorder.Result().Body)
+ require.NoError(t, err)
+ str := string(out)
+ require.True(t, strings.Contains(str, errTestClientSessionError.Error()))
+}
diff --git a/src/dbnode/network/server/tchannelthrift/cluster/service.go b/src/dbnode/network/server/tchannelthrift/cluster/service.go
index 81987fdf79..c0cc0dc05b 100644
--- a/src/dbnode/network/server/tchannelthrift/cluster/service.go
+++ b/src/dbnode/network/server/tchannelthrift/cluster/service.go
@@ -121,7 +121,7 @@ func (s *service) Query(tctx thrift.Context, req *rpc.QueryRequest) (*rpc.QueryR
EndExclusive: end,
}
if l := req.Limit; l != nil {
- opts.Limit = int(*l)
+ opts.SeriesLimit = int(*l)
}
session, err := s.session()
diff --git a/src/dbnode/network/server/tchannelthrift/convert/convert.go b/src/dbnode/network/server/tchannelthrift/convert/convert.go
index aa658d06ad..6d40c56c6f 100644
--- a/src/dbnode/network/server/tchannelthrift/convert/convert.go
+++ b/src/dbnode/network/server/tchannelthrift/convert/convert.go
@@ -25,7 +25,6 @@ import (
"fmt"
"time"
- "github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
tterrors "github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/errors"
"github.com/m3db/m3/src/dbnode/storage/index"
@@ -138,13 +137,14 @@ func ToSegments(blocks []xio.BlockReader) (ToSegmentsResult, error) {
}
startTime := xtime.ToNormalizedTime(blocks[0].Start, time.Nanosecond)
blockSize := xtime.ToNormalizedDuration(blocks[0].BlockSize, time.Nanosecond)
+ checksum := int64(seg.CalculateChecksum())
s.Merged = &rpc.Segment{
Head: bytesRef(seg.Head),
Tail: bytesRef(seg.Tail),
StartTime: &startTime,
BlockSize: &blockSize,
+ Checksum: &checksum,
}
- checksum := int64(digest.SegmentChecksum(seg))
return ToSegmentsResult{
Segments: s,
Checksum: &checksum,
@@ -161,11 +161,13 @@ func ToSegments(blocks []xio.BlockReader) (ToSegmentsResult, error) {
}
startTime := xtime.ToNormalizedTime(block.Start, time.Nanosecond)
blockSize := xtime.ToNormalizedDuration(block.BlockSize, time.Nanosecond)
+ checksum := int64(seg.CalculateChecksum())
s.Unmerged = append(s.Unmerged, &rpc.Segment{
Head: bytesRef(seg.Head),
Tail: bytesRef(seg.Tail),
StartTime: &startTime,
BlockSize: &blockSize,
+ Checksum: &checksum,
})
}
if len(s.Unmerged) == 0 {
@@ -217,11 +219,15 @@ func FromRPCFetchTaggedRequest(
}
opts := index.QueryOptions{
- StartInclusive: start,
- EndExclusive: end,
+ StartInclusive: start,
+ EndExclusive: end,
+ RequireExhaustive: req.RequireExhaustive,
}
if l := req.Limit; l != nil {
- opts.Limit = int(*l)
+ opts.SeriesLimit = int(*l)
+ }
+ if l := req.DocsLimit; l != nil {
+ opts.DocsLimit = int(*l)
}
q, err := idx.Unmarshal(req.Query)
@@ -262,18 +268,24 @@ func ToRPCFetchTaggedRequest(
}
request := rpc.FetchTaggedRequest{
- NameSpace: ns.Bytes(),
- RangeStart: rangeStart,
- RangeEnd: rangeEnd,
- FetchData: fetchData,
- Query: query,
+ NameSpace: ns.Bytes(),
+ RangeStart: rangeStart,
+ RangeEnd: rangeEnd,
+ FetchData: fetchData,
+ Query: query,
+ RequireExhaustive: opts.RequireExhaustive,
}
- if opts.Limit > 0 {
- l := int64(opts.Limit)
+ if opts.SeriesLimit > 0 {
+ l := int64(opts.SeriesLimit)
request.Limit = &l
}
+ if opts.DocsLimit > 0 {
+ l := int64(opts.DocsLimit)
+ request.DocsLimit = &l
+ }
+
return request, nil
}
@@ -298,7 +310,7 @@ func FromRPCAggregateQueryRequest(
},
}
if l := req.Limit; l != nil {
- opts.Limit = int(*l)
+ opts.SeriesLimit = int(*l)
}
query, err := FromRPCQuery(req.Query)
@@ -343,7 +355,7 @@ func FromRPCAggregateQueryRawRequest(
},
}
if l := req.Limit; l != nil {
- opts.Limit = int(*l)
+ opts.SeriesLimit = int(*l)
}
query, err := idx.Unmarshal(req.Query)
@@ -390,8 +402,8 @@ func ToRPCAggregateQueryRawRequest(
RangeEnd: rangeEnd,
}
- if opts.Limit > 0 {
- l := int64(opts.Limit)
+ if opts.SeriesLimit > 0 {
+ l := int64(opts.SeriesLimit)
request.Limit = &l
}
@@ -498,6 +510,11 @@ func (w *writeTaggedIter) Duplicate() ident.TagIterator {
}
}
+func (w *writeTaggedIter) Rewind() {
+ w.release()
+ w.currentIdx = -1
+}
+
// FromRPCQuery will create a m3ninx index query from an RPC query.
// NB: a nil query is considered equivalent to an `All` query.
func FromRPCQuery(query *rpc.Query) (idx.Query, error) {
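Taken together, the convert changes rename Limit to SeriesLimit and thread two new knobs, DocsLimit and RequireExhaustive, through the RPC request and back. A minimal sketch of the resulting options struct; the comments on the new fields are a reading of this change, not documentation from it:

import (
	"time"

	"github.com/m3db/m3/src/dbnode/storage/index"
)

// exampleQueryOptions shows the renamed and newly threaded fields.
func exampleQueryOptions() index.QueryOptions {
	now := time.Now()
	return index.QueryOptions{
		StartInclusive:    now.Add(-time.Hour),
		EndExclusive:      now,
		SeriesLimit:       1000, // renamed from Limit
		DocsLimit:         5000, // new: separately bounds matched documents
		RequireExhaustive: true, // new: carried on the FetchTagged RPC
	}
}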
diff --git a/src/dbnode/network/server/tchannelthrift/convert/convert_test.go b/src/dbnode/network/server/tchannelthrift/convert/convert_test.go
index 2a0a50f002..2c33c2dd06 100644
--- a/src/dbnode/network/server/tchannelthrift/convert/convert_test.go
+++ b/src/dbnode/network/server/tchannelthrift/convert/convert_test.go
@@ -105,7 +105,7 @@ func TestConvertFetchTaggedRequest(t *testing.T) {
opts := index.QueryOptions{
StartInclusive: time.Now().Add(-900 * time.Hour),
EndExclusive: time.Now(),
- Limit: 10,
+ SeriesLimit: 10,
}
fetchData := true
var limit int64 = 10
@@ -171,7 +171,7 @@ func TestConvertAggregateRawQueryRequest(t *testing.T) {
QueryOptions: index.QueryOptions{
StartInclusive: time.Now().Add(-900 * time.Hour),
EndExclusive: time.Now(),
- Limit: 10,
+ SeriesLimit: 10,
},
Type: index.AggregateTagNamesAndValues,
FieldFilter: index.AggregateFieldFilter{
diff --git a/src/dbnode/network/server/tchannelthrift/node/options.go b/src/dbnode/network/server/tchannelthrift/node/options.go
new file mode 100644
index 0000000000..a00f953e20
--- /dev/null
+++ b/src/dbnode/network/server/tchannelthrift/node/options.go
@@ -0,0 +1,106 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package node
+
+import (
+ "github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/uber/tchannel-go"
+ "github.com/uber/tchannel-go/thrift"
+)
+
+// NewTChanNodeServerFn is a function type that creates a tchan node server.
+type NewTChanNodeServerFn func(
+ service Service,
+ iOpts instrument.Options,
+) thrift.TChanServer
+
+func defaultTChanNodeServerFn(
+ service Service,
+ _ instrument.Options,
+) thrift.TChanServer {
+ return rpc.NewTChanNodeServer(service)
+}
+
+// Options are thrift options.
+type Options interface {
+	// SetChannelOptions sets the tchan channel options.
+ SetChannelOptions(value *tchannel.ChannelOptions) Options
+
+ // ChannelOptions returns the tchan channel options.
+ ChannelOptions() *tchannel.ChannelOptions
+
+ // SetTChanNodeServerFn sets a tchan node server builder.
+ SetTChanNodeServerFn(value NewTChanNodeServerFn) Options
+
+ // TChanNodeServerFn returns a tchan node server builder.
+ TChanNodeServerFn() NewTChanNodeServerFn
+
+ // SetInstrumentOptions sets the instrumentation options.
+ SetInstrumentOptions(value instrument.Options) Options
+
+ // InstrumentOptions returns the instrumentation options.
+ InstrumentOptions() instrument.Options
+}
+
+type options struct {
+ channelOptions *tchannel.ChannelOptions
+ instrumentOpts instrument.Options
+ tchanNodeServerFn NewTChanNodeServerFn
+}
+
+// NewOptions creates a new set of options.
+func NewOptions(chanOpts *tchannel.ChannelOptions) Options {
+ return &options{
+ channelOptions: chanOpts,
+ tchanNodeServerFn: defaultTChanNodeServerFn,
+ }
+}
+
+func (o *options) SetChannelOptions(value *tchannel.ChannelOptions) Options {
+ opts := *o
+ opts.channelOptions = value
+ return &opts
+}
+
+func (o *options) ChannelOptions() *tchannel.ChannelOptions {
+ return o.channelOptions
+}
+
+func (o *options) SetTChanNodeServerFn(value NewTChanNodeServerFn) Options {
+ opts := *o
+ opts.tchanNodeServerFn = value
+ return &opts
+}
+
+func (o *options) TChanNodeServerFn() NewTChanNodeServerFn {
+ return o.tchanNodeServerFn
+}
+
+func (o *options) SetInstrumentOptions(value instrument.Options) Options {
+ opts := *o
+ opts.instrumentOpts = value
+ return &opts
+}
+
+func (o *options) InstrumentOptions() instrument.Options {
+ return o.instrumentOpts
+}
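The new Options type follows the copy-on-set accessor pattern used elsewhere in the codebase: each setter copies the struct and returns the modified copy, so shared options are never mutated in place. A sketch of overriding the server builder; wrapServer is a hypothetical decorator, not part of this change:

import (
	"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
	"github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/node"
	"github.com/m3db/m3/src/x/instrument"

	"github.com/uber/tchannel-go"
	"github.com/uber/tchannel-go/thrift"
)

func newNodeOptions() node.Options {
	return node.NewOptions(&tchannel.ChannelOptions{}).
		SetInstrumentOptions(instrument.NewOptions()).
		SetTChanNodeServerFn(func(
			service node.Service,
			iOpts instrument.Options,
		) thrift.TChanServer {
			// wrapServer is hypothetical: picture a decorator that adds
			// tracing or metrics around the default generated server.
			return wrapServer(rpc.NewTChanNodeServer(service), iOpts)
		})
}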
diff --git a/src/dbnode/network/server/tchannelthrift/node/server.go b/src/dbnode/network/server/tchannelthrift/node/server.go
index 3327757bb2..dfe44af0a2 100644
--- a/src/dbnode/network/server/tchannelthrift/node/server.go
+++ b/src/dbnode/network/server/tchannelthrift/node/server.go
@@ -21,7 +21,6 @@
package node
import (
- "github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
ns "github.com/m3db/m3/src/dbnode/network/server"
"github.com/m3db/m3/src/dbnode/network/server/tchannelthrift"
"github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/node/channel"
@@ -34,7 +33,7 @@ type server struct {
service Service
address string
contextPool context.Pool
- opts *tchannel.ChannelOptions
+ opts Options
}
// NewServer creates a new node TChannel Thrift network service
@@ -42,13 +41,8 @@ func NewServer(
service Service,
address string,
contextPool context.Pool,
- opts *tchannel.ChannelOptions,
+ opts Options,
) ns.NetworkService {
- // Make the opts immutable on the way in
- if opts != nil {
- immutableOpts := *opts
- opts = &immutableOpts
- }
return &server{
service: service,
address: address,
@@ -58,13 +52,15 @@ func NewServer(
}
func (s *server) ListenAndServe() (ns.Close, error) {
- channel, err := tchannel.NewChannel(channel.ChannelName, s.opts)
+ chanOpts := s.opts.ChannelOptions()
+ channel, err := tchannel.NewChannel(channel.ChannelName, chanOpts)
if err != nil {
return nil, err
}
- tchannelthrift.RegisterServer(channel, rpc.NewTChanNodeServer(s.service), s.contextPool)
-
+ iOpts := s.opts.InstrumentOptions()
+ server := s.opts.TChanNodeServerFn()(s.service, iOpts)
+ tchannelthrift.RegisterServer(channel, server, s.contextPool)
channel.ListenAndServe(s.address)
return channel.Close, nil
diff --git a/src/dbnode/network/server/tchannelthrift/node/service.go b/src/dbnode/network/server/tchannelthrift/node/service.go
index 1ab361fe61..652b47c409 100644
--- a/src/dbnode/network/server/tchannelthrift/node/service.go
+++ b/src/dbnode/network/server/tchannelthrift/node/service.go
@@ -23,6 +23,7 @@ package node
import (
"errors"
"fmt"
+ "runtime"
"sort"
"sync"
"time"
@@ -37,11 +38,12 @@ import (
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/tracepoint"
- "github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/context"
+ xdebug "github.com/m3db/m3/src/x/debug"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -118,23 +120,23 @@ type serviceMetrics struct {
overloadRejected tally.Counter
}
-func newServiceMetrics(scope tally.Scope, samplingRate float64) serviceMetrics {
+func newServiceMetrics(scope tally.Scope, opts instrument.TimerOptions) serviceMetrics {
return serviceMetrics{
- fetch: instrument.NewMethodMetrics(scope, "fetch", samplingRate),
- fetchTagged: instrument.NewMethodMetrics(scope, "fetchTagged", samplingRate),
- aggregate: instrument.NewMethodMetrics(scope, "aggregate", samplingRate),
- write: instrument.NewMethodMetrics(scope, "write", samplingRate),
- writeTagged: instrument.NewMethodMetrics(scope, "writeTagged", samplingRate),
- fetchBlocks: instrument.NewMethodMetrics(scope, "fetchBlocks", samplingRate),
- fetchBlocksMetadata: instrument.NewMethodMetrics(scope, "fetchBlocksMetadata", samplingRate),
- repair: instrument.NewMethodMetrics(scope, "repair", samplingRate),
- truncate: instrument.NewMethodMetrics(scope, "truncate", samplingRate),
+ fetch: instrument.NewMethodMetrics(scope, "fetch", opts),
+ fetchTagged: instrument.NewMethodMetrics(scope, "fetchTagged", opts),
+ aggregate: instrument.NewMethodMetrics(scope, "aggregate", opts),
+ write: instrument.NewMethodMetrics(scope, "write", opts),
+ writeTagged: instrument.NewMethodMetrics(scope, "writeTagged", opts),
+ fetchBlocks: instrument.NewMethodMetrics(scope, "fetchBlocks", opts),
+ fetchBlocksMetadata: instrument.NewMethodMetrics(scope, "fetchBlocksMetadata", opts),
+ repair: instrument.NewMethodMetrics(scope, "repair", opts),
+ truncate: instrument.NewMethodMetrics(scope, "truncate", opts),
fetchBatchRawRPCS: scope.Counter("fetchBatchRaw-rpcs"),
- fetchBatchRaw: instrument.NewBatchMethodMetrics(scope, "fetchBatchRaw", samplingRate),
+ fetchBatchRaw: instrument.NewBatchMethodMetrics(scope, "fetchBatchRaw", opts),
writeBatchRawRPCs: scope.Counter("writeBatchRaw-rpcs"),
- writeBatchRaw: instrument.NewBatchMethodMetrics(scope, "writeBatchRaw", samplingRate),
+ writeBatchRaw: instrument.NewBatchMethodMetrics(scope, "writeBatchRaw", opts),
writeTaggedBatchRawRPCs: scope.Counter("writeTaggedBatchRaw-rpcs"),
- writeTaggedBatchRaw: instrument.NewBatchMethodMetrics(scope, "writeTaggedBatchRaw", samplingRate),
+ writeTaggedBatchRaw: instrument.NewBatchMethodMetrics(scope, "writeTaggedBatchRaw", opts),
overloadRejected: scope.Counter("overload-rejected"),
}
}
@@ -161,6 +163,8 @@ type serviceState struct {
numOutstandingReadRPCs int
maxOutstandingReadRPCs int
+
+ profiles map[string]*xdebug.ContinuousFileProfile
}
func (s *serviceState) DB() (storage.Database, bool) {
@@ -246,6 +250,13 @@ type Service interface {
// Only safe to be called one time once the service has started.
SetDatabase(db storage.Database) error
+
+ // SetMetadata sets a metadata key to the given value.
+ SetMetadata(key, value string)
+
+ // GetMetadata returns the metadata for the given key and a bool indicating
+ // if it is present.
+ GetMetadata(key string) (string, bool)
}
// NewService creates a new node TChannel Thrift service
@@ -296,11 +307,12 @@ func NewService(db storage.Database, opts tchannelthrift.Options) Service {
},
maxOutstandingWriteRPCs: opts.MaxOutstandingWriteRequests(),
maxOutstandingReadRPCs: opts.MaxOutstandingReadRequests(),
+ profiles: make(map[string]*xdebug.ContinuousFileProfile),
},
logger: iopts.Logger(),
opts: opts,
nowFn: opts.ClockOptions().NowFn(),
- metrics: newServiceMetrics(scope, iopts.MetricsSamplingRate()),
+ metrics: newServiceMetrics(scope, iopts.TimerOptions()),
pools: pools{
id: opts.IdentifierPool(),
checkedBytesWrapper: opts.CheckedBytesWrapperPool(),
@@ -314,6 +326,34 @@ func NewService(db storage.Database, opts tchannelthrift.Options) Service {
}
}
+func (s *service) SetMetadata(key, value string) {
+ s.state.Lock()
+ defer s.state.Unlock()
+	// Copy the health state and update the single value, since in-flight
+	// requests might hold a reference to the current health result.
+ newHealth := &rpc.NodeHealthResult_{}
+ *newHealth = *s.state.health
+ var meta map[string]string
+ if curr := newHealth.Metadata; curr != nil {
+ meta = make(map[string]string, len(curr)+1)
+ for k, v := range curr {
+ meta[k] = v
+ }
+ } else {
+ meta = make(map[string]string, 8)
+ }
+ meta[key] = value
+ newHealth.Metadata = meta
+ s.state.health = newHealth
+}
+
+func (s *service) GetMetadata(key string) (string, bool) {
+ s.state.RLock()
+ md, found := s.state.health.Metadata[key]
+ s.state.RUnlock()
+ return md, found
+}
+
func (s *service) Health(ctx thrift.Context) (*rpc.NodeHealthResult_, error) {
health, ok := s.state.Health()
if !ok {
@@ -450,7 +490,7 @@ func (s *service) query(ctx context.Context, db storage.Database, req *rpc.Query
EndExclusive: end,
}
if l := req.Limit; l != nil {
- opts.Limit = int(*l)
+ opts.SeriesLimit = int(*l)
}
queryResult, err := db.QueryIDs(ctx, nsID, index.Query{Query: q}, opts)
if err != nil {
@@ -812,12 +852,14 @@ func (s *service) AggregateRaw(tctx thrift.Context, req *rpc.AggregateQueryRawRe
TagName: entry.Key().Bytes(),
}
tagValues := entry.Value()
- tagValuesMap := tagValues.Map()
- responseElem.TagValues = make([]*rpc.AggregateQueryRawResultTagValueElement, 0, tagValuesMap.Len())
- for _, entry := range tagValuesMap.Iter() {
- responseElem.TagValues = append(responseElem.TagValues, &rpc.AggregateQueryRawResultTagValueElement{
- TagValue: entry.Key().Bytes(),
- })
+ if tagValues.HasValues() {
+ tagValuesMap := tagValues.Map()
+ responseElem.TagValues = make([]*rpc.AggregateQueryRawResultTagValueElement, 0, tagValuesMap.Len())
+ for _, entry := range tagValuesMap.Iter() {
+ responseElem.TagValues = append(responseElem.TagValues, &rpc.AggregateQueryRawResultTagValueElement{
+ TagValue: entry.Key().Bytes(),
+ })
+ }
}
response.Results = append(response.Results, responseElem)
}
@@ -1377,7 +1419,7 @@ func (s *service) WriteBatchRaw(tctx thrift.Context, req *rpc.WriteBatchRawReque
)
}
- err = db.WriteBatch(ctx, nsID, batchWriter.(ts.WriteBatch),
+ err = db.WriteBatch(ctx, nsID, batchWriter.(writes.WriteBatch),
pooledReq)
if err != nil {
return convert.ToRPCError(err)
@@ -1437,7 +1479,7 @@ func (s *service) WriteBatchRawV2(tctx thrift.Context, req *rpc.WriteBatchRawV2R
var (
nsID ident.ID
nsIdx int64
- batchWriter ts.BatchWriter
+ batchWriter writes.BatchWriter
retryableErrors int
nonRetryableErrors int
@@ -1445,7 +1487,7 @@ func (s *service) WriteBatchRawV2(tctx thrift.Context, req *rpc.WriteBatchRawV2R
for i, elem := range req.Elements {
if nsID == nil || elem.NameSpace != nsIdx {
if batchWriter != nil {
- err = db.WriteBatch(ctx, nsID, batchWriter.(ts.WriteBatch), pooledReq)
+ err = db.WriteBatch(ctx, nsID, batchWriter.(writes.WriteBatch), pooledReq)
if err != nil {
return convert.ToRPCError(err)
}
@@ -1492,7 +1534,7 @@ func (s *service) WriteBatchRawV2(tctx thrift.Context, req *rpc.WriteBatchRawV2R
if batchWriter != nil {
// Write the last batch.
- err = db.WriteBatch(ctx, nsID, batchWriter.(ts.WriteBatch), pooledReq)
+ err = db.WriteBatch(ctx, nsID, batchWriter.(writes.WriteBatch), pooledReq)
if err != nil {
return convert.ToRPCError(err)
}
@@ -1647,7 +1689,7 @@ func (s *service) WriteTaggedBatchRawV2(tctx thrift.Context, req *rpc.WriteTagge
var (
nsID ident.ID
nsIdx int64
- batchWriter ts.BatchWriter
+ batchWriter writes.BatchWriter
retryableErrors int
nonRetryableErrors int
@@ -1655,7 +1697,7 @@ func (s *service) WriteTaggedBatchRawV2(tctx thrift.Context, req *rpc.WriteTagge
for i, elem := range req.Elements {
if nsID == nil || elem.NameSpace != nsIdx {
if batchWriter != nil {
- err = db.WriteTaggedBatch(ctx, nsID, batchWriter.(ts.WriteBatch), pooledReq)
+ err = db.WriteTaggedBatch(ctx, nsID, batchWriter.(writes.WriteBatch), pooledReq)
if err != nil {
return convert.ToRPCError(err)
}
@@ -1712,7 +1754,7 @@ func (s *service) WriteTaggedBatchRawV2(tctx thrift.Context, req *rpc.WriteTagge
if batchWriter != nil {
// Write the last batch.
- err = db.WriteTaggedBatch(ctx, nsID, batchWriter.(ts.WriteBatch), pooledReq)
+ err = db.WriteTaggedBatch(ctx, nsID, batchWriter.(writes.WriteBatch), pooledReq)
if err != nil {
return convert.ToRPCError(err)
}
@@ -1940,6 +1982,139 @@ func (s *service) SetWriteNewSeriesLimitPerShardPerSecond(
return s.GetWriteNewSeriesLimitPerShardPerSecond(ctx)
}
+func (s *service) DebugProfileStart(
+ ctx thrift.Context,
+ req *rpc.DebugProfileStartRequest,
+) (*rpc.DebugProfileStartResult_, error) {
+ s.state.Lock()
+ defer s.state.Unlock()
+
+ _, ok := s.state.profiles[req.Name]
+ if ok {
+ err := fmt.Errorf("profile already exists: %s", req.Name)
+ return nil, tterrors.NewBadRequestError(err)
+ }
+
+ var (
+ interval time.Duration
+ duration time.Duration
+ debug int
+ err error
+ )
+ if v := req.Interval; v != nil {
+ interval, err = time.ParseDuration(*v)
+ if err != nil {
+ return nil, tterrors.NewBadRequestError(err)
+ }
+ }
+ if v := req.Duration; v != nil {
+ duration, err = time.ParseDuration(*v)
+ if err != nil {
+ return nil, tterrors.NewBadRequestError(err)
+ }
+ }
+ if v := req.Debug; v != nil {
+ debug = int(*v)
+ }
+
+ conditional := func() bool {
+ if v := req.ConditionalNumGoroutinesGreaterThan; v != nil {
+ if runtime.NumGoroutine() <= int(*v) {
+ return false
+ }
+ }
+ if v := req.ConditionalNumGoroutinesLessThan; v != nil {
+ if runtime.NumGoroutine() >= int(*v) {
+ return false
+ }
+ }
+ if v := req.ConditionalIsOverloaded; v != nil {
+ overloaded := s.state.db != nil && s.state.db.IsOverloaded()
+ if *v != overloaded {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ p, err := xdebug.NewContinuousFileProfile(xdebug.ContinuousFileProfileOptions{
+ FilePathTemplate: req.FilePathTemplate,
+ ProfileName: req.Name,
+ ProfileDuration: duration,
+ ProfileDebug: debug,
+ Conditional: conditional,
+ Interval: interval,
+ InstrumentOptions: s.opts.InstrumentOptions(),
+ })
+ if err != nil {
+ return nil, tterrors.NewBadRequestError(err)
+ }
+
+ if err := p.Start(); err != nil {
+ return nil, err
+ }
+
+ s.state.profiles[req.Name] = p
+
+ return &rpc.DebugProfileStartResult_{}, nil
+}
+
+func (s *service) DebugProfileStop(
+ ctx thrift.Context,
+ req *rpc.DebugProfileStopRequest,
+) (*rpc.DebugProfileStopResult_, error) {
+ s.state.Lock()
+ defer s.state.Unlock()
+
+ existing, ok := s.state.profiles[req.Name]
+ if !ok {
+ err := fmt.Errorf("profile does not exist: %s", req.Name)
+ return nil, tterrors.NewBadRequestError(err)
+ }
+
+ if err := existing.Stop(); err != nil {
+ return nil, err
+ }
+
+ delete(s.state.profiles, req.Name)
+
+ return &rpc.DebugProfileStopResult_{}, nil
+}
+
+func (s *service) DebugIndexMemorySegments(
+ ctx thrift.Context,
+ req *rpc.DebugIndexMemorySegmentsRequest,
+) (
+ *rpc.DebugIndexMemorySegmentsResult_,
+ error,
+) {
+ db, err := s.startRPCWithDB()
+ if err != nil {
+ return nil, err
+ }
+
+ var multiErr xerrors.MultiError
+ for _, ns := range db.Namespaces() {
+ idx, err := ns.Index()
+ if err != nil {
+ return nil, err
+ }
+
+ if err := idx.DebugMemorySegments(storage.DebugMemorySegmentsOptions{
+ OutputDirectory: req.Directory,
+ }); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := multiErr.FinalError(); err != nil {
+ return nil, err
+ }
+
+ return &rpc.DebugIndexMemorySegmentsResult_{}, nil
+}
+
func (s *service) SetDatabase(db storage.Database) error {
s.state.Lock()
defer s.state.Unlock()
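DebugProfileStart and DebugProfileStop keep a named registry of continuous file profiles under the service lock, with optional goroutine-count and overload conditions gating each capture. A client-side sketch of driving them, assuming the generated rpc.TChanNode client exposes the new methods; all field values are illustrative:

func runGoroutineProfile(tctx thrift.Context, nodeClient rpc.TChanNode) error {
	interval := "1m"
	duration := "10s"
	goroutines := int64(50000)
	_, err := nodeClient.DebugProfileStart(tctx, &rpc.DebugProfileStartRequest{
		Name:             "goroutine",
		FilePathTemplate: "/var/tmp/profile.out", // illustrative path
		Interval:         &interval,
		Duration:         &duration,
		// Only capture while the goroutine count exceeds the threshold.
		ConditionalNumGoroutinesGreaterThan: &goroutines,
	})
	if err != nil {
		return err
	}
	// Later, stop and deregister the profile by name.
	_, err = nodeClient.DebugProfileStop(tctx, &rpc.DebugProfileStopRequest{
		Name: "goroutine",
	})
	return err
}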
diff --git a/src/dbnode/network/server/tchannelthrift/node/service_test.go b/src/dbnode/network/server/tchannelthrift/node/service_test.go
index b371031998..9e43ae2561 100644
--- a/src/dbnode/network/server/tchannelthrift/node/service_test.go
+++ b/src/dbnode/network/server/tchannelthrift/node/service_test.go
@@ -26,10 +26,10 @@ import (
"errors"
"fmt"
"sort"
+ "sync"
"testing"
"time"
- "github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/network/server/tchannelthrift"
@@ -42,11 +42,13 @@ import (
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/tracepoint"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/m3ninx/idx"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/serialize"
+ xtest "github.com/m3db/m3/src/x/test"
xtime "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
@@ -72,7 +74,7 @@ func init() {
}
func TestServiceHealth(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -104,7 +106,7 @@ func TestServiceHealth(t *testing.T) {
}
func TestServiceBootstrapped(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -177,7 +179,7 @@ func TestServiceBootstrappedInPlacementOrNoPlacement(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
// Simulate placement
@@ -214,7 +216,7 @@ func TestServiceBootstrappedInPlacementOrNoPlacement(t *testing.T) {
}
func TestServiceQuery(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -302,7 +304,7 @@ func TestServiceQuery(t *testing.T) {
index.QueryOptions{
StartInclusive: start,
EndExclusive: end,
- Limit: 10,
+ SeriesLimit: 10,
}).Return(index.QueryResult{Results: resMap, Exhaustive: true}, nil)
limit := int64(10)
@@ -348,8 +350,45 @@ func TestServiceQuery(t *testing.T) {
}
}
+func TestServiceSetMetadata(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ size := 100
+ mockDB := storage.NewMockDatabase(ctrl)
+ service := NewService(mockDB, testTChannelThriftOptions).(*service)
+ metas := make([]string, 0, size)
+ for i := 0; i < size; i++ {
+ metas = append(metas, fmt.Sprint(i))
+ }
+
+ var wg sync.WaitGroup
+ for _, md := range metas {
+ wg.Add(1)
+ md := md
+ go func() {
+ service.SetMetadata(md, md)
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+ for _, md := range metas {
+ wg.Add(1)
+ md := md
+ go func() {
+ meta, ok := service.GetMetadata(md)
+ assert.True(t, ok)
+ assert.Equal(t, meta, md)
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+}
+
func TestServiceQueryOverloaded(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -389,7 +428,7 @@ func TestServiceQueryOverloaded(t *testing.T) {
}
func TestServiceQueryDatabaseNotSet(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
var (
@@ -425,7 +464,7 @@ func TestServiceQueryDatabaseNotSet(t *testing.T) {
}
func TestServiceQueryUnknownErr(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -457,7 +496,7 @@ func TestServiceQueryUnknownErr(t *testing.T) {
index.QueryOptions{
StartInclusive: start,
EndExclusive: end,
- Limit: 10,
+ SeriesLimit: 10,
}).Return(index.QueryResult{}, unknownErr)
limit := int64(10)
@@ -480,7 +519,7 @@ func TestServiceQueryUnknownErr(t *testing.T) {
}
func TestServiceFetch(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -522,8 +561,8 @@ func TestServiceFetch(t *testing.T) {
mockDB.EXPECT().
ReadEncoded(ctx, ident.NewIDMatcher(nsID), ident.NewIDMatcher("foo"), start, end).
Return([][]xio.BlockReader{
- []xio.BlockReader{
- xio.BlockReader{
+ {
+ {
SegmentReader: stream,
},
},
@@ -547,7 +586,7 @@ func TestServiceFetch(t *testing.T) {
}
func TestServiceFetchIsOverloaded(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -580,7 +619,7 @@ func TestServiceFetchIsOverloaded(t *testing.T) {
}
func TestServiceFetchDatabaseNotSet(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
var (
@@ -609,7 +648,7 @@ func TestServiceFetchDatabaseNotSet(t *testing.T) {
}
func TestServiceFetchUnknownErr(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
@@ -644,7 +683,7 @@ func TestServiceFetchUnknownErr(t *testing.T) {
}
func TestServiceFetchBatchRaw(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -694,8 +733,8 @@ func TestServiceFetchBatchRaw(t *testing.T) {
mockDB.EXPECT().
ReadEncoded(ctx, ident.NewIDMatcher(nsID), ident.NewIDMatcher(id), start, end).
Return([][]xio.BlockReader{
- []xio.BlockReader{
- xio.BlockReader{
+ {
+ {
SegmentReader: stream,
},
},
@@ -741,7 +780,7 @@ func TestServiceFetchBatchRaw(t *testing.T) {
}
func TestServiceFetchBatchRawV2MultiNS(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -796,8 +835,8 @@ func TestServiceFetchBatchRawV2MultiNS(t *testing.T) {
mockDB.EXPECT().
ReadEncoded(ctx, ident.NewIDMatcher(nsID), ident.NewIDMatcher(id), start, end).
Return([][]xio.BlockReader{
- []xio.BlockReader{
- xio.BlockReader{
+ {
+ {
SegmentReader: stream,
},
},
@@ -806,14 +845,14 @@ func TestServiceFetchBatchRawV2MultiNS(t *testing.T) {
ids := [][]byte{[]byte("foo"), []byte("bar")}
elements := []*rpc.FetchBatchRawV2RequestElement{
- &rpc.FetchBatchRawV2RequestElement{
+ {
NameSpace: 0,
RangeStart: start.Unix(),
RangeEnd: end.Unix(),
ID: []byte("foo"),
RangeTimeType: rpc.TimeType_UNIX_SECONDS,
},
- &rpc.FetchBatchRawV2RequestElement{
+ {
NameSpace: 1,
RangeStart: start.Unix(),
RangeEnd: end.Unix(),
@@ -858,7 +897,7 @@ func TestServiceFetchBatchRawV2MultiNS(t *testing.T) {
// TestServiceFetchBatchRawOverMaxOutstandingRequests tests that the FetchBatchRaw endpoint
// will reject requests if the number of outstanding read requests has hit the maximum.
func TestServiceFetchBatchRawOverMaxOutstandingRequests(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -914,8 +953,8 @@ func TestServiceFetchBatchRawOverMaxOutstandingRequests(t *testing.T) {
<-testIsComplete
}).
Return([][]xio.BlockReader{
- []xio.BlockReader{
- xio.BlockReader{
+ {
+ {
SegmentReader: stream,
},
},
@@ -955,7 +994,7 @@ func TestServiceFetchBatchRawOverMaxOutstandingRequests(t *testing.T) {
}
func TestServiceFetchBatchRawUnknownError(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -1004,7 +1043,7 @@ func TestServiceFetchBatchRawUnknownError(t *testing.T) {
}
func TestServiceFetchBatchRawIsOverloaded(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -1037,7 +1076,7 @@ func TestServiceFetchBatchRawIsOverloaded(t *testing.T) {
}
func TestServiceFetchBatchRawDatabaseNotSet(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
var (
@@ -1064,7 +1103,7 @@ func TestServiceFetchBatchRawDatabaseNotSet(t *testing.T) {
}
func TestServiceFetchBlocksRaw(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
nsID := "metrics"
@@ -1116,11 +1155,9 @@ func TestServiceFetchBlocksRaw(t *testing.T) {
seg, err := streams[id].Segment()
require.NoError(t, err)
- checksum := digest.SegmentChecksum(seg)
- checksums[id] = checksum
-
+ checksums[id] = seg.CalculateChecksum()
expectedBlockReader := []xio.BlockReader{
- xio.BlockReader{
+ {
SegmentReader: stream,
Start: start,
},
@@ -1138,11 +1175,11 @@ func TestServiceFetchBlocksRaw(t *testing.T) {
NameSpace: []byte(nsID),
Shard: 0,
Elements: []*rpc.FetchBlocksRawRequestElement{
- &rpc.FetchBlocksRawRequestElement{
+ {
ID: ids[0],
Starts: []int64{start.UnixNano()},
},
- &rpc.FetchBlocksRawRequestElement{
+ {
ID: ids[1],
Starts: []int64{start.UnixNano()},
},
@@ -1181,7 +1218,7 @@ func TestServiceFetchBlocksRaw(t *testing.T) {
}
func TestServiceFetchBlocksRawIsOverloaded(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
nsID := "metrics"
@@ -1210,11 +1247,11 @@ func TestServiceFetchBlocksRawIsOverloaded(t *testing.T) {
NameSpace: []byte(nsID),
Shard: 0,
Elements: []*rpc.FetchBlocksRawRequestElement{
- &rpc.FetchBlocksRawRequestElement{
+ {
ID: ids[0],
Starts: []int64{start.UnixNano()},
},
- &rpc.FetchBlocksRawRequestElement{
+ {
ID: ids[1],
Starts: []int64{start.UnixNano()},
},
@@ -1224,7 +1261,7 @@ func TestServiceFetchBlocksRawIsOverloaded(t *testing.T) {
}
func TestServiceFetchBlocksRawDatabaseNotSet(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
var (
@@ -1246,11 +1283,11 @@ func TestServiceFetchBlocksRawDatabaseNotSet(t *testing.T) {
NameSpace: []byte(nsID),
Shard: 0,
Elements: []*rpc.FetchBlocksRawRequestElement{
- &rpc.FetchBlocksRawRequestElement{
+ {
ID: ids[0],
Starts: []int64{start.UnixNano()},
},
- &rpc.FetchBlocksRawRequestElement{
+ {
ID: ids[1],
Starts: []int64{start.UnixNano()},
},
@@ -1260,7 +1297,7 @@ func TestServiceFetchBlocksRawDatabaseNotSet(t *testing.T) {
}
func TestServiceFetchBlocksMetadataEndpointV2Raw(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
// Setup mock db / service / context
@@ -1401,7 +1438,7 @@ func TestServiceFetchBlocksMetadataEndpointV2Raw(t *testing.T) {
}
func TestServiceFetchBlocksMetadataEndpointV2RawIsOverloaded(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
// Setup mock db / service / context
@@ -1442,7 +1479,7 @@ func TestServiceFetchBlocksMetadataEndpointV2RawIsOverloaded(t *testing.T) {
}
func TestServiceFetchBlocksMetadataEndpointV2RawDatabaseNotSet(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
// Configure constants / options
@@ -1478,7 +1515,7 @@ func TestServiceFetchBlocksMetadataEndpointV2RawDatabaseNotSet(t *testing.T) {
}
func TestServiceFetchTagged(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -1560,7 +1597,7 @@ func TestServiceFetchTagged(t *testing.T) {
index.QueryOptions{
StartInclusive: start,
EndExclusive: end,
- Limit: 10,
+ SeriesLimit: 10,
}).Return(index.QueryResult{Results: resMap, Exhaustive: true}, nil)
startNanos, err := convert.ToValue(start, rpc.TimeType_UNIX_NANOSECONDS)
@@ -1626,7 +1663,7 @@ func TestServiceFetchTagged(t *testing.T) {
}
func TestServiceFetchTaggedIsOverloaded(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -1681,7 +1718,7 @@ func TestServiceFetchTaggedIsOverloaded(t *testing.T) {
}
func TestServiceFetchTaggedDatabaseNotSet(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
var (
@@ -1722,7 +1759,7 @@ func TestServiceFetchTaggedDatabaseNotSet(t *testing.T) {
}
func TestServiceFetchTaggedNoData(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -1756,7 +1793,7 @@ func TestServiceFetchTaggedNoData(t *testing.T) {
index.QueryOptions{
StartInclusive: start,
EndExclusive: end,
- Limit: 10,
+ SeriesLimit: 10,
}).Return(index.QueryResult{Results: resMap, Exhaustive: true}, nil)
startNanos, err := convert.ToValue(start, rpc.TimeType_UNIX_NANOSECONDS)
@@ -1791,7 +1828,7 @@ func TestServiceFetchTaggedNoData(t *testing.T) {
}
func TestServiceFetchTaggedErrs(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -1829,7 +1866,7 @@ func TestServiceFetchTaggedErrs(t *testing.T) {
index.QueryOptions{
StartInclusive: start,
EndExclusive: end,
- Limit: 10,
+ SeriesLimit: 10,
}).Return(index.QueryResult{}, fmt.Errorf("random err"))
_, err = service.FetchTagged(tctx, &rpc.FetchTaggedRequest{
NameSpace: []byte(nsID),
@@ -1843,7 +1880,7 @@ func TestServiceFetchTaggedErrs(t *testing.T) {
}
func TestServiceAggregate(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -1879,7 +1916,7 @@ func TestServiceAggregate(t *testing.T) {
QueryOptions: index.QueryOptions{
StartInclusive: start,
EndExclusive: end,
- Limit: 10,
+ SeriesLimit: 10,
},
FieldFilter: index.AggregateFieldFilter{
[]byte("foo"), []byte("bar"),
@@ -1926,8 +1963,84 @@ func TestServiceAggregate(t *testing.T) {
require.Equal(t, 0, len(r.Results[1].TagValues))
}
+func TestServiceAggregateNameOnly(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ mockDB := storage.NewMockDatabase(ctrl)
+ mockDB.EXPECT().Options().Return(testStorageOpts).AnyTimes()
+ mockDB.EXPECT().IsOverloaded().Return(false)
+
+ service := NewService(mockDB, testTChannelThriftOptions).(*service)
+
+ tctx, _ := tchannelthrift.NewContext(time.Minute)
+ ctx := tchannelthrift.Context(tctx)
+ defer ctx.Close()
+
+ start := time.Now().Add(-2 * time.Hour)
+ end := start.Add(2 * time.Hour)
+
+ start, end = start.Truncate(time.Second), end.Truncate(time.Second)
+ nsID := "metrics"
+
+ req, err := idx.NewRegexpQuery([]byte("foo"), []byte("b.*"))
+ require.NoError(t, err)
+ qry := index.Query{Query: req}
+
+ resMap := index.NewAggregateResults(ident.StringID(nsID),
+ index.AggregateResultsOptions{}, testIndexOptions)
+ resMap.Map().Set(ident.StringID("foo"), index.AggregateValues{})
+ resMap.Map().Set(ident.StringID("bar"), index.AggregateValues{})
+ mockDB.EXPECT().AggregateQuery(
+ ctx,
+ ident.NewIDMatcher(nsID),
+ index.NewQueryMatcher(qry),
+ index.AggregationOptions{
+ QueryOptions: index.QueryOptions{
+ StartInclusive: start,
+ EndExclusive: end,
+ SeriesLimit: 10,
+ },
+ FieldFilter: index.AggregateFieldFilter{
+ []byte("foo"), []byte("bar"),
+ },
+ Type: index.AggregateTagNames,
+ }).Return(
+ index.AggregateQueryResult{Results: resMap, Exhaustive: true}, nil)
+
+ startNanos, err := convert.ToValue(start, rpc.TimeType_UNIX_NANOSECONDS)
+ require.NoError(t, err)
+ endNanos, err := convert.ToValue(end, rpc.TimeType_UNIX_NANOSECONDS)
+ require.NoError(t, err)
+ var limit int64 = 10
+ data, err := idx.Marshal(req)
+ require.NoError(t, err)
+ r, err := service.AggregateRaw(tctx, &rpc.AggregateQueryRawRequest{
+ NameSpace: []byte(nsID),
+ Query: data,
+ RangeStart: startNanos,
+ RangeEnd: endNanos,
+ Limit: &limit,
+ AggregateQueryType: rpc.AggregateQueryType_AGGREGATE_BY_TAG_NAME,
+ TagNameFilter: [][]byte{
+ []byte("foo"), []byte("bar"),
+ },
+ })
+ require.NoError(t, err)
+
+	// Sort results to make the test deterministic.
+ sort.Slice(r.Results, func(i, j int) bool {
+ return bytes.Compare(r.Results[i].TagName, r.Results[j].TagName) < 0
+ })
+ require.Equal(t, 2, len(r.Results))
+ require.Equal(t, "bar", string(r.Results[0].TagName))
+ require.Equal(t, 0, len(r.Results[0].TagValues))
+ require.Equal(t, "foo", string(r.Results[1].TagName))
+ require.Equal(t, 0, len(r.Results[1].TagValues))
+}
+
func TestServiceWrite(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -1965,7 +2078,7 @@ func TestServiceWrite(t *testing.T) {
}
func TestServiceWriteOverloaded(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -1991,7 +2104,7 @@ func TestServiceWriteOverloaded(t *testing.T) {
}
func TestServiceWriteDatabaseNotSet(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
var (
@@ -2015,7 +2128,7 @@ func TestServiceWriteDatabaseNotSet(t *testing.T) {
}
func TestServiceWriteTagged(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2066,7 +2179,7 @@ func TestServiceWriteTagged(t *testing.T) {
}
func TestServiceWriteTaggedOverloaded(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2092,7 +2205,7 @@ func TestServiceWriteTaggedOverloaded(t *testing.T) {
}
func TestServiceWriteTaggedDatabaseNotSet(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
var (
@@ -2115,7 +2228,7 @@ func TestServiceWriteTaggedDatabaseNotSet(t *testing.T) {
}
func TestServiceWriteBatchRaw(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2138,7 +2251,7 @@ func TestServiceWriteBatchRaw(t *testing.T) {
{"bar", time.Now().Truncate(time.Second), 42.42},
}
- writeBatch := ts.NewWriteBatch(len(values), ident.StringID(nsID), nil)
+ writeBatch := writes.NewWriteBatch(len(values), ident.StringID(nsID), nil)
mockDB.EXPECT().
BatchWriter(ident.NewIDMatcher(nsID), len(values)).
Return(writeBatch, nil)
@@ -2169,7 +2282,7 @@ func TestServiceWriteBatchRaw(t *testing.T) {
}
func TestServiceWriteBatchRawV2SingleNS(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2192,7 +2305,7 @@ func TestServiceWriteBatchRawV2SingleNS(t *testing.T) {
{"bar", time.Now().Truncate(time.Second), 42.42},
}
- writeBatch := ts.NewWriteBatch(len(values), ident.StringID(nsID), nil)
+ writeBatch := writes.NewWriteBatch(len(values), ident.StringID(nsID), nil)
mockDB.EXPECT().
BatchWriter(ident.NewIDMatcher(nsID), len(values)).
Return(writeBatch, nil)
@@ -2224,7 +2337,7 @@ func TestServiceWriteBatchRawV2SingleNS(t *testing.T) {
}
func TestServiceWriteBatchRawV2MultiNS(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2249,8 +2362,8 @@ func TestServiceWriteBatchRawV2MultiNS(t *testing.T) {
{"bar", time.Now().Truncate(time.Second), 42.42},
}
- writeBatch1 = ts.NewWriteBatch(len(values), ident.StringID(nsID1), nil)
- writeBatch2 = ts.NewWriteBatch(len(values), ident.StringID(nsID2), nil)
+ writeBatch1 = writes.NewWriteBatch(len(values), ident.StringID(nsID1), nil)
+ writeBatch2 = writes.NewWriteBatch(len(values), ident.StringID(nsID2), nil)
)
mockDB.EXPECT().
@@ -2292,7 +2405,7 @@ func TestServiceWriteBatchRawV2MultiNS(t *testing.T) {
}
func TestServiceWriteBatchRawOverloaded(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2314,7 +2427,7 @@ func TestServiceWriteBatchRawOverloaded(t *testing.T) {
// TestServiceWriteBatchRawOverMaxOutstandingRequests tests that the WriteBatchRaw endpoint
// will reject requests if the number of outstanding write requests has hit the maximum.
func TestServiceWriteBatchRawOverMaxOutstandingRequests(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2343,7 +2456,7 @@ func TestServiceWriteBatchRawOverMaxOutstandingRequests(t *testing.T) {
testIsComplete = make(chan struct{}, 0)
requestIsOutstanding = make(chan struct{}, 0)
)
- writeBatch := ts.NewWriteBatch(len(values), ident.StringID(nsID), nil)
+ writeBatch := writes.NewWriteBatch(len(values), ident.StringID(nsID), nil)
mockDB.EXPECT().
BatchWriter(ident.NewIDMatcher(nsID), len(values)).
Do(func(nsID ident.ID, numValues int) {
@@ -2400,7 +2513,7 @@ func TestServiceWriteBatchRawOverMaxOutstandingRequests(t *testing.T) {
}
func TestServiceWriteBatchRawDatabaseNotSet(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
var (
@@ -2417,7 +2530,7 @@ func TestServiceWriteBatchRawDatabaseNotSet(t *testing.T) {
}
func TestServiceWriteTaggedBatchRaw(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2451,7 +2564,7 @@ func TestServiceWriteTaggedBatchRaw(t *testing.T) {
{"bar", "c|dd", time.Now().Truncate(time.Second), 42.42},
}
- writeBatch := ts.NewWriteBatch(len(values), ident.StringID(nsID), nil)
+ writeBatch := writes.NewWriteBatch(len(values), ident.StringID(nsID), nil)
mockDB.EXPECT().
BatchWriter(ident.NewIDMatcher(nsID), len(values)).
Return(writeBatch, nil)
@@ -2483,7 +2596,7 @@ func TestServiceWriteTaggedBatchRaw(t *testing.T) {
}
func TestServiceWriteTaggedBatchRawV2(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2517,7 +2630,7 @@ func TestServiceWriteTaggedBatchRawV2(t *testing.T) {
{"bar", "c|dd", time.Now().Truncate(time.Second), 42.42},
}
- writeBatch := ts.NewWriteBatch(len(values), ident.StringID(nsID), nil)
+ writeBatch := writes.NewWriteBatch(len(values), ident.StringID(nsID), nil)
mockDB.EXPECT().
BatchWriter(ident.NewIDMatcher(nsID), len(values)).
Return(writeBatch, nil)
@@ -2550,7 +2663,7 @@ func TestServiceWriteTaggedBatchRawV2(t *testing.T) {
}
func TestServiceWriteTaggedBatchRawV2MultiNS(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2584,8 +2697,8 @@ func TestServiceWriteTaggedBatchRawV2MultiNS(t *testing.T) {
{"foo", "a|b", time.Now().Truncate(time.Second), 12.34},
{"bar", "c|dd", time.Now().Truncate(time.Second), 42.42},
}
- writeBatch1 = ts.NewWriteBatch(len(values), ident.StringID(nsID1), nil)
- writeBatch2 = ts.NewWriteBatch(len(values), ident.StringID(nsID2), nil)
+ writeBatch1 = writes.NewWriteBatch(len(values), ident.StringID(nsID1), nil)
+ writeBatch2 = writes.NewWriteBatch(len(values), ident.StringID(nsID2), nil)
)
mockDB.EXPECT().
@@ -2628,7 +2741,7 @@ func TestServiceWriteTaggedBatchRawV2MultiNS(t *testing.T) {
}
func TestServiceWriteTaggedBatchRawOverloaded(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2648,7 +2761,7 @@ func TestServiceWriteTaggedBatchRawOverloaded(t *testing.T) {
}
func TestServiceWriteTaggedBatchRawDatabaseNotSet(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
var (
@@ -2665,7 +2778,7 @@ func TestServiceWriteTaggedBatchRawDatabaseNotSet(t *testing.T) {
}
func TestServiceWriteTaggedBatchRawUnknownError(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2727,7 +2840,7 @@ func TestServiceWriteTaggedBatchRawUnknownError(t *testing.T) {
}
func TestServiceRepair(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2747,7 +2860,7 @@ func TestServiceRepair(t *testing.T) {
}
func TestServiceTruncate(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockDB := storage.NewMockDatabase(ctrl)
@@ -2772,7 +2885,7 @@ func TestServiceTruncate(t *testing.T) {
}
func TestServiceSetPersistRateLimit(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
runtimeOpts := runtime.NewOptions()
@@ -2807,7 +2920,7 @@ func TestServiceSetPersistRateLimit(t *testing.T) {
}
func TestServiceSetWriteNewSeriesAsync(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
runtimeOpts := runtime.NewOptions().
@@ -2839,7 +2952,7 @@ func TestServiceSetWriteNewSeriesAsync(t *testing.T) {
}
func TestServiceSetWriteNewSeriesBackoffDuration(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
runtimeOpts := runtime.NewOptions().
@@ -2874,7 +2987,7 @@ func TestServiceSetWriteNewSeriesBackoffDuration(t *testing.T) {
}
func TestServiceSetWriteNewSeriesLimitPerShardPerSecond(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
runtimeOpts := runtime.NewOptions().
diff --git a/src/dbnode/network/server/tchannelthrift/options.go b/src/dbnode/network/server/tchannelthrift/options.go
index 837d1ef6fd..9e2d7dbf21 100644
--- a/src/dbnode/network/server/tchannelthrift/options.go
+++ b/src/dbnode/network/server/tchannelthrift/options.go
@@ -62,11 +62,13 @@ func NewOptions() Options {
})
tagEncoderPool := serialize.NewTagEncoderPool(
- serialize.NewTagEncoderOptions(), poolOptions)
+ serialize.NewTagEncoderOptions(),
+ poolOptions)
tagEncoderPool.Init()
tagDecoderPool := serialize.NewTagDecoderPool(
- serialize.NewTagDecoderOptions(), poolOptions)
+ serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
+ poolOptions)
tagDecoderPool.Init()
bytesWrapperPool := xpool.NewCheckedBytesWrapperPool(poolOptions)
diff --git a/src/dbnode/persist/fs/bloom_filter.go b/src/dbnode/persist/fs/bloom_filter.go
index 4a2f400d54..09f6e2612d 100644
--- a/src/dbnode/persist/fs/bloom_filter.go
+++ b/src/dbnode/persist/fs/bloom_filter.go
@@ -23,7 +23,7 @@ package fs
import (
"os"
- "github.com/m3db/bloom"
+ "github.com/m3db/bloom/v4"
"github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/x/mmap"
)
diff --git a/src/dbnode/persist/fs/checked_bytes_by_id_new_map_gen.go b/src/dbnode/persist/fs/checked_bytes_by_id_new_map_gen.go
index 977a475856..2393e45d5c 100644
--- a/src/dbnode/persist/fs/checked_bytes_by_id_new_map_gen.go
+++ b/src/dbnode/persist/fs/checked_bytes_by_id_new_map_gen.go
@@ -28,7 +28,7 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
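Both import-path bumps above follow Go modules' semantic import versioning: a module at major version 2 or higher is imported with its version suffix while the package name stays the same. The matching go.mod requirements would look roughly like this (versions illustrative):

require (
	github.com/cespare/xxhash/v2 v2.1.1 // illustrative version
	github.com/m3db/bloom/v4 v4.0.0 // illustrative version
)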
diff --git a/src/dbnode/persist/fs/clone/cloner.go b/src/dbnode/persist/fs/clone/cloner.go
index 74cb7cf4e6..ea9acdae95 100644
--- a/src/dbnode/persist/fs/clone/cloner.go
+++ b/src/dbnode/persist/fs/clone/cloner.go
@@ -97,7 +97,9 @@ func (c *cloner) Clone(src FileSetID, dest FileSetID, destBlocksize time.Duratio
}
data.IncRef()
- if err := writer.Write(id, tags, data, checksum); err != nil {
+ metadata := persist.NewMetadataFromIDAndTags(id, tags,
+ persist.MetadataOptions{})
+ if err := writer.Write(metadata, data, checksum); err != nil {
return fmt.Errorf("unexpected error while writing data: %v", err)
}
data.DecRef()
diff --git a/src/dbnode/persist/fs/clone/cloner_test.go b/src/dbnode/persist/fs/clone/cloner_test.go
index b685b152e9..1ea8a8b447 100644
--- a/src/dbnode/persist/fs/clone/cloner_test.go
+++ b/src/dbnode/persist/fs/clone/cloner_test.go
@@ -29,6 +29,7 @@ import (
"testing"
"time"
+ "github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/ident"
@@ -154,7 +155,9 @@ func writeTestData(t *testing.T, bs time.Duration, src FileSetID, opts Options)
ident.StringTag("qux", "qaz"),
)
}
- require.NoError(t, w.Write(id, tags, testBytes, 1234))
+ metadata := persist.NewMetadataFromIDAndTags(id, tags,
+ persist.MetadataOptions{})
+ require.NoError(t, w.Write(metadata, testBytes, 1234))
}
require.NoError(t, w.Close())
}
diff --git a/src/dbnode/persist/fs/commitlog/chunk_reader.go b/src/dbnode/persist/fs/commitlog/chunk_reader.go
index 9e34f99a8d..8b2dd33077 100644
--- a/src/dbnode/persist/fs/commitlog/chunk_reader.go
+++ b/src/dbnode/persist/fs/commitlog/chunk_reader.go
@@ -22,6 +22,7 @@ package commitlog
import (
"bufio"
+ "io"
"os"
"github.com/m3db/m3/src/dbnode/digest"
@@ -37,23 +38,25 @@ const (
)
type chunkReader struct {
- fd *os.File
- buffer *bufio.Reader
- remaining int
- charBuff []byte
+ fd *os.File
+ buffer *bufio.Reader
+ chunkData []byte
+ chunkDataRemaining int
+ charBuff []byte
}
func newChunkReader(bufferLen int) *chunkReader {
return &chunkReader{
- buffer: bufio.NewReaderSize(nil, bufferLen),
- charBuff: make([]byte, 1),
+ buffer: bufio.NewReaderSize(nil, bufferLen),
+ chunkData: make([]byte, bufferLen),
+ charBuff: make([]byte, 1),
}
}
func (r *chunkReader) reset(fd *os.File) {
r.fd = fd
r.buffer.Reset(fd)
- r.remaining = 0
+ r.chunkDataRemaining = 0
}
func (r *chunkReader) readHeader() error {
@@ -80,18 +83,36 @@ func (r *chunkReader) readHeader() error {
return err
}
- // Verify data checksum
- data, err := r.buffer.Peek(int(size))
+	// Set up a chunk data buffer so that the chunk data can be loaded into it.
+ chunkDataSize := int(size)
+ if chunkDataSize > cap(r.chunkData) {
+ // Increase chunkData capacity so that it can fit the new chunkData.
+ chunkDataCap := cap(r.chunkData)
+ for chunkDataCap < chunkDataSize {
+ chunkDataCap *= 2
+ }
+ r.chunkData = make([]byte, chunkDataSize, chunkDataCap)
+ } else {
+ // Reuse existing chunk data buffer if possible.
+ r.chunkData = r.chunkData[:chunkDataSize]
+ }
+
+	// To validate the chunk data checksum, all of the chunk data needs to be
+	// loaded into memory at once. The chunk data size is not bounded by the
+	// flush size, so peeking at the chunk data to compute the checksum can fail
+	// with bufio's "buffer full" error. To circumvent this, load the chunk data
+	// into the chunk reader's own buffer and compute the checksum there,
+	// instead of computing it off of the fixed-size r.buffer by peeking.
+	// See https://github.com/m3db/m3/pull/2148 for details.
+ _, err = io.ReadFull(r.buffer, r.chunkData)
if err != nil {
return err
}
- if digest.Checksum(data) != checksumData {
+ // Verify data checksum
+ if digest.Checksum(r.chunkData) != checksumData {
return errCommitLogReaderChunkSizeChecksumMismatch
}
// Set remaining data to be consumed
- r.remaining = int(size)
+ r.chunkDataRemaining = int(size)
return nil
}
@@ -100,15 +121,13 @@ func (r *chunkReader) Read(p []byte) (int, error) {
size := len(p)
read := 0
// Check if requesting for size larger than this chunk
- if r.remaining < size {
+ if r.chunkDataRemaining < size {
// Copy any remaining
- if r.remaining > 0 {
- n, err := r.buffer.Read(p[:r.remaining])
- r.remaining -= n
+ if r.chunkDataRemaining > 0 {
+ chunkDataOffset := len(r.chunkData) - r.chunkDataRemaining
+ n := copy(p, r.chunkData[chunkDataOffset:])
+ r.chunkDataRemaining -= n
read += n
- if err != nil {
- return read, err
- }
}
// Read next header
@@ -125,10 +144,11 @@ func (r *chunkReader) Read(p []byte) (int, error) {
return read, err
}
- n, err := r.buffer.Read(p)
- r.remaining -= n
+ chunkDataOffset := len(r.chunkData) - r.chunkDataRemaining
+ n := copy(p, r.chunkData[chunkDataOffset:][:len(p)])
+ r.chunkDataRemaining -= n
read += n
- return read, err
+ return read, nil
}
func (r *chunkReader) ReadByte() (c byte, err error) {
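The switch from buffer.Peek to io.ReadFull above is worth spelling out: bufio.Reader.Peek can return at most the reader's internal buffer size, so any chunk larger than the flush-sized buffer fails with bufio.ErrBufferFull, whereas reading fully into a caller-owned, doubling buffer has no such cap. A self-contained sketch of both behaviors; readChunk and its names are illustrative only, not the m3 API:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

// readChunk grows buf (doubling, like chunkReader.chunkData) until it can
// hold size bytes, then fills it with io.ReadFull.
func readChunk(r io.Reader, buf []byte, size int) ([]byte, error) {
	if size > cap(buf) {
		c := cap(buf)
		if c == 0 {
			c = 1
		}
		for c < size {
			c *= 2
		}
		buf = make([]byte, size, c)
	} else {
		buf = buf[:size]
	}
	_, err := io.ReadFull(r, buf)
	return buf, err
}

func main() {
	payload := bytes.Repeat([]byte("x"), 64)
	br := bufio.NewReaderSize(bytes.NewReader(payload), 16)

	// Peek is capped by the bufio buffer size and fails here.
	_, err := br.Peek(64)
	fmt.Println(err) // bufio: buffer full

	// ReadFull into an owned, grown buffer succeeds regardless.
	buf, err := readChunk(br, make([]byte, 16), 64)
	fmt.Println(len(buf), err) // 64 <nil>
}
```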
diff --git a/src/dbnode/persist/fs/commitlog/commit_log.go b/src/dbnode/persist/fs/commitlog/commit_log.go
index 3df38972ce..40ab3b8298 100644
--- a/src/dbnode/persist/fs/commitlog/commit_log.go
+++ b/src/dbnode/persist/fs/commitlog/commit_log.go
@@ -31,6 +31,7 @@ import (
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
xtime "github.com/m3db/m3/src/x/time"
@@ -65,8 +66,8 @@ type commitLogFailFn func(err error)
// we can handle both cases without having to allocate as slice of size
// 1 to handle a single write.
type writeOrWriteBatch struct {
- write ts.Write
- writeBatch ts.WriteBatch
+ write writes.Write
+ writeBatch writes.WriteBatch
}
type commitLog struct {
@@ -449,8 +450,8 @@ func (l *commitLog) write() {
// We use these to make the batch and non-batched write paths the same
// by turning non-batched writes into a batch of size one while avoiding
// any allocations.
- var singleBatch = make([]ts.BatchWrite, 1)
- var batch []ts.BatchWrite
+ var singleBatch = make([]writes.BatchWrite, 1)
+ var batch []writes.BatchWrite
for write := range l.writes {
if write.eventType == flushEventType {
@@ -720,7 +721,7 @@ func (l *commitLog) Write(
annotation ts.Annotation,
) error {
return l.writeFn(ctx, writeOrWriteBatch{
- write: ts.Write{
+ write: writes.Write{
Series: series,
Datapoint: datapoint,
Unit: unit,
@@ -731,7 +732,7 @@ func (l *commitLog) Write(
func (l *commitLog) WriteBatch(
ctx context.Context,
- writes ts.WriteBatch,
+ writes writes.WriteBatch,
) error {
return l.writeFn(ctx, writeOrWriteBatch{
writeBatch: writes,
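The preallocated singleBatch slice above is what lets the non-batched write path reuse the batch machinery without a per-write allocation: the one-element slice is allocated once and refilled for every single write. A minimal sketch of the pattern with stand-in types:

```go
package main

import "fmt"

type write struct{ value float64 }

func process(batch []write) {
	for _, w := range batch {
		fmt.Println(w.value)
	}
}

func main() {
	// Allocated once; reused for every non-batched write so the single
	// write path and the batch path share the same downstream code
	// without allocating a fresh one-element slice per write.
	singleBatch := make([]write, 1)

	incoming := []write{{1.5}, {2.5}}
	for _, w := range incoming {
		singleBatch[0] = w
		process(singleBatch)
	}
}
```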
diff --git a/src/dbnode/persist/fs/commitlog/commit_log_conc_test.go b/src/dbnode/persist/fs/commitlog/commit_log_conc_test.go
index 213808714a..57677a0c41 100644
--- a/src/dbnode/persist/fs/commitlog/commit_log_conc_test.go
+++ b/src/dbnode/persist/fs/commitlog/commit_log_conc_test.go
@@ -61,7 +61,7 @@ func TestCommitLogActiveLogsConcurrency(t *testing.T) {
time.Sleep(time.Millisecond)
err := commitLog.Write(
context.NewContext(),
- testSeries(0, "foo.bar", testTags1, 127),
+ testSeries(t, opts, 0, "foo.bar", testTags1, 127),
ts.Datapoint{},
xtime.Second,
nil)
@@ -152,7 +152,7 @@ func TestCommitLogRotateLogsConcurrency(t *testing.T) {
time.Sleep(time.Millisecond)
err := commitLog.Write(
context.NewContext(),
- testSeries(0, "foo.bar", testTags1, 127),
+ testSeries(t, opts, 0, "foo.bar", testTags1, 127),
ts.Datapoint{},
xtime.Second,
nil)
diff --git a/src/dbnode/persist/fs/commitlog/commit_log_mock.go b/src/dbnode/persist/fs/commitlog/commit_log_mock.go
index 4eb0e1eb0a..fe0fa8c15c 100644
--- a/src/dbnode/persist/fs/commitlog/commit_log_mock.go
+++ b/src/dbnode/persist/fs/commitlog/commit_log_mock.go
@@ -32,6 +32,7 @@ import (
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -93,7 +94,7 @@ func (mr *MockCommitLogMockRecorder) Write(ctx, series, datapoint, unit, annotat
}
// WriteBatch mocks base method
-func (m *MockCommitLog) WriteBatch(ctx context.Context, writes ts.WriteBatch) error {
+func (m *MockCommitLog) WriteBatch(ctx context.Context, writes writes.WriteBatch) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteBatch", ctx, writes)
ret0, _ := ret[0].(error)
diff --git a/src/dbnode/persist/fs/commitlog/commit_log_test.go b/src/dbnode/persist/fs/commitlog/commit_log_test.go
index f82d496f06..ea4894aeaa 100644
--- a/src/dbnode/persist/fs/commitlog/commit_log_test.go
+++ b/src/dbnode/persist/fs/commitlog/commit_log_test.go
@@ -21,6 +21,7 @@
package commitlog
import (
+ "bytes"
"errors"
"fmt"
"io/ioutil"
@@ -37,6 +38,8 @@ import (
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
+ "github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
@@ -113,6 +116,12 @@ func newTestOptions(
return opts, scope
}
+func randomByteSlice(size int) []byte {
+	arr := make([]byte, size)
+	rand.Read(arr)
+	return arr
+}
+
func cleanup(t *testing.T, opts Options) {
filePathPrefix := opts.FilesystemOptions().FilePathPrefix()
require.NoError(t, os.RemoveAll(filePathPrefix))
@@ -128,16 +137,31 @@ type testWrite struct {
}
func testSeries(
+ t *testing.T,
+ opts Options,
uniqueIndex uint64,
id string,
tags ident.Tags,
shard uint32,
) ts.Series {
+ var (
+ tagEncoderPool = opts.FilesystemOptions().TagEncoderPool()
+ tagSliceIter = ident.NewTagsIterator(ident.Tags{})
+ )
+ tagSliceIter.Reset(tags)
+
+ tagEncoder := tagEncoderPool.Get()
+ err := tagEncoder.Encode(tagSliceIter)
+ require.NoError(t, err)
+
+ encodedTagsChecked, ok := tagEncoder.Data()
+ require.True(t, ok)
+
return ts.Series{
UniqueIndex: uniqueIndex,
Namespace: ident.StringID("testNS"),
ID: ident.StringID(id),
- Tags: tags,
+ EncodedTags: ts.EncodedTags(encodedTagsChecked.Bytes()),
Shard: shard,
}
}
@@ -154,7 +178,7 @@ func (w testWrite) assert(
require.Equal(t, w.series.Shard, series.Shard)
// ident.Tags.Equal will compare length
- require.True(t, w.series.Tags.Equal(series.Tags))
+ require.True(t, bytes.Equal(w.series.EncodedTags, series.EncodedTags))
require.True(t, w.t.Equal(datapoint.Timestamp))
require.Equal(t, w.v, datapoint.Value)
@@ -354,23 +378,75 @@ func TestCommitLogWrite(t *testing.T) {
opts, scope := newTestOptions(t, overrides{
strategy: StrategyWriteWait,
})
- defer cleanup(t, opts)
-
- commitLog := newTestCommitLog(t, opts)
- writes := []testWrite{
- {testSeries(0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, []byte{1, 2, 3}, nil},
- {testSeries(1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, nil, nil},
+ testCases := []struct {
+ testName string
+ writes []testWrite
+ }{
+ {
+			"Attempts to perform 2 writes in parallel to a commit log",
+ []testWrite{
+ {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, []byte{1, 2, 3}, nil},
+ {testSeries(t, opts, 1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, nil, nil},
+ },
+ },
+ {
+ "Buffer almost full after first write. Second write almost fills the buffer",
+ []testWrite{
+ {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(opts.FlushSize() - 200), nil},
+ {testSeries(t, opts, 1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, randomByteSlice(40), nil},
+ },
+ },
+ {
+ "Buffer almost full after first write. Second write almost fills 2*buffer total",
+ []testWrite{
+ {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(opts.FlushSize() - 200), nil},
+ {testSeries(t, opts, 1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, randomByteSlice(40 + opts.FlushSize()), nil},
+ },
+ },
+ {
+ "Buffer almost full after first write. Second write almost fills 3*buffer total",
+ []testWrite{
+ {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(opts.FlushSize() - 200), nil},
+ {testSeries(t, opts, 1, "foo.baz", ident.NewTags(ident.StringTag("name2", "val2")), 150), time.Now(), 456.789, xtime.Second, randomByteSlice(40 + 2*opts.FlushSize()), nil},
+ },
+ },
+ {
+ "Attempts to perform a write equal to the flush size",
+ []testWrite{
+ {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(opts.FlushSize()), nil},
+ },
+ },
+ {
+ "Attempts to perform a write double the flush size",
+ []testWrite{
+ {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(2 * opts.FlushSize()), nil},
+ },
+ },
+ {
+ "Attempts to perform a write three times the flush size",
+ []testWrite{
+ {testSeries(t, opts, 0, "foo.bar", ident.NewTags(ident.StringTag("name1", "val1")), 127), time.Now(), 123.456, xtime.Second, randomByteSlice(3 * opts.FlushSize()), nil},
+ },
+ },
}
- // Call write sync
- writeCommitLogs(t, scope, commitLog, writes).Wait()
+ for _, testCase := range testCases {
+ t.Run(testCase.testName, func(t *testing.T) {
+ defer cleanup(t, opts)
- // Close the commit log and consequently flush
- require.NoError(t, commitLog.Close())
+ commitLog := newTestCommitLog(t, opts)
- // Assert writes occurred by reading the commit log
- assertCommitLogWritesByIterating(t, commitLog, writes)
+ // Call write sync
+ writeCommitLogs(t, scope, commitLog, testCase.writes).Wait()
+
+ // Close the commit log and consequently flush
+ require.NoError(t, commitLog.Close())
+
+ // Assert writes occurred by reading the commit log
+ assertCommitLogWritesByIterating(t, commitLog, testCase.writes)
+ })
+ }
}
func TestReadCommitLogMissingMetadata(t *testing.T) {
@@ -399,7 +475,7 @@ func TestReadCommitLogMissingMetadata(t *testing.T) {
allSeries := []ts.Series{}
for i := 0; i < 200; i++ {
willNotHaveMetadata := !(i%2 == 0)
- allSeries = append(allSeries, testSeries(
+ allSeries = append(allSeries, testSeries(t, opts,
uint64(i),
"hax",
ident.NewTags(ident.StringTag("name", "val")),
@@ -457,8 +533,8 @@ func TestCommitLogReaderIsNotReusable(t *testing.T) {
commitLog := newTestCommitLog(t, opts)
writes := []testWrite{
- {testSeries(0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Second, []byte{1, 2, 3}, nil},
- {testSeries(1, "foo.baz", testTags2, 150), time.Now(), 456.789, xtime.Second, nil, nil},
+ {testSeries(t, opts, 0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Second, []byte{1, 2, 3}, nil},
+ {testSeries(t, opts, 1, "foo.baz", testTags2, 150), time.Now(), 456.789, xtime.Second, nil, nil},
}
// Call write sync
@@ -495,9 +571,9 @@ func TestCommitLogIteratorUsesPredicateFilterForNonCorruptFiles(t *testing.T) {
// Writes spaced apart by block size.
writes := []testWrite{
- {testSeries(0, "foo.bar", testTags1, 127), start, 123.456, xtime.Millisecond, nil, nil},
- {testSeries(1, "foo.baz", testTags2, 150), start.Add(1 * time.Second), 456.789, xtime.Millisecond, nil, nil},
- {testSeries(2, "foo.qux", testTags3, 291), start.Add(2 * time.Second), 789.123, xtime.Millisecond, nil, nil},
+ {testSeries(t, opts, 0, "foo.bar", testTags1, 127), start, 123.456, xtime.Millisecond, nil, nil},
+ {testSeries(t, opts, 1, "foo.baz", testTags2, 150), start.Add(1 * time.Second), 456.789, xtime.Millisecond, nil, nil},
+ {testSeries(t, opts, 2, "foo.qux", testTags3, 291), start.Add(2 * time.Second), 789.123, xtime.Millisecond, nil, nil},
}
defer cleanup(t, opts)
@@ -613,9 +689,9 @@ func TestCommitLogWriteBehind(t *testing.T) {
commitLog := newTestCommitLog(t, opts)
writes := []testWrite{
- {testSeries(0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil},
- {testSeries(1, "foo.baz", testTags2, 150), time.Now(), 456.789, xtime.Millisecond, nil, nil},
- {testSeries(2, "foo.qux", testTags3, 291), time.Now(), 789.123, xtime.Millisecond, nil, nil},
+ {testSeries(t, opts, 0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil},
+ {testSeries(t, opts, 1, "foo.baz", testTags2, 150), time.Now(), 456.789, xtime.Millisecond, nil, nil},
+ {testSeries(t, opts, 2, "foo.qux", testTags3, 291), time.Now(), 789.123, xtime.Millisecond, nil, nil},
}
// Call write behind
@@ -635,7 +711,7 @@ func TestCommitLogWriteErrorOnClosed(t *testing.T) {
commitLog := newTestCommitLog(t, opts)
require.NoError(t, commitLog.Close())
- series := testSeries(0, "foo.bar", testTags1, 127)
+ series := testSeries(t, opts, 0, "foo.bar", testTags1, 127)
datapoint := ts.Datapoint{Timestamp: time.Now(), Value: 123.456}
ctx := context.NewContext()
@@ -661,7 +737,7 @@ func TestCommitLogWriteErrorOnFull(t *testing.T) {
// Test filling queue
var writes []testWrite
- series := testSeries(0, "foo.bar", testTags1, 127)
+ series := testSeries(t, opts, 0, "foo.bar", testTags1, 127)
dp := ts.Datapoint{Timestamp: time.Now(), Value: 123.456}
unit := xtime.Millisecond
@@ -704,7 +780,7 @@ func TestCommitLogQueueLength(t *testing.T) {
defer commitLog.Close()
var (
- series = testSeries(0, "foo.bar", testTags1, 127)
+ series = testSeries(t, opts, 0, "foo.bar", testTags1, 127)
dp = ts.Datapoint{Timestamp: time.Now(), Value: 123.456}
unit = xtime.Millisecond
ctx = context.NewContext()
@@ -761,7 +837,7 @@ func TestCommitLogFailOnWriteError(t *testing.T) {
wg := setupCloseOnFail(t, commitLog)
writes := []testWrite{
- {testSeries(0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil},
+ {testSeries(t, opts, 0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil},
}
writeCommitLogs(t, scope, commitLog, writes)
@@ -810,7 +886,7 @@ func TestCommitLogFailOnOpenError(t *testing.T) {
wg := setupCloseOnFail(t, commitLog)
writes := []testWrite{
- {testSeries(0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil},
+ {testSeries(t, opts, 0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil},
}
writeCommitLogs(t, scope, commitLog, writes)
@@ -866,7 +942,7 @@ func TestCommitLogFailOnFlushError(t *testing.T) {
wg := setupCloseOnFail(t, commitLog)
writes := []testWrite{
- {testSeries(0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil},
+ {testSeries(t, opts, 0, "foo.bar", testTags1, 127), time.Now(), 123.456, xtime.Millisecond, nil, nil},
}
writeCommitLogs(t, scope, commitLog, writes)
@@ -929,9 +1005,9 @@ func TestCommitLogRotateLogs(t *testing.T) {
// Writes spaced such that they should appear within the same commitlog block.
writes := []testWrite{
- {testSeries(0, "foo.bar", testTags1, 127), start, 123.456, xtime.Millisecond, nil, nil},
- {testSeries(1, "foo.baz", testTags2, 150), start.Add(1 * time.Second), 456.789, xtime.Millisecond, nil, nil},
- {testSeries(2, "foo.qux", testTags3, 291), start.Add(2 * time.Second), 789.123, xtime.Millisecond, nil, nil},
+ {testSeries(t, opts, 0, "foo.bar", testTags1, 127), start, 123.456, xtime.Millisecond, nil, nil},
+ {testSeries(t, opts, 1, "foo.baz", testTags2, 150), start.Add(1 * time.Second), 456.789, xtime.Millisecond, nil, nil},
+ {testSeries(t, opts, 2, "foo.qux", testTags3, 291), start.Add(2 * time.Second), 789.123, xtime.Millisecond, nil, nil},
}
for i, write := range writes {
@@ -965,10 +1041,12 @@ func TestCommitLogRotateLogs(t *testing.T) {
}
var (
+ testTag0 = ident.StringTag("name0", "val0")
testTag1 = ident.StringTag("name1", "val1")
testTag2 = ident.StringTag("name2", "val2")
testTag3 = ident.StringTag("name3", "val3")
+ testTags0 = ident.NewTags(testTag0)
testTags1 = ident.NewTags(testTag1)
testTags2 = ident.NewTags(testTag2)
testTags3 = ident.NewTags(testTag3)
@@ -982,22 +1060,32 @@ func TestCommitLogBatchWriteDoesNotAddErroredOrSkippedSeries(t *testing.T) {
defer cleanup(t, opts)
commitLog := newTestCommitLog(t, opts)
finalized := 0
- finalizeFn := func(_ ts.WriteBatch) {
+ finalizeFn := func(_ writes.WriteBatch) {
finalized++
}
- writes := ts.NewWriteBatch(4, ident.StringID("ns"), finalizeFn)
+ writes := writes.NewWriteBatch(4, ident.StringID("ns"), finalizeFn)
+ testSeriesWrites := []ts.Series{
+ testSeries(t, opts, 0, "foo.bar", testTags0, 42),
+ testSeries(t, opts, 1, "foo.baz", testTags1, 127),
+ testSeries(t, opts, 2, "biz.qaz", testTags2, 321),
+ testSeries(t, opts, 3, "biz.qux", testTags3, 511),
+ }
alignedStart := time.Now().Truncate(time.Hour)
for i := 0; i < 4; i++ {
tt := alignedStart.Add(time.Minute * time.Duration(i))
- writes.Add(i, ident.StringID(fmt.Sprint(i)), tt, float64(i)*10.5, xtime.Second, nil)
+ tagsIter := opts.FilesystemOptions().TagDecoderPool().Get()
+ tagsIter.Reset(checked.NewBytes(testSeriesWrites[i].EncodedTags, nil))
+ writes.AddTagged(i, testSeriesWrites[i].ID, tagsIter,
+ testSeriesWrites[i].EncodedTags,
+ tt, float64(i)*10.5, xtime.Second, nil)
}
writes.SetSkipWrite(0)
- writes.SetOutcome(1, testSeries(1, "foo.bar", testTags1, 127), nil)
- writes.SetOutcome(2, testSeries(2, "err.err", testTags2, 255), errors.New("oops"))
- writes.SetOutcome(3, testSeries(3, "biz.qux", testTags3, 511), nil)
+ writes.SetSeries(1, testSeries(t, opts, 1, "foo.baz", testTags1, 127))
+ writes.SetError(2, errors.New("oops"))
+ writes.SetSeries(3, testSeries(t, opts, 3, "biz.qux", testTags3, 511))
// Call write batch sync
wg := sync.WaitGroup{}
@@ -1037,8 +1125,8 @@ func TestCommitLogBatchWriteDoesNotAddErroredOrSkippedSeries(t *testing.T) {
// Assert writes occurred by reading the commit log
expected := []testWrite{
- {testSeries(1, "foo.bar", testTags1, 127), alignedStart.Add(time.Minute), 10.5, xtime.Second, nil, nil},
- {testSeries(3, "biz.qux", testTags3, 511), alignedStart.Add(time.Minute * 3), 31.5, xtime.Second, nil, nil},
+ {testSeries(t, opts, 1, "foo.baz", testTags1, 127), alignedStart.Add(time.Minute), 10.5, xtime.Second, nil, nil},
+ {testSeries(t, opts, 3, "biz.qux", testTags3, 511), alignedStart.Add(time.Minute * 3), 31.5, xtime.Second, nil, nil},
}
assertCommitLogWritesByIterating(t, commitLog, expected)
diff --git a/src/dbnode/persist/fs/commitlog/read_write_prop_test.go b/src/dbnode/persist/fs/commitlog/read_write_prop_test.go
index ada6737cb1..a5c8e16067 100644
--- a/src/dbnode/persist/fs/commitlog/read_write_prop_test.go
+++ b/src/dbnode/persist/fs/commitlog/read_write_prop_test.go
@@ -23,6 +23,7 @@
package commitlog
import (
+ "bytes"
"errors"
"fmt"
"io/ioutil"
@@ -120,7 +121,7 @@ func TestCommitLogReadWrite(t *testing.T) {
write := seriesWrites.writes[seriesWrites.readPosition]
require.Equal(t, write.series.ID.String(), series.ID.String())
- require.True(t, write.series.Tags.Equal(series.Tags))
+ require.True(t, bytes.Equal(write.series.EncodedTags, series.EncodedTags))
require.Equal(t, write.series.Namespace.String(), series.Namespace.String())
require.Equal(t, write.series.Shard, series.Shard)
require.Equal(t, write.datapoint.Value, datapoint.Value)
@@ -623,7 +624,6 @@ func genWrite() gopter.Gen {
return generatedWrite{
series: ts.Series{
ID: ident.StringID(id),
- Tags: seriesTags,
EncodedTags: seriesEncodedTags,
Namespace: ident.StringID(ns),
Shard: shard,
diff --git a/src/dbnode/persist/fs/commitlog/reader.go b/src/dbnode/persist/fs/commitlog/reader.go
index ac5ea3fed2..4e2fe77938 100644
--- a/src/dbnode/persist/fs/commitlog/reader.go
+++ b/src/dbnode/persist/fs/commitlog/reader.go
@@ -37,7 +37,7 @@ import (
"github.com/m3db/m3/src/x/serialize"
xtime "github.com/m3db/m3/src/x/time"
- "github.com/uber-go/atomic"
+ "go.uber.org/atomic"
)
var (
@@ -170,8 +170,9 @@ func (r *reader) Read() (LogEntry, error) {
result := LogEntry{
Series: metadata,
Datapoint: ts.Datapoint{
- Timestamp: time.Unix(0, entry.Timestamp),
- Value: entry.Value,
+ Timestamp: time.Unix(0, entry.Timestamp),
+ TimestampNanos: xtime.UnixNano(entry.Timestamp),
+ Value: entry.Value,
},
Unit: xtime.Unit(entry.Unit),
Metadata: LogEntryMetadata{
@@ -287,33 +288,19 @@ func (r *reader) seriesMetadataForEntry(
// Find or allocate the namespace ID.
namespaceID := r.namespaceIDReused(decoded.Namespace)
- var (
- idPool = r.opts.commitLogOptions.IdentifierPool()
- tags ident.Tags
- tagBytesLen = len(decoded.EncodedTags)
- )
- if tagBytesLen != 0 {
- r.tagDecoderCheckedBytes.Reset(decoded.EncodedTags)
- r.tagDecoder.Reset(r.tagDecoderCheckedBytes)
-
- tags = idPool.Tags()
- for r.tagDecoder.Next() {
- curr := r.tagDecoder.Current()
- tags.Append(idPool.CloneTag(curr))
- }
- err = r.tagDecoder.Err()
- if err != nil {
- return ts.Series{}, err
- }
- }
+ // Need to copy encoded tags since will be invalid when
+ // progressing to next record.
+ encodedTags := append(
+ make([]byte, 0, len(decoded.EncodedTags)),
+ decoded.EncodedTags...)
- seriesID := idPool.BinaryID(id)
+ idPool := r.opts.commitLogOptions.IdentifierPool()
metadata = ts.Series{
UniqueIndex: entry.Index,
- ID: seriesID,
+ ID: idPool.BinaryID(id),
Namespace: namespaceID,
Shard: decoded.Shard,
- Tags: tags,
+ EncodedTags: ts.EncodedTags(encodedTags),
}
r.metadataLookup[entry.Index] = metadata
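The copy called out in the comment above is needed because the decoder reuses its backing buffer between records, so decoded.EncodedTags only stays valid until the next read. The append-into-an-empty-slice idiom produces an owned copy sized exactly to the source; a standalone sketch:

```go
package main

import "fmt"

func main() {
	// Simulates a decoder-owned buffer that is overwritten per record.
	scratch := []byte("tags-for-record-1")

	// Owned copy: allocate exactly len(scratch) and append into it.
	owned := append(make([]byte, 0, len(scratch)), scratch...)

	// The scratch buffer is reused for the next record...
	copy(scratch, []byte("tags-for-record-2"))

	// ...but the copy is unaffected.
	fmt.Println(string(owned)) // tags-for-record-1
}
```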
diff --git a/src/dbnode/persist/fs/commitlog/types.go b/src/dbnode/persist/fs/commitlog/types.go
index cd13dae10b..05ff965db9 100644
--- a/src/dbnode/persist/fs/commitlog/types.go
+++ b/src/dbnode/persist/fs/commitlog/types.go
@@ -27,6 +27,7 @@ import (
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -66,7 +67,7 @@ type CommitLog interface {
// WriteBatch is the same as Write, but in batch.
WriteBatch(
ctx context.Context,
- writes ts.WriteBatch,
+ writes writes.WriteBatch,
) error
// Close the commit log
diff --git a/src/dbnode/persist/fs/commitlog/writer.go b/src/dbnode/persist/fs/commitlog/writer.go
index 516b8c9136..864184e533 100644
--- a/src/dbnode/persist/fs/commitlog/writer.go
+++ b/src/dbnode/persist/fs/commitlog/writer.go
@@ -35,9 +35,7 @@ import (
"github.com/m3db/m3/src/dbnode/persist/fs/msgpack"
"github.com/m3db/m3/src/dbnode/persist/schema"
"github.com/m3db/m3/src/dbnode/ts"
- "github.com/m3db/m3/src/x/ident"
xos "github.com/m3db/m3/src/x/os"
- "github.com/m3db/m3/src/x/serialize"
xtime "github.com/m3db/m3/src/x/time"
)
@@ -109,8 +107,6 @@ type writer struct {
logEncoder *msgpack.Encoder
logEncoderBuff []byte
metadataEncoderBuff []byte
- tagEncoder serialize.TagEncoder
- tagSliceIter ident.TagsIterator
opts Options
}
@@ -133,8 +129,6 @@ func newCommitLogWriter(
logEncoder: msgpack.NewEncoder(),
logEncoderBuff: make([]byte, 0, defaultEncoderBuffSize),
metadataEncoderBuff: make([]byte, 0, defaultEncoderBuffSize),
- tagEncoder: opts.FilesystemOptions().TagEncoderPool().Get(),
- tagSliceIter: ident.NewTagsIterator(ident.Tags{}),
opts: opts,
}
}
@@ -203,34 +197,13 @@ func (w *writer) Write(
seen := w.seen.Test(uint(series.UniqueIndex))
if !seen {
- var encodedTags []byte
- if series.EncodedTags != nil {
- // If already serialized use the serialized tags.
- encodedTags = series.EncodedTags
- } else if series.Tags.Values() != nil {
- // Otherwise serialize the tags.
- w.tagSliceIter.Reset(series.Tags)
- w.tagEncoder.Reset()
- err := w.tagEncoder.Encode(w.tagSliceIter)
- if err != nil {
- return err
- }
-
- encodedTagsChecked, ok := w.tagEncoder.Data()
- if !ok {
- return errTagEncoderDataNotAvailable
- }
-
- encodedTags = encodedTagsChecked.Bytes()
- }
-
// If "idx" likely hasn't been written to commit log
// yet we need to include series metadata
var metadata schema.LogMetadata
metadata.ID = series.ID.Bytes()
metadata.Namespace = series.Namespace.Bytes()
metadata.Shard = series.Shard
- metadata.EncodedTags = encodedTags
+ metadata.EncodedTags = series.EncodedTags
var err error
w.metadataEncoderBuff, err = msgpack.EncodeLogMetadataFast(w.metadataEncoderBuff[:0], metadata)
@@ -349,6 +322,9 @@ func (w *fsChunkWriter) sync() error {
return w.fd.Sync()
}
+// Write writes a custom header in front of p to the file and returns the
+// number of bytes of p successfully written. If the header or p is not fully
+// written, it returns the number of bytes of p actually written along with
+// an error explaining why the write could not complete.
func (w *fsChunkWriter) Write(p []byte) (int, error) {
size := len(p)
@@ -379,9 +355,15 @@ func (w *fsChunkWriter) Write(p []byte) (int, error) {
// Write contents to file descriptor
n, err := w.fd.Write(w.buff)
+ // Count bytes successfully written from slice p
+ pBytesWritten := n - chunkHeaderLen
+ if pBytesWritten < 0 {
+ pBytesWritten = 0
+ }
+
if err != nil {
w.flushFn(err)
- return n, err
+ return pBytesWritten, err
}
// Fsync if required to
@@ -391,5 +373,5 @@ func (w *fsChunkWriter) Write(p []byte) (int, error) {
// Fire flush callback
w.flushFn(err)
- return n, err
+ return pBytesWritten, err
}
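The accounting change above brings fsChunkWriter.Write in line with the io.Writer contract: the returned n must describe bytes of p, so the prepended chunk header has to be excluded, clamped at zero if even the header was cut short. A simplified sketch, with a fake 4-byte header standing in for the real chunk header:

```go
package main

import (
	"bytes"
	"fmt"
)

const headerLen = 4 // stand-in for chunkHeaderLen

type headerWriter struct {
	out  *bytes.Buffer
	buff []byte
}

// Write prepends a fixed header to p and reports only how many bytes of p
// made it out, clamped at zero if even the header was cut short.
func (w *headerWriter) Write(p []byte) (int, error) {
	w.buff = append(w.buff[:0], make([]byte, headerLen)...) // fake header
	w.buff = append(w.buff, p...)

	n, err := w.out.Write(w.buff)

	pBytesWritten := n - headerLen
	if pBytesWritten < 0 {
		pBytesWritten = 0
	}
	return pBytesWritten, err
}

func main() {
	w := &headerWriter{out: &bytes.Buffer{}}
	n, err := w.Write([]byte("payload"))
	fmt.Println(n, err) // 7 <nil>, header bytes are not counted
}
```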
diff --git a/src/dbnode/persist/fs/files.go b/src/dbnode/persist/fs/files.go
index c2643ec622..1de9a425d6 100644
--- a/src/dbnode/persist/fs/files.go
+++ b/src/dbnode/persist/fs/files.go
@@ -50,6 +50,7 @@ var (
timeZero time.Time
errSnapshotTimeAndIDZero = errors.New("tried to read snapshot time and ID of zero value")
+ errNonSnapshotFileset = errors.New("tried to determine snapshot time and id of non-snapshot")
)
const (
@@ -103,7 +104,7 @@ const (
// FileSetFile represents a set of FileSet files for a given block start
type FileSetFile struct {
ID FileSetFileIdentifier
- AbsoluteFilepaths []string
+ AbsoluteFilePaths []string
CachedSnapshotTime time.Time
CachedSnapshotID uuid.UUID
@@ -117,11 +118,8 @@ func (f *FileSetFile) SnapshotTimeAndID() (time.Time, uuid.UUID, error) {
if f.IsZero() {
return time.Time{}, nil, errSnapshotTimeAndIDZero
}
- if len(f.AbsoluteFilepaths) > 0 &&
- !strings.Contains(f.AbsoluteFilepaths[0], snapshotDirName) {
- return time.Time{}, nil, fmt.Errorf(
- "tried to determine snapshot time and id of non-snapshot: %s",
- f.AbsoluteFilepaths[0])
+ if _, ok := f.SnapshotFilepath(); !ok {
+ return time.Time{}, nil, errNonSnapshotFileset
}
if !f.CachedSnapshotTime.IsZero() || f.CachedSnapshotID != nil {
@@ -140,9 +138,36 @@ func (f *FileSetFile) SnapshotTimeAndID() (time.Time, uuid.UUID, error) {
return f.CachedSnapshotTime, f.CachedSnapshotID, nil
}
+// InfoFilePath returns the info file path of a fileset file (if found).
+func (f *FileSetFile) InfoFilePath() (string, bool) {
+ return f.filepath(infoFileSuffix)
+}
+
+// SnapshotFilepath returns the snapshot file path of a fileset file (if found).
+func (f *FileSetFile) SnapshotFilepath() (string, bool) {
+ return f.filepath(snapshotDirName)
+}
+
// IsZero returns whether the FileSetFile is a zero value.
func (f FileSetFile) IsZero() bool {
- return len(f.AbsoluteFilepaths) == 0
+ return len(f.AbsoluteFilePaths) == 0
+}
+
+func (f *FileSetFile) filepath(pathContains string) (string, bool) {
+ var (
+ found bool
+ foundIdx int
+ )
+ for idx, path := range f.AbsoluteFilePaths {
+ if strings.Contains(path, pathContains) {
+ found = true
+ foundIdx = idx
+ }
+ }
+ if found {
+ return f.AbsoluteFilePaths[foundIdx], true
+ }
+ return "", false
}
// HasCompleteCheckpointFile returns a bool indicating whether the given set of
@@ -159,7 +184,7 @@ func (f *FileSetFile) HasCompleteCheckpointFile() bool {
}
func (f *FileSetFile) evalHasCompleteCheckpointFile() LazyEvalBool {
- for _, fileName := range f.AbsoluteFilepaths {
+ for _, fileName := range f.AbsoluteFilePaths {
if strings.Contains(fileName, checkpointFileSuffix) {
exists, err := CompleteCheckpointFileExists(fileName)
if err != nil {
@@ -182,7 +207,7 @@ type FileSetFilesSlice []FileSetFile
func (f FileSetFilesSlice) Filepaths() []string {
flattened := []string{}
for _, fileset := range f {
- flattened = append(flattened, fileset.AbsoluteFilepaths...)
+ flattened = append(flattened, fileset.AbsoluteFilePaths...)
}
return flattened
@@ -262,9 +287,9 @@ type SnapshotMetadata struct {
CheckpointFilePath string
}
-// AbsoluteFilepaths returns a slice of all the absolute filepaths associated
+// AbsoluteFilePaths returns a slice of all the absolute filepaths associated
// with a snapshot metadata.
-func (s SnapshotMetadata) AbsoluteFilepaths() []string {
+func (s SnapshotMetadata) AbsoluteFilePaths() []string {
return []string{s.MetadataFilePath, s.CheckpointFilePath}
}
@@ -285,11 +310,26 @@ type SnapshotMetadataIdentifier struct {
UUID uuid.UUID
}
+// NewFileSetFileIdentifier creates a new FileSetFileIdentifier.
+func NewFileSetFileIdentifier(
+ namespace ident.ID,
+ blockStart time.Time,
+ shard uint32,
+ volumeIndex int,
+) FileSetFileIdentifier {
+ return FileSetFileIdentifier{
+ Namespace: namespace,
+ Shard: shard,
+ BlockStart: blockStart,
+ VolumeIndex: volumeIndex,
+ }
+}
+
// NewFileSetFile creates a new FileSet file
func NewFileSetFile(id FileSetFileIdentifier, filePathPrefix string) FileSetFile {
return FileSetFile{
ID: id,
- AbsoluteFilepaths: []string{},
+ AbsoluteFilePaths: []string{},
filePathPrefix: filePathPrefix,
}
}
@@ -651,7 +691,7 @@ type forEachInfoFileSelector struct {
shard uint32 // shard only applicable for data content type
}
-type infoFileFn func(fname string, id FileSetFileIdentifier, infoData []byte)
+type infoFileFn func(file FileSetFile, infoData []byte)
func forEachInfoFile(
args forEachInfoFileSelector,
@@ -664,7 +704,7 @@ func forEachInfoFile(
filePathPrefix: args.filePathPrefix,
namespace: args.namespace,
shard: args.shard,
- pattern: infoFilePattern,
+ pattern: filesetFilePattern,
})
if err != nil {
return
@@ -758,11 +798,12 @@ func forEachInfoFile(
if err != nil {
continue
}
- if len(matched[i].AbsoluteFilepaths) != 1 {
+		// Skip any matched fileset that does not include an info file.
+ if _, ok := matched[i].InfoFilePath(); !ok {
continue
}
- fn(matched[i].AbsoluteFilepaths[0], matched[i].ID, infoData)
+ fn(matched[i], infoData)
}
}
@@ -802,26 +843,28 @@ func ReadInfoFiles(
shard uint32,
readerBufferSize int,
decodingOpts msgpack.DecodingOptions,
+ fileSetType persist.FileSetType,
) []ReadInfoFileResult {
var infoFileResults []ReadInfoFileResult
decoder := msgpack.NewDecoder(decodingOpts)
forEachInfoFile(
forEachInfoFileSelector{
- fileSetType: persist.FileSetFlushType,
+ fileSetType: fileSetType,
contentType: persist.FileSetDataContentType,
filePathPrefix: filePathPrefix,
namespace: namespace,
shard: shard,
},
readerBufferSize,
- func(filepath string, id FileSetFileIdentifier, data []byte) {
+ func(file FileSetFile, data []byte) {
+ filePath, _ := file.InfoFilePath()
decoder.Reset(msgpack.NewByteDecoderStream(data))
info, err := decoder.DecodeIndexInfo()
infoFileResults = append(infoFileResults, ReadInfoFileResult{
Info: info,
Err: readInfoFileResultError{
err: err,
- filepath: filepath,
+ filepath: filePath,
},
})
})
@@ -830,9 +873,10 @@ func ReadInfoFiles(
// ReadIndexInfoFileResult is the result of reading an info file
type ReadIndexInfoFileResult struct {
- ID FileSetFileIdentifier
- Info index.IndexInfo
- Err ReadInfoFileResultError
+ ID FileSetFileIdentifier
+ Info index.IndexVolumeInfo
+ AbsoluteFilePaths []string
+ Err ReadInfoFileResultError
}
// ReadIndexInfoFiles reads all the valid index info entries. Even if ReadIndexInfoFiles returns an error,
@@ -851,12 +895,15 @@ func ReadIndexInfoFiles(
namespace: namespace,
},
readerBufferSize,
- func(filepath string, id FileSetFileIdentifier, data []byte) {
- var info index.IndexInfo
+ func(file FileSetFile, data []byte) {
+ filepath, _ := file.InfoFilePath()
+ id := file.ID
+ var info index.IndexVolumeInfo
err := info.Unmarshal(data)
infoFileResults = append(infoFileResults, ReadIndexInfoFileResult{
- ID: id,
- Info: info,
+ ID: id,
+ Info: info,
+ AbsoluteFilePaths: file.AbsoluteFilePaths,
Err: readInfoFileResultError{
err: err,
filepath: filepath,
@@ -980,13 +1027,36 @@ func IndexSnapshotFiles(filePathPrefix string, namespace ident.ID) (FileSetFiles
// FileSetAt returns a FileSetFile for the given namespace/shard/blockStart/volume combination if it exists.
func FileSetAt(filePathPrefix string, namespace ident.ID, shard uint32, blockStart time.Time, volume int) (FileSetFile, bool, error) {
+ var pattern string
+ // If this is the initial volume, then we need to check if files were written with the legacy file naming (i.e.
+ // without the volume index) so that we can properly locate the fileset.
+ if volume == 0 {
+ dir := ShardDataDirPath(filePathPrefix, namespace, shard)
+ isLegacy, err := isFirstVolumeLegacy(dir, blockStart, checkpointFileSuffix)
+		// NB(nate): don't propagate ErrCheckpointFileNotFound here as the
+		// expectation is to simply return an empty FileSetFile if files do
+		// not exist.
+		if err == ErrCheckpointFileNotFound {
+			return FileSetFile{}, false, nil
+		} else if err != nil {
+ return FileSetFile{}, false, err
+ }
+
+ if isLegacy {
+ pattern = filesetFileForTime(blockStart, anyLowerCaseCharsPattern)
+ }
+ }
+
+ if len(pattern) == 0 {
+ pattern = filesetFileForTimeAndVolumeIndex(blockStart, volume, anyLowerCaseCharsPattern)
+ }
+
matched, err := filesetFiles(filesetFilesSelector{
fileSetType: persist.FileSetFlushType,
contentType: persist.FileSetDataContentType,
filePathPrefix: filePathPrefix,
namespace: namespace,
shard: shard,
- pattern: filesetFileForTime(blockStart, anyLowerCaseCharsPattern),
+ pattern: pattern,
})
if err != nil {
return FileSetFile{}, false, err
@@ -1053,7 +1123,7 @@ func DeleteFileSetAt(filePathPrefix string, namespace ident.ID, shard uint32, bl
return fmt.Errorf("fileset for blockStart: %d does not exist", blockStart.Unix())
}
- return DeleteFiles(fileset.AbsoluteFilepaths)
+ return DeleteFiles(fileset.AbsoluteFilePaths)
}
// DataFileSetsBefore returns all the flush data fileset paths whose timestamps are earlier than a given time.
@@ -1255,7 +1325,7 @@ func filesetFiles(args filesetFilesSelector) (FileSetFilesSlice, error) {
latestBlockStart = currentFileBlockStart
latestVolumeIndex = volumeIndex
- latestFileSetFile.AbsoluteFilepaths = append(latestFileSetFile.AbsoluteFilepaths, file)
+ latestFileSetFile.AbsoluteFilePaths = append(latestFileSetFile.AbsoluteFilePaths, file)
}
filesetFiles = append(filesetFiles, latestFileSetFile)
@@ -1324,6 +1394,11 @@ func DataDirPath(prefix string) string {
return path.Join(prefix, dataDirName)
}
+// IndexDataDirPath returns the path to the index data directory belonging to a db
+func IndexDataDirPath(prefix string) string {
+ return path.Join(prefix, indexDirName, dataDirName)
+}
+
// SnapshotDirPath returns the path to the snapshot directory belong to a db
func SnapshotDirPath(prefix string) string {
return path.Join(prefix, snapshotDirName)
@@ -1405,17 +1480,32 @@ func DataFileSetExists(
}
// SnapshotFileSetExistsAt determines whether snapshot fileset files exist for the given namespace, shard, and block start time.
-func SnapshotFileSetExistsAt(prefix string, namespace ident.ID, shard uint32, blockStart time.Time) (bool, error) {
+func SnapshotFileSetExistsAt(
+ prefix string,
+ namespace ident.ID,
+ snapshotID uuid.UUID,
+ shard uint32,
+ blockStart time.Time,
+) (bool, error) {
snapshotFiles, err := SnapshotFiles(prefix, namespace, shard)
if err != nil {
return false, err
}
- _, ok := snapshotFiles.LatestVolumeForBlock(blockStart)
+ latest, ok := snapshotFiles.LatestVolumeForBlock(blockStart)
if !ok {
return false, nil
}
+ _, latestSnapshotID, err := latest.SnapshotTimeAndID()
+ if err != nil {
+ return false, err
+ }
+
+ if !uuid.Equal(latestSnapshotID, snapshotID) {
+ return false, nil
+ }
+
// LatestVolumeForBlock checks for a complete checkpoint file, so we don't
// need to recheck it here.
return true, nil
@@ -1563,13 +1653,17 @@ func filesetFileForTime(t time.Time, suffix string) string {
return fmt.Sprintf("%s%s%d%s%s%s", filesetFilePrefix, separator, t.UnixNano(), separator, suffix, fileSuffix)
}
+func filesetFileForTimeAndVolumeIndex(t time.Time, index int, suffix string) string {
+ newSuffix := fmt.Sprintf("%d%s%s", index, separator, suffix)
+ return filesetFileForTime(t, newSuffix)
+}
+
func filesetPathFromTimeLegacy(prefix string, t time.Time, suffix string) string {
return path.Join(prefix, filesetFileForTime(t, suffix))
}
func filesetPathFromTimeAndIndex(prefix string, t time.Time, index int, suffix string) string {
- newSuffix := fmt.Sprintf("%d%s%s", index, separator, suffix)
- return path.Join(prefix, filesetFileForTime(t, newSuffix))
+ return path.Join(prefix, filesetFileForTimeAndVolumeIndex(t, index, suffix))
}
// isFirstVolumeLegacy returns whether the first volume of the provided type is
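The extracted filesetFileForTimeAndVolumeIndex helper above is what FileSetAt uses to reconcile the two on-disk naming schemes: the legacy name without a volume index and the newer name that embeds one. A sketch of the two formats; the prefix, separator, and suffix constants are assumed here to be "fileset", "-", and ".db", matching the format strings in this file:

```go
package main

import (
	"fmt"
	"time"
)

const (
	filesetFilePrefix = "fileset"
	separator         = "-"
	fileSuffix        = ".db"
)

// Legacy layout: fileset-<blockStartNanos>-<suffix>.db
func filesetFileForTime(t time.Time, suffix string) string {
	return fmt.Sprintf("%s%s%d%s%s%s", filesetFilePrefix, separator,
		t.UnixNano(), separator, suffix, fileSuffix)
}

// Volume-indexed layout: fileset-<blockStartNanos>-<volume>-<suffix>.db
func filesetFileForTimeAndVolumeIndex(t time.Time, index int, suffix string) string {
	return filesetFileForTime(t, fmt.Sprintf("%d%s%s", index, separator, suffix))
}

func main() {
	ts := time.Unix(0, 1)
	fmt.Println(filesetFileForTime(ts, "checkpoint"))                  // fileset-1-checkpoint.db
	fmt.Println(filesetFileForTimeAndVolumeIndex(ts, 0, "checkpoint")) // fileset-1-0-checkpoint.db
}
```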
diff --git a/src/dbnode/persist/fs/files_test.go b/src/dbnode/persist/fs/files_test.go
index b0157f9db7..ea3337a355 100644
--- a/src/dbnode/persist/fs/files_test.go
+++ b/src/dbnode/persist/fs/files_test.go
@@ -203,7 +203,9 @@ func TestForEachInfoFile(t *testing.T) {
shard: shard,
},
testReaderBufferSize,
- func(fname string, _ FileSetFileIdentifier, data []byte) {
+ func(file FileSetFile, data []byte) {
+ fname, ok := file.InfoFilePath()
+ require.True(t, ok)
fnames = append(fnames, fname)
res = append(res, data...)
})
@@ -503,6 +505,38 @@ func TestFileSetAt(t *testing.T) {
}
}
+func TestFileSetAtNonLegacy(t *testing.T) {
+ shard := uint32(0)
+ numIters := 20
+ dir := createDataFiles(t, dataDirName, testNs1ID, shard, numIters, true, checkpointFileSuffix)
+ defer os.RemoveAll(dir)
+
+ for i := 0; i < numIters; i++ {
+ timestamp := time.Unix(0, int64(i))
+ res, ok, err := FileSetAt(dir, testNs1ID, shard, timestamp, 0)
+ require.NoError(t, err)
+ require.True(t, ok)
+ require.Equal(t, timestamp, res.ID.BlockStart)
+ }
+}
+
+func TestFileSetAtNotFirstVolumeIndex(t *testing.T) {
+ shard := uint32(0)
+ numIters := 20
+ volumeIndex := 1
+ dir := createDataFilesWithVolumeIndex(t, dataDirName, testNs1ID, shard, numIters, true,
+ checkpointFileSuffix, volumeIndex)
+ defer os.RemoveAll(dir)
+
+ for i := 0; i < numIters; i++ {
+ timestamp := time.Unix(0, int64(i))
+ res, ok, err := FileSetAt(dir, testNs1ID, shard, timestamp, volumeIndex)
+ require.NoError(t, err)
+ require.True(t, ok)
+ require.Equal(t, timestamp, res.ID.BlockStart)
+ }
+}
+
func TestFileSetAtIgnoresWithoutCheckpoint(t *testing.T) {
shard := uint32(0)
numIters := 20
@@ -840,7 +874,7 @@ func TestSnapshotFileHasCompleteCheckpointFile(t *testing.T) {
// Check validates a valid checkpoint file
f := FileSetFile{
- AbsoluteFilepaths: []string{checkpointFilePath},
+ AbsoluteFilePaths: []string{checkpointFilePath},
}
require.Equal(t, true, f.HasCompleteCheckpointFile())
@@ -848,14 +882,14 @@ func TestSnapshotFileHasCompleteCheckpointFile(t *testing.T) {
err = ioutil.WriteFile(checkpointFilePath, []byte{42}, defaultNewFileMode)
require.NoError(t, err)
f = FileSetFile{
- AbsoluteFilepaths: []string{checkpointFilePath},
+ AbsoluteFilePaths: []string{checkpointFilePath},
}
require.Equal(t, false, f.HasCompleteCheckpointFile())
// Check ignores index file path
indexFilePath := path.Join(dir, "123-index-0.db")
f = FileSetFile{
- AbsoluteFilepaths: []string{indexFilePath},
+ AbsoluteFilePaths: []string{indexFilePath},
}
require.Equal(t, false, f.HasCompleteCheckpointFile())
}
@@ -887,7 +921,7 @@ func TestSnapshotFileSetExistsAt(t *testing.T) {
writeOutTestSnapshot(t, dir, shard, ts, 0)
- exists, err := SnapshotFileSetExistsAt(dir, testNs1ID, shard, ts)
+ exists, err := SnapshotFileSetExistsAt(dir, testNs1ID, testSnapshotID, shard, ts)
require.NoError(t, err)
require.True(t, exists)
}
@@ -1113,7 +1147,7 @@ func TestSnapshotFileSnapshotTimeAndIDZeroValue(t *testing.T) {
func TestSnapshotFileSnapshotTimeAndIDNotSnapshot(t *testing.T) {
f := FileSetFile{}
- f.AbsoluteFilepaths = []string{"/var/lib/m3db/data/fileset-data.db"}
+ f.AbsoluteFilePaths = []string{"/var/lib/m3db/data/fileset-data.db"}
_, _, err := f.SnapshotTimeAndID()
require.Error(t, err)
}
@@ -1163,8 +1197,8 @@ func createDataCheckpointFiles(t *testing.T, subDirName string, namespace ident.
return createDataFiles(t, subDirName, namespace, shard, iter, isSnapshot, checkpointFileSuffix)
}
-func createDataFiles(t *testing.T,
- subDirName string, namespace ident.ID, shard uint32, iter int, isSnapshot bool, fileSuffix string,
+func createDataFilesWithVolumeIndex(t *testing.T,
+ subDirName string, namespace ident.ID, shard uint32, iter int, isSnapshot bool, fileSuffix string, volumeIndex int,
) string {
dir := createTempDir(t)
shardDir := path.Join(dir, subDirName, namespace.String(), strconv.Itoa(int(shard)))
@@ -1173,7 +1207,7 @@ func createDataFiles(t *testing.T,
ts := time.Unix(0, int64(i))
var infoFilePath string
if isSnapshot {
- infoFilePath = filesetPathFromTimeAndIndex(shardDir, ts, 0, fileSuffix)
+ infoFilePath = filesetPathFromTimeAndIndex(shardDir, ts, volumeIndex, fileSuffix)
} else {
infoFilePath = filesetPathFromTimeLegacy(shardDir, ts, fileSuffix)
}
@@ -1191,6 +1225,12 @@ func createDataFiles(t *testing.T,
return dir
}
+func createDataFiles(t *testing.T,
+ subDirName string, namespace ident.ID, shard uint32, iter int, isSnapshot bool, fileSuffix string,
+) string {
+ return createDataFilesWithVolumeIndex(t, subDirName, namespace, shard, iter, isSnapshot, fileSuffix, 0)
+}
+
type indexFileSetFileIdentifier struct {
FileSetFileIdentifier
Suffix string
diff --git a/src/dbnode/persist/fs/fs_mock.go b/src/dbnode/persist/fs/fs_mock.go
index 35262a2d68..084b8e2afc 100644
--- a/src/dbnode/persist/fs/fs_mock.go
+++ b/src/dbnode/persist/fs/fs_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/dbnode/persist/fs (interfaces: DataFileSetWriter,DataFileSetReader,DataFileSetSeeker,IndexFileSetWriter,IndexFileSetReader,IndexSegmentFileSetWriter,IndexSegmentFileSet,IndexSegmentFile,SnapshotMetadataFileWriter,DataFileSetSeekerManager,ConcurrentDataFileSetSeeker,MergeWith)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -30,8 +30,10 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/namespace"
+ persist "github.com/m3db/m3/src/dbnode/persist"
+ "github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/x/xio"
- "github.com/m3db/m3/src/m3ninx/persist"
+ persist0 "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
@@ -77,6 +79,21 @@ func (mr *MockDataFileSetWriterMockRecorder) Close() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDataFileSetWriter)(nil).Close))
}
+// DeferClose mocks base method
+func (m *MockDataFileSetWriter) DeferClose() (persist.DataCloser, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeferClose")
+ ret0, _ := ret[0].(persist.DataCloser)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// DeferClose indicates an expected call of DeferClose
+func (mr *MockDataFileSetWriterMockRecorder) DeferClose() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeferClose", reflect.TypeOf((*MockDataFileSetWriter)(nil).DeferClose))
+}
+
// Open mocks base method
func (m *MockDataFileSetWriter) Open(arg0 DataWriterOpenOptions) error {
m.ctrl.T.Helper()
@@ -92,31 +109,31 @@ func (mr *MockDataFileSetWriterMockRecorder) Open(arg0 interface{}) *gomock.Call
}
// Write mocks base method
-func (m *MockDataFileSetWriter) Write(arg0 ident.ID, arg1 ident.Tags, arg2 checked.Bytes, arg3 uint32) error {
+func (m *MockDataFileSetWriter) Write(arg0 persist.Metadata, arg1 checked.Bytes, arg2 uint32) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Write", arg0, arg1, arg2, arg3)
+ ret := m.ctrl.Call(m, "Write", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// Write indicates an expected call of Write
-func (mr *MockDataFileSetWriterMockRecorder) Write(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+func (mr *MockDataFileSetWriterMockRecorder) Write(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockDataFileSetWriter)(nil).Write), arg0, arg1, arg2, arg3)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockDataFileSetWriter)(nil).Write), arg0, arg1, arg2)
}
// WriteAll mocks base method
-func (m *MockDataFileSetWriter) WriteAll(arg0 ident.ID, arg1 ident.Tags, arg2 []checked.Bytes, arg3 uint32) error {
+func (m *MockDataFileSetWriter) WriteAll(arg0 persist.Metadata, arg1 []checked.Bytes, arg2 uint32) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "WriteAll", arg0, arg1, arg2, arg3)
+ ret := m.ctrl.Call(m, "WriteAll", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// WriteAll indicates an expected call of WriteAll
-func (mr *MockDataFileSetWriterMockRecorder) WriteAll(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+func (mr *MockDataFileSetWriterMockRecorder) WriteAll(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteAll", reflect.TypeOf((*MockDataFileSetWriter)(nil).WriteAll), arg0, arg1, arg2, arg3)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteAll", reflect.TypeOf((*MockDataFileSetWriter)(nil).WriteAll), arg0, arg1, arg2)
}
// MockDataFileSetReader is a mock of DataFileSetReader interface
@@ -524,7 +541,7 @@ func (mr *MockIndexFileSetWriterMockRecorder) Open(arg0 interface{}) *gomock.Cal
}
// WriteSegmentFileSet mocks base method
-func (m *MockIndexFileSetWriter) WriteSegmentFileSet(arg0 persist.IndexSegmentFileSetWriter) error {
+func (m *MockIndexFileSetWriter) WriteSegmentFileSet(arg0 persist0.IndexSegmentFileSetWriter) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteSegmentFileSet", arg0)
ret0, _ := ret[0].(error)
@@ -574,6 +591,20 @@ func (mr *MockIndexFileSetReaderMockRecorder) Close() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIndexFileSetReader)(nil).Close))
}
+// IndexVolumeType mocks base method
+func (m *MockIndexFileSetReader) IndexVolumeType() persist0.IndexVolumeType {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IndexVolumeType")
+ ret0, _ := ret[0].(persist0.IndexVolumeType)
+ return ret0
+}
+
+// IndexVolumeType indicates an expected call of IndexVolumeType
+func (mr *MockIndexFileSetReaderMockRecorder) IndexVolumeType() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IndexVolumeType", reflect.TypeOf((*MockIndexFileSetReader)(nil).IndexVolumeType))
+}
+
// Open mocks base method
func (m *MockIndexFileSetReader) Open(arg0 IndexReaderOpenOptions) (IndexReaderOpenResult, error) {
m.ctrl.T.Helper()
@@ -590,10 +621,10 @@ func (mr *MockIndexFileSetReaderMockRecorder) Open(arg0 interface{}) *gomock.Cal
}
// ReadSegmentFileSet mocks base method
-func (m *MockIndexFileSetReader) ReadSegmentFileSet() (persist.IndexSegmentFileSet, error) {
+func (m *MockIndexFileSetReader) ReadSegmentFileSet() (persist0.IndexSegmentFileSet, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReadSegmentFileSet")
- ret0, _ := ret[0].(persist.IndexSegmentFileSet)
+ ret0, _ := ret[0].(persist0.IndexSegmentFileSet)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -656,10 +687,10 @@ func (m *MockIndexSegmentFileSetWriter) EXPECT() *MockIndexSegmentFileSetWriterM
}
// Files mocks base method
-func (m *MockIndexSegmentFileSetWriter) Files() []persist.IndexSegmentFileType {
+func (m *MockIndexSegmentFileSetWriter) Files() []persist0.IndexSegmentFileType {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Files")
- ret0, _ := ret[0].([]persist.IndexSegmentFileType)
+ ret0, _ := ret[0].([]persist0.IndexSegmentFileType)
return ret0
}
@@ -712,10 +743,10 @@ func (mr *MockIndexSegmentFileSetWriterMockRecorder) SegmentMetadata() *gomock.C
}
// SegmentType mocks base method
-func (m *MockIndexSegmentFileSetWriter) SegmentType() persist.IndexSegmentType {
+func (m *MockIndexSegmentFileSetWriter) SegmentType() persist0.IndexSegmentType {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SegmentType")
- ret0, _ := ret[0].(persist.IndexSegmentType)
+ ret0, _ := ret[0].(persist0.IndexSegmentType)
return ret0
}
@@ -726,7 +757,7 @@ func (mr *MockIndexSegmentFileSetWriterMockRecorder) SegmentType() *gomock.Call
}
// WriteFile mocks base method
-func (m *MockIndexSegmentFileSetWriter) WriteFile(arg0 persist.IndexSegmentFileType, arg1 io.Writer) error {
+func (m *MockIndexSegmentFileSetWriter) WriteFile(arg0 persist0.IndexSegmentFileType, arg1 io.Writer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteFile", arg0, arg1)
ret0, _ := ret[0].(error)
@@ -763,10 +794,10 @@ func (m *MockIndexSegmentFileSet) EXPECT() *MockIndexSegmentFileSetMockRecorder
}
// Files mocks base method
-func (m *MockIndexSegmentFileSet) Files() []persist.IndexSegmentFile {
+func (m *MockIndexSegmentFileSet) Files() []persist0.IndexSegmentFile {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Files")
- ret0, _ := ret[0].([]persist.IndexSegmentFile)
+ ret0, _ := ret[0].([]persist0.IndexSegmentFile)
return ret0
}
@@ -819,10 +850,10 @@ func (mr *MockIndexSegmentFileSetMockRecorder) SegmentMetadata() *gomock.Call {
}
// SegmentType mocks base method
-func (m *MockIndexSegmentFileSet) SegmentType() persist.IndexSegmentType {
+func (m *MockIndexSegmentFileSet) SegmentType() persist0.IndexSegmentType {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SegmentType")
- ret0, _ := ret[0].(persist.IndexSegmentType)
+ ret0, _ := ret[0].(persist0.IndexSegmentType)
return ret0
}
@@ -856,10 +887,10 @@ func (m *MockIndexSegmentFile) EXPECT() *MockIndexSegmentFileMockRecorder {
}
// Files mocks base method
-func (m *MockIndexSegmentFile) Files() []persist.IndexSegmentFile {
+func (m *MockIndexSegmentFile) Files() []persist0.IndexSegmentFile {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Files")
- ret0, _ := ret[0].([]persist.IndexSegmentFile)
+ ret0, _ := ret[0].([]persist0.IndexSegmentFile)
return ret0
}
@@ -912,10 +943,10 @@ func (mr *MockIndexSegmentFileMockRecorder) SegmentMetadata() *gomock.Call {
}
// SegmentType mocks base method
-func (m *MockIndexSegmentFile) SegmentType() persist.IndexSegmentType {
+func (m *MockIndexSegmentFile) SegmentType() persist0.IndexSegmentType {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SegmentType")
- ret0, _ := ret[0].(persist.IndexSegmentType)
+ ret0, _ := ret[0].(persist0.IndexSegmentType)
return ret0
}
@@ -985,6 +1016,18 @@ func (m *MockDataFileSetSeekerManager) EXPECT() *MockDataFileSetSeekerManagerMoc
return m.recorder
}
+// AssignShardSet mocks base method
+func (m *MockDataFileSetSeekerManager) AssignShardSet(arg0 sharding.ShardSet) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "AssignShardSet", arg0)
+}
+
+// AssignShardSet indicates an expected call of AssignShardSet
+func (mr *MockDataFileSetSeekerManagerMockRecorder) AssignShardSet(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssignShardSet", reflect.TypeOf((*MockDataFileSetSeekerManager)(nil).AssignShardSet), arg0)
+}
+
// Borrow mocks base method
func (m *MockDataFileSetSeekerManager) Borrow(arg0 uint32, arg1 time.Time) (ConcurrentDataFileSetSeeker, error) {
m.ctrl.T.Helper()
@@ -1029,17 +1072,17 @@ func (mr *MockDataFileSetSeekerManagerMockRecorder) Close() *gomock.Call {
}
// Open mocks base method
-func (m *MockDataFileSetSeekerManager) Open(arg0 namespace.Metadata) error {
+func (m *MockDataFileSetSeekerManager) Open(arg0 namespace.Metadata, arg1 sharding.ShardSet) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Open", arg0)
+ ret := m.ctrl.Call(m, "Open", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Open indicates an expected call of Open
-func (mr *MockDataFileSetSeekerManagerMockRecorder) Open(arg0 interface{}) *gomock.Call {
+func (mr *MockDataFileSetSeekerManagerMockRecorder) Open(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Open", reflect.TypeOf((*MockDataFileSetSeekerManager)(nil).Open), arg0)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Open", reflect.TypeOf((*MockDataFileSetSeekerManager)(nil).Open), arg0, arg1)
}
// Return mocks base method
diff --git a/src/dbnode/persist/fs/index_lookup_prop_test.go b/src/dbnode/persist/fs/index_lookup_prop_test.go
index 97e40bafc7..8daabd66bd 100644
--- a/src/dbnode/persist/fs/index_lookup_prop_test.go
+++ b/src/dbnode/persist/fs/index_lookup_prop_test.go
@@ -32,6 +32,7 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/digest"
+ "github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs/msgpack"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/ident"
@@ -164,7 +165,9 @@ func calculateExpectedChecksum(t *testing.T, filePath string) uint32 {
func writeTestSummariesData(w DataFileSetWriter, writes []generatedWrite) error {
for _, write := range writes {
- err := w.Write(write.id, write.tags, write.data, write.checksum)
+ metadata := persist.NewMetadataFromIDAndTags(write.id, write.tags,
+ persist.MetadataOptions{})
+ err := w.Write(metadata, write.data, write.checksum)
if err != nil {
return err
}
diff --git a/src/dbnode/persist/fs/index_read.go b/src/dbnode/persist/fs/index_read.go
index 02940e301c..88712236dc 100644
--- a/src/dbnode/persist/fs/index_read.go
+++ b/src/dbnode/persist/fs/index_read.go
@@ -53,7 +53,7 @@ type indexReader struct {
volumeIndex int
currIdx int
- info index.IndexInfo
+ info index.IndexVolumeInfo
expectedDigest index.IndexDigests
expectedDigestOfDigest uint32
readDigests indexReaderReadDigests
@@ -259,12 +259,18 @@ func (r *indexReader) ReadSegmentFileSet() (
file := newReadableIndexSegmentFileMmap(segFileType, fd, desc)
result.files = append(result.files, file)
- digests.files = append(digests.files, indexReaderReadSegmentFileDigest{
- segmentFileType: segFileType,
- digest: digest.Checksum(desc.Bytes),
- })
- // NB(bodu): Free mmaped bytes after we take the checksum so we don't get memory spikes at bootstrap time.
+ if r.opts.IndexReaderAutovalidateIndexSegments() {
+ // Only checksum the file if we are autovalidating the index
+ // segments on open.
+ digests.files = append(digests.files, indexReaderReadSegmentFileDigest{
+ segmentFileType: segFileType,
+ digest: digest.Checksum(desc.Bytes),
+ })
+ }
+
+ // NB(bodu): Free mmaped bytes after we take the checksum so we don't
+ // get memory spikes at bootstrap time.
if err := mmap.MadviseDontNeed(desc); err != nil {
return nil, err
}
@@ -283,6 +289,10 @@ func (r *indexReader) Validate() error {
if err := r.validateInfoFileDigest(); err != nil {
return err
}
+ if !r.opts.IndexReaderAutovalidateIndexSegments() {
+ // Do not validate on segment open.
+ return nil
+ }
for i, segment := range r.info.Segments {
for j := range segment.Files {
if err := r.validateSegmentFileDigest(i, j); err != nil {
@@ -345,6 +355,13 @@ func (r *indexReader) validateSegmentFileDigest(segmentIdx, fileIdx int) error {
return nil
}
+func (r *indexReader) IndexVolumeType() idxpersist.IndexVolumeType {
+ if r.info.IndexVolumeType == nil {
+ return idxpersist.DefaultIndexVolumeType
+ }
+ return idxpersist.IndexVolumeType(r.info.IndexVolumeType.Value)
+}
+
func (r *indexReader) Close() error {
r.reset(r.opts)
return nil
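
For context on IndexReaderAutovalidateIndexSegments: with it disabled, per-segment checksums are skipped on open and Validate only verifies the info file digest. A hedged sketch of a caller opting out of up-front validation (function name illustrative):

    package example

    import "github.com/m3db/m3/src/dbnode/persist/fs"

    // newFastOpenIndexReader trades up-front segment validation for faster
    // opens (e.g. at bootstrap); assumes the option introduced in this diff.
    func newFastOpenIndexReader(filePathPrefix string) (fs.IndexFileSetReader, error) {
    	opts := fs.NewOptions().
    		SetFilePathPrefix(filePathPrefix).
    		SetIndexReaderAutovalidateIndexSegments(false)
    	return fs.NewIndexReader(opts)
    }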
diff --git a/src/dbnode/persist/fs/index_read_write_test.go b/src/dbnode/persist/fs/index_read_write_test.go
index bd5709863f..f2049a1260 100644
--- a/src/dbnode/persist/fs/index_read_write_test.go
+++ b/src/dbnode/persist/fs/index_read_write_test.go
@@ -24,6 +24,7 @@ import (
"bufio"
"bytes"
"crypto/rand"
+ "encoding/json"
"io"
"io/ioutil"
"os"
@@ -83,7 +84,41 @@ func (s indexWriteTestSetup) cleanup() {
os.RemoveAll(s.rootDir)
}
+type testIndexReadWriteOptions struct {
+ IndexReaderOptions testIndexReaderOptions
+}
+
func TestIndexSimpleReadWrite(t *testing.T) {
+ tests := []struct {
+ TestOptions testIndexReadWriteOptions
+ }{
+ {
+ TestOptions: testIndexReadWriteOptions{
+ IndexReaderOptions: testIndexReaderOptions{
+ AutovalidateIndexSegments: true,
+ },
+ },
+ },
+ {
+ TestOptions: testIndexReadWriteOptions{
+ IndexReaderOptions: testIndexReaderOptions{
+ AutovalidateIndexSegments: false,
+ },
+ },
+ },
+ }
+
+ for _, test := range tests {
+ test := test
+ name, err := json.Marshal(test)
+ require.NoError(t, err)
+ t.Run(string(name), func(t *testing.T) {
+ testIndexSimpleReadWrite(t, test.TestOptions)
+ })
+ }
+}
+
+func testIndexSimpleReadWrite(t *testing.T, testOpts testIndexReadWriteOptions) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -125,7 +160,8 @@ func TestIndexSimpleReadWrite(t *testing.T) {
err = writer.Close()
require.NoError(t, err)
- reader := newTestIndexReader(t, test.filePathPrefix)
+ reader := newTestIndexReader(t, test.filePathPrefix,
+ testOpts.IndexReaderOptions)
result, err := reader.Open(IndexReaderOpenOptions{
Identifier: test.fileSetID,
FileSetType: persist.FileSetFlushType,
@@ -150,9 +186,18 @@ func newTestIndexWriter(t *testing.T, filePathPrefix string) IndexFileSetWriter
return writer
}
-func newTestIndexReader(t *testing.T, filePathPrefix string) IndexFileSetReader {
+type testIndexReaderOptions struct {
+ AutovalidateIndexSegments bool
+}
+
+func newTestIndexReader(
+ t *testing.T,
+ filePathPrefix string,
+ opts testIndexReaderOptions,
+) IndexFileSetReader {
reader, err := NewIndexReader(testDefaultOpts.
- SetFilePathPrefix(filePathPrefix))
+ SetFilePathPrefix(filePathPrefix).
+ SetIndexReaderAutovalidateIndexSegments(opts.AutovalidateIndexSegments))
require.NoError(t, err)
return reader
}
diff --git a/src/dbnode/persist/fs/index_write.go b/src/dbnode/persist/fs/index_write.go
index 1cc3e603ad..b042d5dd39 100644
--- a/src/dbnode/persist/fs/index_write.go
+++ b/src/dbnode/persist/fs/index_write.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -33,6 +33,8 @@ import (
"github.com/m3db/m3/src/dbnode/persist"
idxpersist "github.com/m3db/m3/src/m3ninx/persist"
xerrors "github.com/m3db/m3/src/x/errors"
+
+ protobuftypes "github.com/gogo/protobuf/types"
)
const (
@@ -58,14 +60,15 @@ type indexWriter struct {
newDirectoryMode os.FileMode
fdWithDigest digest.FdWithDigestWriter
- err error
- blockSize time.Duration
- start time.Time
- fileSetType persist.FileSetType
- snapshotTime time.Time
- volumeIndex int
- shards map[uint32]struct{}
- segments []writtenIndexSegment
+ err error
+ blockSize time.Duration
+ start time.Time
+ fileSetType persist.FileSetType
+ snapshotTime time.Time
+ volumeIndex int
+ indexVolumeType idxpersist.IndexVolumeType
+ shards map[uint32]struct{}
+ segments []writtenIndexSegment
namespaceDir string
checkpointFilePath string
@@ -116,6 +119,10 @@ func (w *indexWriter) Open(opts IndexWriterOpenOptions) error {
w.volumeIndex = opts.Identifier.VolumeIndex
w.shards = opts.Shards
w.snapshotTime = opts.Snapshot.SnapshotTime
+ w.indexVolumeType = opts.IndexVolumeType
+ if w.indexVolumeType == "" {
+ w.indexVolumeType = idxpersist.DefaultIndexVolumeType
+ }
w.segments = nil
switch opts.FileSetType {
@@ -230,13 +237,16 @@ func (w *indexWriter) infoFileData() ([]byte, error) {
for shard := range w.shards {
shards = append(shards, shard)
}
- info := &index.IndexInfo{
+ info := &index.IndexVolumeInfo{
MajorVersion: indexFileSetMajorVersion,
BlockStart: w.start.UnixNano(),
BlockSize: int64(w.blockSize),
FileType: int64(w.fileSetType),
Shards: shards,
SnapshotTime: w.snapshotTime.UnixNano(),
+ IndexVolumeType: &protobuftypes.StringValue{
+ Value: string(w.indexVolumeType),
+ },
}
for _, segment := range w.segments {
segmentInfo := &index.SegmentInfo{
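
On the writer side, IndexWriterOpenOptions gains an IndexVolumeType field and Open normalizes an empty value to the default, so existing callers keep working. A sketch (assuming only the option fields shown in this diff):

    package example

    import (
    	"time"

    	"github.com/m3db/m3/src/dbnode/persist"
    	"github.com/m3db/m3/src/dbnode/persist/fs"
    	idxpersist "github.com/m3db/m3/src/m3ninx/persist"
    )

    // openIndexWriter passes an explicit volume type; leaving IndexVolumeType
    // unset ("") is equivalent to idxpersist.DefaultIndexVolumeType.
    func openIndexWriter(
    	w fs.IndexFileSetWriter,
    	id fs.FileSetFileIdentifier,
    	shards map[uint32]struct{},
    	blockSize time.Duration,
    ) error {
    	return w.Open(fs.IndexWriterOpenOptions{
    		Identifier:      id,
    		BlockSize:       blockSize,
    		FileSetType:     persist.FileSetFlushType,
    		Shards:          shards,
    		IndexVolumeType: idxpersist.DefaultIndexVolumeType,
    	})
    }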
diff --git a/src/dbnode/persist/fs/index_write_test.go b/src/dbnode/persist/fs/index_write_test.go
index b8384481bb..a5431b990d 100644
--- a/src/dbnode/persist/fs/index_write_test.go
+++ b/src/dbnode/persist/fs/index_write_test.go
@@ -130,7 +130,8 @@ func TestSnapshotIndexWriter(t *testing.T) {
}, actualFiles)
// Verify can read them
- reader := newTestIndexReader(t, test.filePathPrefix)
+ reader := newTestIndexReader(t, test.filePathPrefix,
+ testIndexReaderOptions{})
for _, snapshot := range testSnapshotSegments {
// Add the snapshot index to the file set ID
fileSetID := test.fileSetID
diff --git a/src/dbnode/persist/fs/merger.go b/src/dbnode/persist/fs/merger.go
index 877a2d3f59..555bbcc6d8 100644
--- a/src/dbnode/persist/fs/merger.go
+++ b/src/dbnode/persist/fs/merger.go
@@ -21,22 +21,25 @@
package fs
import (
+ "errors"
"io"
"time"
- "github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
- "github.com/m3db/m3/src/dbnode/storage/index/convert"
+ "github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
)
+var errMergeAndCleanupNotSupported = errors.New("function MergeAndCleanup not supported outside of bootstrapping")
+
type merger struct {
reader DataFileSetReader
blockAllocSize int
@@ -46,6 +49,7 @@ type merger struct {
encoderPool encoding.EncoderPool
contextPool context.Pool
nsOpts namespace.Options
+ filePathPrefix string
}
// NewMerger returns a new Merger. This implementation is in charge of merging
@@ -65,6 +69,7 @@ func NewMerger(
identPool ident.Pool,
encoderPool encoding.EncoderPool,
contextPool context.Pool,
+ filePathPrefix string,
nsOpts namespace.Options,
) Merger {
return &merger{
@@ -76,6 +81,7 @@ func NewMerger(
encoderPool: encoderPool,
contextPool: contextPool,
nsOpts: nsOpts,
+ filePathPrefix: filePathPrefix,
}
}
@@ -88,13 +94,13 @@ func (m *merger) Merge(
nextVolumeIndex int,
flushPreparer persist.FlushPreparer,
nsCtx namespace.Context,
-) (err error) {
+ onFlush persist.OnFlushSeries,
+) (persist.DataCloser, error) {
var (
reader = m.reader
blockAllocSize = m.blockAllocSize
srPool = m.srPool
multiIterPool = m.multiIterPool
- identPool = m.identPool
encoderPool = m.encoderPool
nsOpts = m.nsOpts
@@ -113,10 +119,12 @@ func (m *merger) Merge(
},
FileSetType: persist.FileSetFlushType,
}
+ closer persist.DataCloser
+ err error
)
if err := reader.Open(openOpts); err != nil {
- return err
+ return closer, err
}
defer func() {
// Only set the error here if not set by the end of the function, since
@@ -128,7 +136,7 @@ func (m *merger) Merge(
nsMd, err := namespace.NewMetadata(nsID, nsOpts)
if err != nil {
- return err
+ return closer, err
}
prepareOpts := persist.DataPrepareOptions{
NamespaceMetadata: nsMd,
@@ -140,7 +148,7 @@ func (m *merger) Merge(
}
prepared, err := flushPreparer.PrepareData(prepareOpts)
if err != nil {
- return err
+ return closer, err
}
var (
@@ -155,19 +163,6 @@ func (m *merger) Merge(
multiIter = multiIterPool.Get()
ctx = m.contextPool.Get()
- // We keep track of IDs/tags to finalize at the end of merging. This
- // only applies to those that come from disk Reads, since the whole
- // lifecycle of those IDs/tags are contained to this function. We don't
- // want finalize the IDs from memory since other components may have
- // ownership over it.
- //
- // We must only finalize these at the end of this function, since the
- // flush preparer's underlying writer holds on to those references
- // until it is closed (closing the PreparedDataPersist at the end of
- // this merge closes the underlying writer).
- idsToFinalize = make([]ident.ID, 0, reader.Entries())
- tagsToFinalize = make([]ident.Tags, 0, reader.Entries())
-
// Shared between iterations.
iterResources = newIterResources(
multiIter,
@@ -180,17 +175,11 @@ func (m *merger) Merge(
defer func() {
segReader.Finalize()
multiIter.Close()
- for _, res := range idsToFinalize {
- res.Finalize()
- }
- for _, res := range tagsToFinalize {
- res.Finalize()
- }
}()
// The merge is performed in two stages. The first stage is to loop through
// series on disk and merge it with what's in the merge target. Looping
// through disk in the first stage is done intentionally to read disk
// sequentially to optimize for spinning disk access. The second stage is to
// persist the rest of the series in the merge target that were not
// persisted in the first stage.
@@ -198,31 +187,30 @@ func (m *merger) Merge(
// First stage: loop through series on disk.
for id, tagsIter, data, checksum, err := reader.Read(); err != io.EOF; id, tagsIter, data, checksum, err = reader.Read() {
if err != nil {
- return err
+ return closer, err
}
- idsToFinalize = append(idsToFinalize, id)
segmentReaders = segmentReaders[:0]
- segmentReaders = append(segmentReaders, segmentReaderFromData(data, segReader))
+ seg := segmentReaderFromData(data, checksum, segReader)
+ segmentReaders = append(segmentReaders, seg)
// Check if this series is in memory (and thus requires merging).
ctx.Reset()
mergeWithData, hasInMemoryData, err := mergeWith.Read(ctx, id, blockStart, nsCtx)
if err != nil {
- return err
+ return closer, err
}
if hasInMemoryData {
segmentReaders = appendBlockReadersToSegmentReaders(segmentReaders, mergeWithData)
}
- // tagsIter is never nil. These tags will be valid as long as the IDs
- // are valid, and the IDs are valid for the duration of the file writing.
- tags, err := convert.TagsFromTagsIter(id, tagsIter, identPool)
- tagsIter.Close()
- if err != nil {
- return err
- }
- tagsToFinalize = append(tagsToFinalize, tags)
+ // Inform the writer to finalize the ID and tag iterator once
+ // the volume is written.
+ metadata := persist.NewMetadataFromIDAndTagIterator(id, tagsIter,
+ persist.MetadataOptions{
+ FinalizeID: true,
+ FinalizeTagIterator: true,
+ })
// In the special (but common) case that we're just copying the series data from the old file
// into the new one without merging or adding any additional data we can avoid recalculating
@@ -230,15 +218,15 @@ func (m *merger) Merge(
if len(segmentReaders) == 1 && hasInMemoryData == false {
segment, err := segmentReaders[0].Segment()
if err != nil {
- return err
+ return closer, err
}
- if err := persistSegmentWithChecksum(id, tags, segment, checksum, prepared.Persist); err != nil {
- return err
+ if err := persistSegmentWithChecksum(metadata, segment, checksum, prepared.Persist); err != nil {
+ return closer, err
}
} else {
- if err := persistSegmentReaders(id, tags, segmentReaders, iterResources, prepared.Persist); err != nil {
- return err
+ if err := persistSegmentReaders(metadata, segmentReaders, iterResources, prepared.Persist); err != nil {
+ return closer, err
}
}
// Closing the context will finalize the data returned from
@@ -252,10 +240,22 @@ func (m *merger) Merge(
ctx.Reset()
err = mergeWith.ForEachRemaining(
ctx, blockStart,
- func(id ident.ID, tags ident.Tags, mergeWithData []xio.BlockReader) error {
+ func(seriesMetadata doc.Document, mergeWithData block.FetchBlockResult) error {
segmentReaders = segmentReaders[:0]
- segmentReaders = appendBlockReadersToSegmentReaders(segmentReaders, mergeWithData)
- err := persistSegmentReaders(id, tags, segmentReaders, iterResources, prepared.Persist)
+ segmentReaders = appendBlockReadersToSegmentReaders(segmentReaders, mergeWithData.Blocks)
+
+ metadata := persist.NewMetadata(seriesMetadata)
+ err := persistSegmentReaders(metadata, segmentReaders, iterResources, prepared.Persist)
+
+ if err == nil {
+ err = onFlush.OnFlushNewSeries(persist.OnFlushNewSeriesEvent{
+ Shard: shard,
+ BlockStart: startTime,
+ FirstWrite: mergeWithData.FirstWrite,
+ SeriesMetadata: seriesMetadata,
+ })
+ }
+
// Context is safe to close after persisting data to disk.
// Reset context here within the passed in function so that the
// context gets reset for each remaining series instead of getting
@@ -264,13 +264,37 @@ func (m *merger) Merge(
ctx.BlockingCloseReset()
return err
}, nsCtx)
+ if err != nil {
+ return closer, err
+ }
+
+ // NB(bodu): Return a deferred closer so that we can guarantee that cold index writes are persisted first.
+ return prepared.DeferClose()
+}
+
+func (m *merger) MergeAndCleanup(
+ fileID FileSetFileIdentifier,
+ mergeWith MergeWith,
+ nextVolumeIndex int,
+ flushPreparer persist.FlushPreparer,
+ nsCtx namespace.Context,
+ onFlush persist.OnFlushSeries,
+ isBootstrapped bool,
+) error {
+ if isBootstrapped {
+ return errMergeAndCleanupNotSupported
+ }
+
+ closer, err := m.Merge(fileID, mergeWith, nextVolumeIndex, flushPreparer, nsCtx, onFlush)
if err != nil {
return err
}
- // Close the flush preparer, which writes the rest of the files in the
- // fileset.
- return prepared.Close()
+ if err = closer(); err != nil {
+ return err
+ }
+
+ return DeleteFileSetAt(m.filePathPrefix, fileID.Namespace, fileID.Shard, fileID.BlockStart, fileID.VolumeIndex)
}
func appendBlockReadersToSegmentReaders(segReaders []xio.SegmentReader, brs []xio.BlockReader) []xio.SegmentReader {
@@ -282,16 +306,16 @@ func appendBlockReadersToSegmentReaders(segReaders []xio.SegmentReader, brs []xi
func segmentReaderFromData(
data checked.Bytes,
+ checksum uint32,
segReader xio.SegmentReader,
) xio.SegmentReader {
- seg := ts.NewSegment(data, nil, ts.FinalizeHead)
+ seg := ts.NewSegment(data, nil, checksum, ts.FinalizeHead)
segReader.Reset(seg)
return segReader
}
func persistSegmentReaders(
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
segReaders []xio.SegmentReader,
ir iterResources,
persistFn persist.DataFn,
@@ -301,15 +325,14 @@ func persistSegmentReaders(
}
if len(segReaders) == 1 {
- return persistSegmentReader(id, tags, segReaders[0], persistFn)
+ return persistSegmentReader(metadata, segReaders[0], persistFn)
}
- return persistIter(id, tags, segReaders, ir, persistFn)
+ return persistIter(metadata, segReaders, ir, persistFn)
}
func persistIter(
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
segReaders []xio.SegmentReader,
ir iterResources,
persistFn persist.DataFn,
@@ -330,12 +353,11 @@ func persistIter(
}
segment := encoder.Discard()
- return persistSegment(id, tags, segment, persistFn)
+ return persistSegment(metadata, segment, persistFn)
}
func persistSegmentReader(
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
segmentReader xio.SegmentReader,
persistFn persist.DataFn,
) error {
@@ -343,27 +365,25 @@ func persistSegmentReader(
if err != nil {
return err
}
- return persistSegment(id, tags, segment, persistFn)
+ return persistSegment(metadata, segment, persistFn)
}
func persistSegment(
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
segment ts.Segment,
persistFn persist.DataFn,
) error {
- checksum := digest.SegmentChecksum(segment)
- return persistFn(id, tags, segment, checksum)
+ checksum := segment.CalculateChecksum()
+ return persistFn(metadata, segment, checksum)
}
func persistSegmentWithChecksum(
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
segment ts.Segment,
checksum uint32,
persistFn persist.DataFn,
) error {
- return persistFn(id, tags, segment, checksum)
+ return persistFn(metadata, segment, checksum)
}
type iterResources struct {
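
Since Merge now returns a persist.DataCloser instead of closing the prepared writer itself, callers sequence the close explicitly; MergeAndCleanup above composes the same steps with fileset deletion. A sketch of the intended two-phase pattern (function name illustrative):

    package example

    import (
    	"github.com/m3db/m3/src/dbnode/namespace"
    	"github.com/m3db/m3/src/dbnode/persist"
    	"github.com/m3db/m3/src/dbnode/persist/fs"
    )

    // mergeThenClose runs a merge, persists any artifacts that must land before
    // the data fileset is sealed (e.g. cold index writes), then closes.
    func mergeThenClose(
    	m fs.Merger,
    	fileID fs.FileSetFileIdentifier,
    	mergeWith fs.MergeWith,
    	nextVolumeIndex int,
    	preparer persist.FlushPreparer,
    	nsCtx namespace.Context,
    	onFlush persist.OnFlushSeries,
    ) error {
    	closer, err := m.Merge(fileID, mergeWith, nextVolumeIndex, preparer, nsCtx, onFlush)
    	if err != nil {
    		return err
    	}
    	// ... persist dependent artifacts here ...
    	return closer()
    }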
diff --git a/src/dbnode/persist/fs/merger_test.go b/src/dbnode/persist/fs/merger_test.go
index 3ebbd057a4..c88c3afcb7 100644
--- a/src/dbnode/persist/fs/merger_test.go
+++ b/src/dbnode/persist/fs/merger_test.go
@@ -22,16 +22,21 @@ package fs
import (
"io"
+ "os"
+ "path/filepath"
"testing"
"time"
"github.com/golang/mock/gomock"
+ "github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
+ "github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
@@ -51,6 +56,7 @@ var (
identPool ident.Pool
encoderPool encoding.EncoderPool
contextPool context.Pool
+ bytesPool pool.CheckedBytesPool
startTime = time.Now().Truncate(blockSize)
@@ -84,6 +90,10 @@ func init() {
contextPool = context.NewPool(context.NewOptions().
SetContextPoolOptions(poolOpts).
SetFinalizerPoolOptions(poolOpts))
+ bytesPool = pool.NewCheckedBytesPool(nil, poolOpts, func(s []pool.Bucket) pool.BytesPool {
+ return pool.NewBytesPool(s, poolOpts)
+ })
+ bytesPool.Init()
}
func TestMergeWithIntersection(t *testing.T) {
@@ -425,6 +435,99 @@ func TestMergeWithNoData(t *testing.T) {
testMergeWith(t, diskData, mergeTargetData, expected)
}
+func TestCleanup(t *testing.T) {
+ dir := createTempDir(t)
+ filePathPrefix := filepath.Join(dir, "")
+ defer os.RemoveAll(dir)
+
+ // Write fileset to disk
+ fsOpts := NewOptions().
+ SetFilePathPrefix(filePathPrefix)
+
+ md, err := namespace.NewMetadata(ident.StringID("foo"), namespace.NewOptions())
+ require.NoError(t, err)
+
+ blockStart := time.Now()
+ var shard uint32 = 1
+ fsId := FileSetFileIdentifier{
+ Namespace: md.ID(),
+ Shard: shard,
+ BlockStart: blockStart,
+ VolumeIndex: 0,
+ }
+ writeFilesetToDisk(t, fsId, fsOpts)
+
+ // Verify fileset exists
+ exists, err := DataFileSetExists(filePathPrefix, md.ID(), shard, blockStart, 0)
+ require.NoError(t, err)
+ require.True(t, exists)
+
+ // Initialize merger
+ reader, err := NewReader(bytesPool, fsOpts)
+ require.NoError(t, err)
+
+ merger := NewMerger(reader, 0, srPool, multiIterPool, identPool, encoderPool, contextPool,
+ filePathPrefix, namespace.NewOptions())
+
+ // Run merger
+ pm, err := NewPersistManager(fsOpts)
+ require.NoError(t, err)
+
+ preparer, err := pm.StartFlushPersist()
+ require.NoError(t, err)
+
+ err = merger.MergeAndCleanup(fsId, NewNoopMergeWith(), fsId.VolumeIndex+1, preparer,
+ namespace.NewContextFrom(md), &persist.NoOpColdFlushNamespace{}, false)
+ require.NoError(t, err)
+
+ // Verify old fileset gone and new one present
+ exists, err = DataFileSetExists(filePathPrefix, md.ID(), shard, blockStart, 0)
+ require.NoError(t, err)
+ require.False(t, exists)
+
+ exists, err = DataFileSetExists(filePathPrefix, md.ID(), shard, blockStart, 1)
+ require.NoError(t, err)
+ require.True(t, exists)
+}
+
+func TestCleanupOnceBootstrapped(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ preparer := persist.NewMockFlushPreparer(ctrl)
+ md, err := namespace.NewMetadata(ident.StringID("foo"), namespace.NewOptions())
+ require.NoError(t, err)
+
+ m := merger{}
+ err = m.MergeAndCleanup(FileSetFileIdentifier{}, NewNoopMergeWith(), 1, preparer,
+ namespace.NewContextFrom(md), &persist.NoOpColdFlushNamespace{}, true)
+ require.Error(t, err)
+}
+
+func writeFilesetToDisk(t *testing.T, fsId FileSetFileIdentifier, fsOpts Options) {
+ w, err := NewWriter(fsOpts)
+ require.NoError(t, err)
+
+ writerOpts := DataWriterOpenOptions{
+ Identifier: fsId,
+ BlockSize: 2 * time.Hour,
+ }
+ err = w.Open(writerOpts)
+ require.NoError(t, err)
+
+ entry := []byte{1, 2, 3}
+
+ chkdBytes := checked.NewBytes(entry, nil)
+ chkdBytes.IncRef()
+ metadata := persist.NewMetadataFromIDAndTags(ident.StringID("foo"),
+ ident.Tags{}, persist.MetadataOptions{})
+ err = w.Write(metadata, chkdBytes, digest.Checksum(entry))
+ require.NoError(t, err)
+
+ err = w.Close()
+ require.NoError(t, err)
+}
+
func testMergeWith(
t *testing.T,
diskData *checkedBytesMap,
@@ -436,12 +539,13 @@ func testMergeWith(
reader := mockReaderFromData(ctrl, diskData)
var persisted []persistedData
+ var deferClosed bool
preparer := persist.NewMockFlushPreparer(ctrl)
preparer.EXPECT().PrepareData(gomock.Any()).Return(
persist.PreparedDataPersist{
- Persist: func(id ident.ID, tags ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
persisted = append(persisted, persistedData{
- id: id,
+ metadata: metadata,
// NB(bodu): Once data is persisted the `ts.Segment` gets finalized
// so we can't read from it anymore or that violates the read after
// free invariant. So we `Clone` the segment here.
@@ -449,21 +553,30 @@ func testMergeWith(
})
return nil
},
- Close: func() error { return nil },
+ DeferClose: func() (persist.DataCloser, error) {
+ return func() error {
+ require.False(t, deferClosed)
+ deferClosed = true
+ return nil
+ }, nil
+ },
}, nil)
nsCtx := namespace.Context{}
nsOpts := namespace.NewOptions()
merger := NewMerger(reader, 0, srPool, multiIterPool,
- identPool, encoderPool, contextPool, nsOpts)
+ identPool, encoderPool, contextPool, NewOptions().FilePathPrefix(), nsOpts)
fsID := FileSetFileIdentifier{
Namespace: ident.StringID("test-ns"),
Shard: uint32(8),
BlockStart: startTime,
}
mergeWith := mockMergeWithFromData(t, ctrl, diskData, mergeTargetData)
- err := merger.Merge(fsID, mergeWith, 1, preparer, nsCtx)
+ closer, err := merger.Merge(fsID, mergeWith, 1, preparer, nsCtx, &persist.NoOpColdFlushNamespace{})
require.NoError(t, err)
+ require.False(t, deferClosed)
+ require.NoError(t, closer())
+ require.True(t, deferClosed)
assertPersistedAsExpected(t, persisted, expectedData)
}
@@ -477,10 +590,10 @@ func assertPersistedAsExpected(
require.Equal(t, expectedData.Len(), len(persisted))
for _, actualData := range persisted {
- id := actualData.id
- data, exists := expectedData.Get(id)
+ id := actualData.metadata.BytesID()
+ data, exists := expectedData.Get(ident.StringID(string(id)))
require.True(t, exists)
- seg := ts.NewSegment(data, nil, ts.FinalizeHead)
+ seg := ts.NewSegment(data, nil, 0, ts.FinalizeHead)
expectedDPs := datapointsFromSegment(t, seg)
actualDPs := datapointsFromSegment(t, actualData.segment)
@@ -520,7 +633,6 @@ func mockReaderFromData(
) *MockDataFileSetReader {
reader := NewMockDataFileSetReader(ctrl)
reader.EXPECT().Open(gomock.Any()).Return(nil)
- reader.EXPECT().Entries().Return(diskData.Len()).Times(2)
reader.EXPECT().Close().Return(nil)
tagIter := ident.NewTagsIterator(ident.NewTags(ident.StringTag("tag-key0", "tag-val0")))
fakeChecksum := uint32(42)
@@ -587,8 +699,11 @@ func mockMergeWithFromData(
data, ok := mergeTargetData.Get(id)
if ok {
segReader := srPool.Get()
- br := []xio.BlockReader{blockReaderFromData(data, segReader, startTime, blockSize)}
- fn(id, ident.Tags{}, br)
+ br := block.FetchBlockResult{
+ Start: startTime,
+ Blocks: []xio.BlockReader{blockReaderFromData(data, segReader, startTime, blockSize)},
+ }
+ fn(doc.Document{ID: id.Bytes()}, br)
}
}
})
@@ -597,8 +712,8 @@ func mockMergeWithFromData(
}
type persistedData struct {
- id ident.ID
- segment ts.Segment
+ metadata persist.Metadata
+ segment ts.Segment
}
func datapointsFromSegment(t *testing.T, seg ts.Segment) []ts.Datapoint {
@@ -624,7 +739,7 @@ func blockReaderFromData(
startTime time.Time,
blockSize time.Duration,
) xio.BlockReader {
- seg := ts.NewSegment(data, nil, ts.FinalizeHead)
+ seg := ts.NewSegment(data, nil, 0, ts.FinalizeHead)
segReader.Reset(seg)
return xio.BlockReader{
SegmentReader: segReader,
diff --git a/src/dbnode/persist/fs/migration/migration.go b/src/dbnode/persist/fs/migration/migration.go
new file mode 100644
index 0000000000..ee8be0f18d
--- /dev/null
+++ b/src/dbnode/persist/fs/migration/migration.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migration
+
+import (
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/persist"
+ "github.com/m3db/m3/src/dbnode/persist/fs"
+ xtime "github.com/m3db/m3/src/x/time"
+)
+
+// Task interface is implemented by tasks that wish to perform a data migration
+// on a fileset. This typically involves updating files in a fileset that were
+// created by a previous version of the database.
+type Task interface {
+ // Run executes the steps required to complete a migration, returning the
+ // potentially updated ReadInfoFileResult or an error.
+ Run() (fs.ReadInfoFileResult, error)
+}
+
+// NewTaskFn is a function that can create a new migration task.
+type NewTaskFn func(opts TaskOptions) (Task, error)
+
+// toVersion1_1Task is an object responsible for migrating a fileset to version 1.1.
+type toVersion1_1Task struct {
+ opts TaskOptions
+}
+
+// MigrationTask returns a function for creating a new migration task for a
+// fileset, plus a bool indicating whether the fileset should be migrated at all.
+func MigrationTask(info fs.ReadInfoFileResult) (NewTaskFn, bool) {
+ if info.Info.MajorVersion == 1 && info.Info.MinorVersion == 0 {
+ return NewToVersion1_1Task, true
+ }
+ return nil, false
+}
+
+// NewToVersion1_1Task creates a task for migrating a fileset to version 1.1.
+func NewToVersion1_1Task(opts TaskOptions) (Task, error) {
+ if err := opts.Validate(); err != nil {
+ return nil, err
+ }
+ return &toVersion1_1Task{
+ opts: opts,
+ }, nil
+}
+
+// Run executes the steps to bring a fileset to version 1.1.
+func (v *toVersion1_1Task) Run() (fs.ReadInfoFileResult, error) {
+ var (
+ sOpts = v.opts.StorageOptions()
+ fsOpts = v.opts.FilesystemOptions()
+ newMergerFn = v.opts.NewMergerFn()
+ nsMd = v.opts.NamespaceMetadata()
+ infoFileResult = v.opts.InfoFileResult()
+ shard = v.opts.Shard()
+ persistManager = v.opts.PersistManager()
+ )
+ reader, err := fs.NewReader(sOpts.BytesPool(), fsOpts)
+ if err != nil {
+ return infoFileResult, err
+ }
+
+ merger := newMergerFn(reader, sOpts.DatabaseBlockOptions().DatabaseBlockAllocSize(),
+ sOpts.SegmentReaderPool(), sOpts.MultiReaderIteratorPool(),
+ sOpts.IdentifierPool(), sOpts.EncoderPool(), sOpts.ContextPool(),
+ fsOpts.FilePathPrefix(), nsMd.Options())
+
+ volIndex := infoFileResult.Info.VolumeIndex
+ fsID := fs.FileSetFileIdentifier{
+ Namespace: nsMd.ID(),
+ Shard: shard,
+ BlockStart: xtime.FromNanoseconds(infoFileResult.Info.BlockStart),
+ VolumeIndex: volIndex,
+ }
+
+ nsCtx := namespace.NewContextFrom(nsMd)
+
+ flushPersist, err := persistManager.StartFlushPersist()
+ if err != nil {
+ return infoFileResult, err
+ }
+
+ // Intentionally use a no-op MergeWith here, as we simply want to rewrite the
+ // same files with the current encoder, which generates index files with the
+ // entry-level checksums.
+ newIndex := volIndex + 1
+ if err = merger.MergeAndCleanup(fsID, fs.NewNoopMergeWith(), newIndex, flushPersist, nsCtx,
+ &persist.NoOpColdFlushNamespace{}, false); err != nil {
+ return infoFileResult, err
+ }
+
+ if err = flushPersist.DoneFlush(); err != nil {
+ return infoFileResult, err
+ }
+
+ infoFileResult.Info.VolumeIndex = newIndex
+ infoFileResult.Info.MinorVersion = 1
+
+ return infoFileResult, nil
+}
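
The expected call pattern for this package, sketched against the API above (helper name illustrative): consult MigrationTask with an info file result and, when a task constructor comes back, build and run the task:

    package example

    import (
    	"github.com/m3db/m3/src/dbnode/persist/fs"
    	"github.com/m3db/m3/src/dbnode/persist/fs/migration"
    )

    // maybeMigrate runs a migration only when the fileset's info file says one
    // is needed, returning the (possibly updated) info file result.
    func maybeMigrate(
    	info fs.ReadInfoFileResult,
    	opts migration.TaskOptions,
    ) (fs.ReadInfoFileResult, error) {
    	newTaskFn, shouldMigrate := migration.MigrationTask(info)
    	if !shouldMigrate {
    		return info, nil
    	}
    	task, err := newTaskFn(opts.SetInfoFileResult(info))
    	if err != nil {
    		return info, err
    	}
    	return task.Run()
    }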
diff --git a/src/dbnode/persist/fs/migration/migration_test.go b/src/dbnode/persist/fs/migration/migration_test.go
new file mode 100644
index 0000000000..8d81f14428
--- /dev/null
+++ b/src/dbnode/persist/fs/migration/migration_test.go
@@ -0,0 +1,187 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migration
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/digest"
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/persist"
+ "github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/fs/msgpack"
+ "github.com/m3db/m3/src/dbnode/storage"
+ "github.com/m3db/m3/src/dbnode/storage/block"
+ "github.com/m3db/m3/src/dbnode/storage/index"
+ "github.com/m3db/m3/src/x/checked"
+ "github.com/m3db/m3/src/x/ident"
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestToVersion1_1Run(t *testing.T) {
+ dir := createTempDir(t)
+ filePathPrefix := filepath.Join(dir, "")
+ defer os.RemoveAll(dir)
+
+ var shard uint32 = 1
+ nsId := ident.StringID("foo")
+
+ // Write unmigrated fileset to disk
+ fsOpts := writeUnmigratedData(t, filePathPrefix, nsId, shard)
+
+ // Read info file of just written fileset
+ results := fs.ReadInfoFiles(filePathPrefix, nsId, shard,
+ fsOpts.InfoReaderBufferSize(), fsOpts.DecodingOptions(), persist.FileSetFlushType)
+ require.Equal(t, 1, len(results))
+ infoFileResult := results[0]
+ indexFd := openFile(t, fsOpts, nsId, shard, infoFileResult, "index")
+ oldBytes, err := ioutil.ReadAll(indexFd)
+ require.NoError(t, err)
+
+ // Configure and run migration
+ pm, err := fs.NewPersistManager(
+ fsOpts.SetEncodingOptions(msgpack.DefaultLegacyEncodingOptions)) // Set encoder to most up-to-date version
+ require.NoError(t, err)
+
+ md, err := namespace.NewMetadata(nsId, namespace.NewOptions())
+ require.NoError(t, err)
+
+ plCache, closer, err := index.NewPostingsListCache(1, index.PostingsListCacheOptions{
+ InstrumentOptions: instrument.NewOptions(),
+ })
+ require.NoError(t, err)
+ defer closer()
+
+ opts := NewTaskOptions().
+ SetNewMergerFn(fs.NewMerger).
+ SetPersistManager(pm).
+ SetNamespaceMetadata(md).
+ SetStorageOptions(storage.NewOptions().
+ SetPersistManager(pm).
+ SetNamespaceInitializer(namespace.NewStaticInitializer([]namespace.Metadata{md})).
+ SetRepairEnabled(false).
+ SetIndexOptions(index.NewOptions().
+ SetPostingsListCache(plCache)).
+ SetBlockLeaseManager(block.NewLeaseManager(nil))).
+ SetShard(shard).
+ SetInfoFileResult(infoFileResult).
+ SetFilesystemOptions(fsOpts)
+
+ task, err := NewToVersion1_1Task(opts)
+ require.NoError(t, err)
+
+ updatedInfoFile, err := task.Run()
+ require.NoError(t, err)
+
+ // Read new info file and make sure it matches results returned by task
+ newInfoFd := openFile(t, fsOpts, nsId, shard, updatedInfoFile, "info")
+
+ newInfoBytes, err := ioutil.ReadAll(newInfoFd)
+ require.NoError(t, err)
+
+ decoder := msgpack.NewDecoder(nil)
+ decoder.Reset(msgpack.NewByteDecoderStream(newInfoBytes))
+ info, err := decoder.DecodeIndexInfo()
+ require.NoError(t, err)
+
+ require.Equal(t, updatedInfoFile.Info, info)
+
+ // Read the index entries of new volume set
+ indexFd = openFile(t, fsOpts, nsId, shard, updatedInfoFile, "index")
+ newBytes, err := ioutil.ReadAll(indexFd)
+ require.NoError(t, err)
+
+ // Diff bytes of unmigrated vs migrated fileset
+ require.NotEqual(t, oldBytes, newBytes)
+
+ // Corrupt bytes to trip newly added checksum
+ newBytes[len(newBytes)-1] = 1 + newBytes[len(newBytes)-1]
+ decoder.Reset(msgpack.NewByteDecoderStream(newBytes))
+ _, err = decoder.DecodeIndexEntry(nil)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "checksum mismatch")
+}
+
+func openFile(
+ t *testing.T,
+ fsOpts fs.Options,
+ nsId ident.ID,
+ shard uint32,
+ infoFileResult fs.ReadInfoFileResult,
+ fileType string,
+) *os.File {
+ indexFd, err := os.Open(path.Join(fsOpts.FilePathPrefix(), fmt.Sprintf("data/%s/%d/fileset-%d-%d-%s.db",
+ nsId.String(), shard, infoFileResult.Info.BlockStart, infoFileResult.Info.VolumeIndex, fileType)))
+ require.NoError(t, err)
+ return indexFd
+}
+
+func writeUnmigratedData(t *testing.T, filePathPrefix string, nsId ident.ID, shard uint32) fs.Options {
+ // Use encoding options that will not generate entry level checksums
+ eOpts := msgpack.LegacyEncodingOptions{EncodeLegacyIndexEntryVersion: msgpack.LegacyEncodingIndexEntryVersionV2}
+
+ // Write data
+ fsOpts := fs.NewOptions().
+ SetFilePathPrefix(filePathPrefix).
+ SetEncodingOptions(eOpts)
+ w, err := fs.NewWriter(fsOpts)
+ require.NoError(t, err)
+
+ blockStart := time.Now().Truncate(time.Hour)
+ writerOpts := fs.DataWriterOpenOptions{
+ Identifier: fs.FileSetFileIdentifier{
+ Namespace: nsId,
+ Shard: shard,
+ BlockStart: blockStart,
+ VolumeIndex: 0,
+ },
+ BlockSize: 2 * time.Hour,
+ }
+ err = w.Open(writerOpts)
+ require.NoError(t, err)
+
+ entry := []byte{1, 2, 3}
+
+ chkdBytes := checked.NewBytes(entry, nil)
+ chkdBytes.IncRef()
+ metadata := persist.NewMetadataFromIDAndTags(ident.StringID("foo"),
+ ident.Tags{}, persist.MetadataOptions{})
+ err = w.Write(metadata, chkdBytes, digest.Checksum(entry))
+ require.NoError(t, err)
+
+ err = w.Close()
+ require.NoError(t, err)
+
+ return fsOpts
+}
+
+func createTempDir(t *testing.T) string {
+ dir, err := ioutil.TempDir("", "testdir")
+ require.NoError(t, err)
+
+ return dir
+}
diff --git a/src/dbnode/persist/fs/migration/options.go b/src/dbnode/persist/fs/migration/options.go
new file mode 100644
index 0000000000..6e88fd11c3
--- /dev/null
+++ b/src/dbnode/persist/fs/migration/options.go
@@ -0,0 +1,72 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migration
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+)
+
+// defaultMigrationConcurrency is the default number of concurrent workers to perform migrations.
+var defaultMigrationConcurrency = int(math.Ceil(float64(runtime.NumCPU()) / 2))
+
+type options struct {
+ targetMigrationVersion MigrationVersion
+ concurrency int
+}
+
+// NewOptions creates new migration options.
+func NewOptions() Options {
+ return &options{
+ concurrency: defaultMigrationConcurrency,
+ }
+}
+
+func (o *options) Validate() error {
+ if err := ValidateMigrationVersion(o.targetMigrationVersion); err != nil {
+ return err
+ }
+ if o.concurrency < 1 {
+ return fmt.Errorf("concurrency value %d must be >= 1", o.concurrency)
+ }
+ return nil
+}
+
+func (o *options) SetTargetMigrationVersion(value MigrationVersion) Options {
+ opts := *o
+ opts.targetMigrationVersion = value
+ return &opts
+}
+
+func (o *options) TargetMigrationVersion() MigrationVersion {
+ return o.targetMigrationVersion
+}
+
+func (o *options) SetConcurrency(value int) Options {
+ opts := *o
+ opts.concurrency = value
+ return &opts
+}
+
+func (o *options) Concurrency() int {
+ return o.concurrency
+}
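
Usage sketch: the setters copy the receiver before mutating, so chained configuration is safe and shared defaults are never modified in place (values illustrative):

    package example

    import "github.com/m3db/m3/src/dbnode/persist/fs/migration"

    // newMigrationOptions builds validated options targeting version 1.1 with
    // a fixed worker count.
    func newMigrationOptions() (migration.Options, error) {
    	opts := migration.NewOptions().
    		SetTargetMigrationVersion(migration.MigrationVersion_1_1).
    		SetConcurrency(4)
    	if err := opts.Validate(); err != nil {
    		return nil, err
    	}
    	return opts, nil
    }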
diff --git a/src/dbnode/persist/fs/migration/options_test.go b/src/dbnode/persist/fs/migration/options_test.go
new file mode 100644
index 0000000000..73712bce7c
--- /dev/null
+++ b/src/dbnode/persist/fs/migration/options_test.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migration
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestOptionsTargetMigrationVersion(t *testing.T) {
+ opts := NewOptions()
+ require.Equal(t, MigrationVersionNone, opts.TargetMigrationVersion())
+
+ opts = opts.SetTargetMigrationVersion(MigrationVersion_1_1)
+ require.Equal(t, MigrationVersion_1_1, opts.TargetMigrationVersion())
+}
+
+func TestOptionsConcurrency(t *testing.T) {
+ opts := NewOptions()
+ require.Equal(t, defaultMigrationConcurrency, opts.Concurrency())
+
+ opts = opts.SetConcurrency(100)
+ require.Equal(t, 100, opts.Concurrency())
+}
+
+func TestOptionsValidate(t *testing.T) {
+ opts := NewOptions()
+ require.NoError(t, opts.Validate())
+
+ require.Error(t, opts.SetTargetMigrationVersion(2).Validate())
+ require.Error(t, opts.SetConcurrency(0).Validate())
+}
diff --git a/src/dbnode/persist/fs/migration/task_options.go b/src/dbnode/persist/fs/migration/task_options.go
new file mode 100644
index 0000000000..bfc4617f50
--- /dev/null
+++ b/src/dbnode/persist/fs/migration/task_options.go
@@ -0,0 +1,154 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migration
+
+import (
+ "errors"
+
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/persist"
+ "github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/storage"
+)
+
+var (
+ errNewMergerFnNotSet = errors.New("newMergerFn not set")
+ errNamespaceMetadataNotSet = errors.New("namespaceMetadata not set")
+ errPersistManagerNotSet = errors.New("persistManager not set")
+ errStorageOptionsNotSet = errors.New("storageOptions not set")
+ errFilesystemOptionsNotSet = errors.New("filesystemOptions not set")
+)
+
+type taskOptions struct {
+ newMergerFn fs.NewMergerFn
+ infoFileResult fs.ReadInfoFileResult
+ shard uint32
+ namespaceMetadata namespace.Metadata
+ persistManager persist.Manager
+ storageOptions storage.Options
+ fsOpts fs.Options
+}
+
+// NewTaskOptions creates new taskOptions.
+func NewTaskOptions() TaskOptions {
+ return &taskOptions{
+ newMergerFn: fs.NewMerger,
+ }
+}
+
+func (t *taskOptions) Validate() error {
+ if t.storageOptions == nil {
+ return errStorageOptionsNotSet
+ }
+ if err := t.storageOptions.Validate(); err != nil {
+ return err
+ }
+ if t.newMergerFn == nil {
+ return errNewMergerFnNotSet
+ }
+ if t.infoFileResult.Err != nil && t.infoFileResult.Err.Error() != nil {
+ return t.infoFileResult.Err.Error()
+ }
+ if t.namespaceMetadata == nil {
+ return errNamespaceMetadataNotSet
+ }
+ if t.persistManager == nil {
+ return errPersistManagerNotSet
+ }
+ if t.fsOpts == nil {
+ return errFilesystemOptionsNotSet
+ }
+ if err := t.fsOpts.Validate(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (t *taskOptions) SetNewMergerFn(value fs.NewMergerFn) TaskOptions {
+ to := *t
+ to.newMergerFn = value
+ return &to
+}
+
+func (t *taskOptions) NewMergerFn() fs.NewMergerFn {
+ return t.newMergerFn
+}
+
+func (t *taskOptions) SetInfoFileResult(value fs.ReadInfoFileResult) TaskOptions {
+ to := *t
+ to.infoFileResult = value
+ return &to
+}
+
+func (t *taskOptions) InfoFileResult() fs.ReadInfoFileResult {
+ return t.infoFileResult
+}
+
+func (t *taskOptions) SetShard(value uint32) TaskOptions {
+ to := *t
+ to.shard = value
+ return &to
+}
+
+func (t *taskOptions) Shard() uint32 {
+ return t.shard
+}
+
+func (t *taskOptions) SetNamespaceMetadata(value namespace.Metadata) TaskOptions {
+ to := *t
+ to.namespaceMetadata = value
+ return &to
+}
+
+func (t *taskOptions) NamespaceMetadata() namespace.Metadata {
+ return t.namespaceMetadata
+}
+
+func (t *taskOptions) SetPersistManager(value persist.Manager) TaskOptions {
+ to := *t
+ to.persistManager = value
+ return &to
+}
+
+func (t *taskOptions) PersistManager() persist.Manager {
+ return t.persistManager
+}
+
+func (t *taskOptions) SetStorageOptions(value storage.Options) TaskOptions {
+ to := *t
+ to.storageOptions = value
+ return &to
+}
+
+func (t *taskOptions) StorageOptions() storage.Options {
+ return t.storageOptions
+}
+
+func (t *taskOptions) SetFilesystemOptions(value fs.Options) TaskOptions {
+ to := *t
+ to.fsOpts = value
+ return &to
+}
+
+func (t *taskOptions) FilesystemOptions() fs.Options {
+ return t.fsOpts
+}
diff --git a/src/dbnode/persist/fs/migration/task_options_test.go b/src/dbnode/persist/fs/migration/task_options_test.go
new file mode 100644
index 0000000000..5996982b5a
--- /dev/null
+++ b/src/dbnode/persist/fs/migration/task_options_test.go
@@ -0,0 +1,153 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migration
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/golang/mock/gomock"
+
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/storage"
+ "github.com/m3db/m3/src/x/ident"
+ "github.com/stretchr/testify/require"
+)
+
+func defaultTestOptions(t *testing.T, ctrl *gomock.Controller) TaskOptions {
+ md, err := namespace.NewMetadata(ident.StringID("ns"), namespace.NewOptions())
+ require.NoError(t, err)
+
+ pm, err := fs.NewPersistManager(fs.NewOptions())
+ require.NoError(t, err)
+
+ mockOpts := storage.NewMockOptions(ctrl)
+ mockOpts.EXPECT().Validate().AnyTimes()
+
+ return NewTaskOptions().
+ SetInfoFileResult(fs.ReadInfoFileResult{}).
+ SetShard(1).
+ SetNamespaceMetadata(md).
+ SetPersistManager(pm).
+ SetStorageOptions(mockOpts).
+ SetFilesystemOptions(fs.NewOptions())
+}
+
+func TestValidateStorageOptions(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ opts := defaultTestOptions(t, ctrl)
+ require.NoError(t, opts.Validate())
+
+ opts = opts.SetStorageOptions(nil)
+ require.Error(t, opts.Validate())
+}
+
+func TestValidateNewMergerFn(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ opts := defaultTestOptions(t, ctrl)
+ require.NoError(t, opts.Validate())
+
+ opts = opts.SetNewMergerFn(nil)
+ require.Error(t, opts.Validate())
+}
+
+func TestValidateInfoResultErr(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ opts := defaultTestOptions(t, ctrl)
+ require.NoError(t, opts.Validate())
+
+ opts = opts.SetInfoFileResult(fs.ReadInfoFileResult{Err: testReadInfoFileResultError{}})
+ require.Error(t, opts.Validate())
+}
+
+func TestValidateNamespaceMetadata(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ opts := defaultTestOptions(t, ctrl)
+ require.NoError(t, opts.Validate())
+
+ opts = opts.SetNamespaceMetadata(nil)
+ require.Error(t, opts.Validate())
+}
+
+func TestValidatePersistManager(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ opts := defaultTestOptions(t, ctrl)
+ require.NoError(t, opts.Validate())
+
+ opts = opts.SetPersistManager(nil)
+ require.Error(t, opts.Validate())
+}
+
+func TestInfoFileResult(t *testing.T) {
+ opts := NewTaskOptions()
+ value := fs.ReadInfoFileResult{}
+ require.Equal(t, value, opts.SetInfoFileResult(value).InfoFileResult())
+}
+
+func TestShard(t *testing.T) {
+ opts := NewTaskOptions()
+ value := uint32(1)
+ require.Equal(t, value, opts.SetShard(value).Shard())
+}
+
+func TestNamespaceMetadata(t *testing.T) {
+ opts := NewTaskOptions()
+ value, err := namespace.NewMetadata(ident.StringID("ns"), namespace.NewOptions())
+ require.NoError(t, err)
+
+ require.Equal(t, value, opts.SetNamespaceMetadata(value).NamespaceMetadata())
+}
+
+func TestPersistManager(t *testing.T) {
+ opts := NewTaskOptions()
+ value, err := fs.NewPersistManager(fs.NewOptions())
+ require.NoError(t, err)
+
+ require.Equal(t, value, opts.SetPersistManager(value).PersistManager())
+}
+
+func TestStorageOptions(t *testing.T) {
+ opts := NewTaskOptions()
+ value := storage.NewOptions()
+ require.Equal(t, value, opts.SetStorageOptions(value).StorageOptions())
+}
+
+type testReadInfoFileResultError struct {
+}
+
+func (t testReadInfoFileResultError) Error() error {
+ return fmt.Errorf("error")
+}
+
+func (t testReadInfoFileResultError) Filepath() string {
+ return ""
+}
diff --git a/src/dbnode/persist/fs/migration/types.go b/src/dbnode/persist/fs/migration/types.go
new file mode 100644
index 0000000000..eb67e6af4d
--- /dev/null
+++ b/src/dbnode/persist/fs/migration/types.go
@@ -0,0 +1,97 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migration
+
+import (
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/persist"
+ "github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/storage"
+)
+
+// Options represents the options for migrations.
+type Options interface {
+ // Validate validates migration options.
+ Validate() error
+
+ // SetTargetMigrationVersion sets the target version for a migration.
+ SetTargetMigrationVersion(value MigrationVersion) Options
+
+ // TargetMigrationVersion is the target version for a migration.
+ TargetMigrationVersion() MigrationVersion
+
+ // SetConcurrency sets the number of concurrent workers performing migrations.
+ SetConcurrency(value int) Options
+
+ // Concurrency gets the number of concurrent workers performing migrations.
+ Concurrency() int
+}
+
+// MigrationVersion is an enum that corresponds to the major and minor version number to migrate data files to.
+type MigrationVersion uint
+
+// TaskOptions represents options for individual migration tasks.
+type TaskOptions interface {
+ // Validate validates the options.
+ Validate() error
+
+ // SetNewMergerFn sets the function to create a new Merger.
+ SetNewMergerFn(value fs.NewMergerFn) TaskOptions
+
+ // NewMergerFn returns the function to create a new Merger.
+ NewMergerFn() fs.NewMergerFn
+
+ // SetInfoFileResult sets the info file result associated with this task.
+ SetInfoFileResult(value fs.ReadInfoFileResult) TaskOptions
+
+ // InfoFileResult gets the info file result associated with this task.
+ InfoFileResult() fs.ReadInfoFileResult
+
+ // SetShard sets the shard associated with this task.
+ SetShard(value uint32) TaskOptions
+
+ // Shard gets the shard associated with this task.
+ Shard() uint32
+
+ // SetNamespaceMetadata sets the namespace metadata associated with this task.
+ SetNamespaceMetadata(value namespace.Metadata) TaskOptions
+
+ // NamespaceMetadata gets the namespace metadata associated with this task.
+ NamespaceMetadata() namespace.Metadata
+
+ // SetPersistManager sets the persist manager used for this task.
+ SetPersistManager(value persist.Manager) TaskOptions
+
+ // PersistManager gets the persist manager used for this task.
+ PersistManager() persist.Manager
+
+ // SetStorageOptions sets the storage options associated with this task.
+ SetStorageOptions(value storage.Options) TaskOptions
+
+ // StorageOptions gets the storage options associated with this task.
+ StorageOptions() storage.Options
+
+ // SetFilesystemOptions sets the filesystem options.
+ SetFilesystemOptions(value fs.Options) TaskOptions
+
+ // FilesystemOptions returns the filesystem options.
+ FilesystemOptions() fs.Options
+}
diff --git a/src/dbnode/persist/fs/migration/version.go b/src/dbnode/persist/fs/migration/version.go
new file mode 100644
index 0000000000..9eebc32984
--- /dev/null
+++ b/src/dbnode/persist/fs/migration/version.go
@@ -0,0 +1,87 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migration
+
+import "fmt"
+
+const (
+ // MigrationVersionNone indicates that the node should not attempt to perform any migrations.
+ MigrationVersionNone MigrationVersion = iota
+ // MigrationVersion_1_1 indicates that the node should attempt to migrate data files up to version 1.1.
+ MigrationVersion_1_1
+)
+
+var (
+ validMigrationVersions = []MigrationVersion{
+ MigrationVersionNone,
+ MigrationVersion_1_1,
+ }
+)
+
+func (m MigrationVersion) String() string {
+ switch m {
+ case MigrationVersionNone:
+ return "none"
+ case MigrationVersion_1_1:
+ return "1.1"
+ default:
+ return "unknown"
+ }
+}
+
+// ParseMigrationVersion parses a string into a MigrationVersion.
+func ParseMigrationVersion(str string) (MigrationVersion, error) {
+ for _, valid := range validMigrationVersions {
+ if str == valid.String() {
+ return valid, nil
+ }
+ }
+
+ return 0, fmt.Errorf("unrecognized migration version: %v", str)
+}
+
+// ValidateMigrationVersion validates a migration version.
+func ValidateMigrationVersion(m MigrationVersion) error {
+ for _, valid := range validMigrationVersions {
+ if valid == m {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("invalid migrate version '%v': should be one of %v",
+ m, validMigrationVersions)
+}
+
+// UnmarshalYAML unmarshals a MigrationVersion.
+func (m *MigrationVersion) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var str string
+ if err := unmarshal(&str); err != nil {
+ return err
+ }
+
+ if value, err := ParseMigrationVersion(str); err == nil {
+ *m = value
+ return nil
+ }
+
+ return fmt.Errorf("invalid MigrationVersion '%s' valid types are: %v",
+ str, validMigrationVersions)
+}
diff --git a/src/dbnode/persist/fs/migration/version_test.go b/src/dbnode/persist/fs/migration/version_test.go
new file mode 100644
index 0000000000..a5faeac91b
--- /dev/null
+++ b/src/dbnode/persist/fs/migration/version_test.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migration
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ yaml "gopkg.in/yaml.v2"
+)
+
+func TestParseMigrateVersion(t *testing.T) {
+ v, err := ParseMigrationVersion("none")
+ require.NoError(t, err)
+ require.Equal(t, MigrationVersionNone, v)
+
+ v, err = ParseMigrationVersion("1.1")
+ require.NoError(t, err)
+ require.Equal(t, MigrationVersion_1_1, v)
+}
+
+func TestValidateMigrateVersion(t *testing.T) {
+ err := ValidateMigrationVersion(MigrationVersion_1_1)
+ require.NoError(t, err)
+
+ err = ValidateMigrationVersion(2)
+ require.Error(t, err)
+}
+
+func TestUnmarshalYAML(t *testing.T) {
+ type config struct {
+ Version MigrationVersion `yaml:"version"`
+ }
+
+ for _, value := range validMigrationVersions {
+ str := fmt.Sprintf("version: %s\n", value.String())
+ var cfg config
+ require.NoError(t, yaml.Unmarshal([]byte(str), &cfg))
+ require.Equal(t, value, cfg.Version)
+ }
+
+ var cfg config
+ require.Error(t, yaml.Unmarshal([]byte("version: abc"), &cfg))
+}
diff --git a/src/dbnode/persist/fs/msgpack/decoder.go b/src/dbnode/persist/fs/msgpack/decoder.go
index a42f354d4f..848bc616f3 100644
--- a/src/dbnode/persist/fs/msgpack/decoder.go
+++ b/src/dbnode/persist/fs/msgpack/decoder.go
@@ -46,6 +46,7 @@ var (
errorUnableToDetermineNumFieldsToSkip = errors.New("unable to determine num fields to skip")
errorCalledDecodeBytesWithoutByteStreamDecoder = errors.New("called decodeBytes with out byte stream decoder")
+ errorIndexEntryChecksumMismatch = errors.New("decode index entry encountered checksum mismatch")
)
// Decoder decodes persisted msgpack-encoded data
@@ -53,20 +54,23 @@ type Decoder struct {
reader DecoderStream
// Will only be set if the Decoder is Reset() with a DecoderStream
// that also implements ByteStream.
- byteReader ByteStream
+ byteReader ByteStream
+ // Wraps the original reader with a reader that can calculate a digest. Digest calculation
+ // must be explicitly enabled; it defaults to off.
+ readerWithDigest *decoderStreamWithDigest
dec *msgpack.Decoder
err error
allocDecodedBytes bool
- legacy legacyEncodingOptions
+ legacy LegacyEncodingOptions
}
// NewDecoder creates a new decoder
func NewDecoder(opts DecodingOptions) *Decoder {
- return newDecoder(defaultlegacyEncodingOptions, opts)
+ return newDecoder(DefaultLegacyEncodingOptions, opts)
}
-func newDecoder(legacy legacyEncodingOptions, opts DecodingOptions) *Decoder {
+func newDecoder(legacy LegacyEncodingOptions, opts DecodingOptions) *Decoder {
if opts == nil {
opts = NewDecodingOptions()
}
@@ -76,6 +80,7 @@ func newDecoder(legacy legacyEncodingOptions, opts DecodingOptions) *Decoder {
reader: reader,
dec: msgpack.NewDecoder(reader),
legacy: legacy,
+ readerWithDigest: newDecoderStreamWithDigest(nil),
}
}
@@ -91,7 +96,8 @@ func (dec *Decoder) Reset(stream DecoderStream) {
dec.byteReader = nil
}
- dec.dec.Reset(dec.reader)
+ dec.readerWithDigest.reset(dec.reader)
+ dec.dec.Reset(dec.readerWithDigest)
dec.err = nil
}
@@ -115,8 +121,10 @@ func (dec *Decoder) DecodeIndexEntry(bytesPool pool.BytesPool) (schema.IndexEntr
if dec.err != nil {
return emptyIndexEntry, dec.err
}
+ dec.readerWithDigest.setDigestReaderEnabled(true)
_, numFieldsToSkip := dec.decodeRootObject(indexEntryVersion, indexEntryType)
indexEntry := dec.decodeIndexEntry(bytesPool)
+ dec.readerWithDigest.setDigestReaderEnabled(false)
dec.skip(numFieldsToSkip)
if dec.err != nil {
return emptyIndexEntry, dec.err
@@ -242,22 +250,27 @@ func (dec *Decoder) decodeIndexInfo() schema.IndexInfo {
var opts checkNumFieldsOptions
// Overrides only used to test forwards compatibility.
- switch dec.legacy.decodeLegacyIndexInfoVersion {
- case legacyEncodingIndexVersionV1:
+ switch dec.legacy.DecodeLegacyIndexInfoVersion {
+ case LegacyEncodingIndexVersionV1:
// V1 had 6 fields.
opts.override = true
opts.numExpectedMinFields = 6
opts.numExpectedCurrFields = 6
- case legacyEncodingIndexVersionV2:
+ case LegacyEncodingIndexVersionV2:
// V2 had 8 fields.
opts.override = true
opts.numExpectedMinFields = 6
opts.numExpectedCurrFields = 8
- case legacyEncodingIndexVersionV3:
+ case LegacyEncodingIndexVersionV3:
// V3 had 9 fields.
opts.override = true
opts.numExpectedMinFields = 6
opts.numExpectedCurrFields = 9
+ case LegacyEncodingIndexVersionV4:
+ // V4 had 10 fields.
+ opts.override = true
+ opts.numExpectedMinFields = 6
+ opts.numExpectedCurrFields = 10
}
numFieldsToSkip, actual, ok := dec.checkNumFieldsFor(indexInfoType, opts)
@@ -274,7 +287,7 @@ func (dec *Decoder) decodeIndexInfo() schema.IndexInfo {
indexInfo.BloomFilter = dec.decodeIndexBloomFilterInfo()
// At this point if its a V1 file we've decoded all the available fields.
- if dec.legacy.decodeLegacyIndexInfoVersion == legacyEncodingIndexVersionV1 || actual < 8 {
+ if dec.legacy.DecodeLegacyIndexInfoVersion == LegacyEncodingIndexVersionV1 || actual < 8 {
dec.skip(numFieldsToSkip)
return indexInfo
}
@@ -284,7 +297,7 @@ func (dec *Decoder) decodeIndexInfo() schema.IndexInfo {
indexInfo.FileType = persist.FileSetType(dec.decodeVarint())
// At this point if its a V2 file we've decoded all the available fields.
- if dec.legacy.decodeLegacyIndexInfoVersion == legacyEncodingIndexVersionV2 || actual < 9 {
+ if dec.legacy.DecodeLegacyIndexInfoVersion == LegacyEncodingIndexVersionV2 || actual < 9 {
dec.skip(numFieldsToSkip)
return indexInfo
}
@@ -293,7 +306,7 @@ func (dec *Decoder) decodeIndexInfo() schema.IndexInfo {
indexInfo.SnapshotID, _, _ = dec.decodeBytes()
// At this point if its a V3 file we've decoded all the available fields.
- if dec.legacy.decodeLegacyIndexInfoVersion == legacyEncodingIndexVersionV3 || actual < 10 {
+ if dec.legacy.DecodeLegacyIndexInfoVersion == LegacyEncodingIndexVersionV3 || actual < 10 {
dec.skip(numFieldsToSkip)
return indexInfo
}
@@ -301,6 +314,15 @@ func (dec *Decoder) decodeIndexInfo() schema.IndexInfo {
// Decode fields added in V4.
indexInfo.VolumeIndex = int(dec.decodeVarint())
+ // At this point, if it's a V4 file, we've decoded all the available fields.
+ if dec.legacy.DecodeLegacyIndexInfoVersion == LegacyEncodingIndexVersionV4 || actual < 11 {
+ dec.skip(numFieldsToSkip)
+ return indexInfo
+ }
+
+ // Decode fields added in V5.
+ indexInfo.MinorVersion = dec.decodeVarint()
+
dec.skip(numFieldsToSkip)
return indexInfo
}
@@ -336,12 +358,25 @@ func (dec *Decoder) decodeIndexBloomFilterInfo() schema.IndexBloomFilterInfo {
func (dec *Decoder) decodeIndexEntry(bytesPool pool.BytesPool) schema.IndexEntry {
var opts checkNumFieldsOptions
- if dec.legacy.decodeLegacyV1IndexEntry {
+ switch dec.legacy.DecodeLegacyIndexEntryVersion {
+ case LegacyEncodingIndexEntryVersionV1:
// V1 had 5 fields.
opts.override = true
opts.numExpectedMinFields = 5
opts.numExpectedCurrFields = 5
+ case LegacyEncodingIndexEntryVersionV2:
+ // V2 had 6 fields.
+ opts.override = true
+ opts.numExpectedMinFields = 5
+ opts.numExpectedCurrFields = 6
+ case LegacyEncodingIndexEntryVersionCurrent:
+ // V3 is the current version; no overrides needed.
+ break
+ default:
+ dec.err = fmt.Errorf("invalid legacyEncodingIndexEntryVersion provided: %v", dec.legacy.DecodeLegacyIndexEntryVersion)
+ return emptyIndexEntry
}
+
numFieldsToSkip, actual, ok := dec.checkNumFieldsFor(indexEntryType, opts)
if !ok {
return emptyIndexEntry
@@ -358,20 +393,44 @@ func (dec *Decoder) decodeIndexEntry(bytesPool pool.BytesPool) schema.IndexEntry
indexEntry.Size = dec.decodeVarint()
indexEntry.Offset = dec.decodeVarint()
- indexEntry.Checksum = dec.decodeVarint()
+ indexEntry.DataChecksum = dec.decodeVarint()
- if dec.legacy.decodeLegacyV1IndexEntry || actual < 6 {
+ // At this point, if it's a V1 file, we've decoded all the available fields.
+ if dec.legacy.DecodeLegacyIndexEntryVersion == LegacyEncodingIndexEntryVersionV1 || actual < 6 {
dec.skip(numFieldsToSkip)
return indexEntry
}
+ // Decode fields added in V2
if bytesPool == nil {
indexEntry.EncodedTags, _, _ = dec.decodeBytes()
} else {
indexEntry.EncodedTags = dec.decodeBytesWithPool(bytesPool)
}
+ // At this point, if it's a V2 file, we've decoded all the available fields.
+ if dec.legacy.DecodeLegacyIndexEntryVersion == LegacyEncodingIndexEntryVersionV2 || actual < 7 {
+ dec.skip(numFieldsToSkip)
+ return indexEntry
+ }
+
+ // NB(nate): Any new fields should be parsed here.
+
+ // Intentionally skip any extra fields here, as we've stipulated that from V3 onward the
+ // IndexEntryChecksum will be the final field on index entries.
dec.skip(numFieldsToSkip)
+
+ // Retrieve the actual checksum value here. Retrieving it after decoding the upcoming expected
+ // checksum field would fold that field into the actual checksum calculation and cause a mismatch.
+ actualChecksum := dec.readerWithDigest.digest().Sum32()
+
+ // Decode checksum field originally added in V3
+ expectedChecksum := uint32(dec.decodeVarint())
+
+ if expectedChecksum != actualChecksum {
+ dec.err = errorIndexEntryChecksumMismatch
+ }
+
return indexEntry
}
@@ -621,6 +680,11 @@ func (dec *Decoder) decodeBytes() ([]byte, int, int) {
return nil, -1, -1
}
value = backingBytes[currPos:targetPos]
+ if err := dec.readerWithDigest.capture(value); err != nil {
+ dec.err = err
+ return nil, -1, -1
+ }
+
return value, currPos, bytesLen
}
@@ -638,7 +702,7 @@ func (dec *Decoder) decodeBytesWithPool(bytesPool pool.BytesPool) []byte {
}
bytes := bytesPool.Get(bytesLen)[:bytesLen]
- n, err := io.ReadFull(dec.reader, bytes)
+ n, err := io.ReadFull(dec.readerWithDigest, bytes)
if err != nil {
dec.err = err
bytesPool.Put(bytes)
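
The ordering constraint in decodeIndexEntry above (snapshot the running digest before decoding the stored checksum) can be seen in isolation in this self-contained sketch, which substitutes hash/adler32 for m3's digest helpers:

    package main

    import (
        "encoding/binary"
        "fmt"
        "hash/adler32"
    )

    func main() {
        // Encode: payload followed by a trailing adler32 checksum of the payload.
        payload := []byte("index entry fields...")
        var sum [4]byte
        binary.BigEndian.PutUint32(sum[:], adler32.Checksum(payload))
        buf := append(append([]byte{}, payload...), sum[:]...)

        // Decode: take the digest over everything *before* the checksum field;
        // folding the checksum bytes themselves into the digest would always
        // produce a mismatch.
        n := len(buf) - 4
        actual := adler32.Checksum(buf[:n])
        expected := binary.BigEndian.Uint32(buf[n:])
        fmt.Println(actual == expected) // true
    }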
diff --git a/src/dbnode/persist/fs/msgpack/decoder_test.go b/src/dbnode/persist/fs/msgpack/decoder_test.go
index 3f08c5cf23..d36f0b120b 100644
--- a/src/dbnode/persist/fs/msgpack/decoder_test.go
+++ b/src/dbnode/persist/fs/msgpack/decoder_test.go
@@ -23,6 +23,7 @@ package msgpack
import (
"testing"
+ "github.com/m3db/m3/src/dbnode/digest"
"github.com/stretchr/testify/require"
)
@@ -128,12 +129,27 @@ func TestDecodeIndexEntryMoreFieldsThanExpected(t *testing.T) {
// Intentionally bump number of fields for the index entry object
enc.encodeNumObjectFieldsForFn = testGenEncodeNumObjectFieldsForFn(enc, indexEntryType, 1)
require.NoError(t, enc.EncodeIndexEntry(testIndexEntry))
+
+ // This hokey bit of logic is done so we can add extra fields in the correct location (since new IndexEntry fields
+ // will be added *before* the checksum). Confirm the current checksum is correct, strip it, add an unexpected
+ // field, and re-add the updated checksum value.
+
+ // Validate existing checksum
+ checksumPos := len(enc.Bytes()) - 5 // 5 bytes = 1 byte for integer code + 4 bytes for checksum
+ dec.Reset(NewByteDecoderStream(enc.Bytes()[checksumPos:]))
+ currChecksum := dec.decodeVarint()
+ require.Equal(t, currChecksum, int64(digest.Checksum(enc.Bytes()[:checksumPos])))
+
+ // Strip checksum, add new field, add updated checksum
+ enc.buf.Truncate(len(enc.Bytes()) - 5)
require.NoError(t, enc.enc.EncodeInt64(1234))
+ require.NoError(t, enc.enc.EncodeInt64(int64(digest.Checksum(enc.Bytes()))))
// Verify we can successfully skip unnecessary fields
dec.Reset(NewByteDecoderStream(enc.Bytes()))
res, err := dec.DecodeIndexEntry(nil)
require.NoError(t, err)
+
require.Equal(t, testIndexEntry, res)
}
@@ -246,3 +262,19 @@ func TestDecodeBytesAllocNew(t *testing.T) {
}
require.Equal(t, []byte("testIndexEntry"), res.ID)
}
+
+func TestDecodeIndexEntryInvalidChecksum(t *testing.T) {
+ var (
+ enc = NewEncoder()
+ dec = NewDecoder(nil)
+ )
+ require.NoError(t, enc.EncodeIndexEntry(testIndexEntry))
+
+ // Update to invalid checksum
+ enc.buf.Truncate(len(enc.Bytes()) - 5) // 5 bytes = 1 byte for integer code + 4 bytes for checksum
+ require.NoError(t, enc.enc.EncodeInt64(1234))
+
+ dec.Reset(NewByteDecoderStream(enc.Bytes()))
+ _, err := dec.DecodeIndexEntry(nil)
+ require.Error(t, err)
+}
diff --git a/src/dbnode/persist/fs/msgpack/encoder.go b/src/dbnode/persist/fs/msgpack/encoder.go
index c41a6639c4..1745dcf696 100644
--- a/src/dbnode/persist/fs/msgpack/encoder.go
+++ b/src/dbnode/persist/fs/msgpack/encoder.go
@@ -23,6 +23,7 @@ package msgpack
import (
"bytes"
+ "github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/persist/schema"
"gopkg.in/vmihailenco/msgpack.v2"
@@ -50,41 +51,61 @@ type Encoder struct {
encodeBytesFn encodeBytesFn
encodeArrayLenFn encodeArrayLenFn
- legacy legacyEncodingOptions
+ legacy LegacyEncodingOptions
}
-type legacyEncodingIndexInfoVersion int
+// LegacyEncodingIndexInfoVersion is the encoding/decoding version to use when processing index info files.
+type LegacyEncodingIndexInfoVersion int
const (
- legacyEncodingIndexVersionCurrent = legacyEncodingIndexVersionV4
- legacyEncodingIndexVersionV1 legacyEncodingIndexInfoVersion = iota
- legacyEncodingIndexVersionV2
- legacyEncodingIndexVersionV3
- legacyEncodingIndexVersionV4
+ LegacyEncodingIndexVersionCurrent = LegacyEncodingIndexVersionV5
+ LegacyEncodingIndexVersionV1 LegacyEncodingIndexInfoVersion = iota
+ LegacyEncodingIndexVersionV2
+ LegacyEncodingIndexVersionV3
+ LegacyEncodingIndexVersionV4
+ LegacyEncodingIndexVersionV5
)
-type legacyEncodingOptions struct {
- encodeLegacyIndexInfoVersion legacyEncodingIndexInfoVersion
- decodeLegacyIndexInfoVersion legacyEncodingIndexInfoVersion
+// LegacyEncodingIndexEntryVersion is the encoding/decoding version to use when processing index entries.
+type LegacyEncodingIndexEntryVersion int
- encodeLegacyV1IndexEntry bool
- decodeLegacyV1IndexEntry bool
+const (
+ LegacyEncodingIndexEntryVersionCurrent = LegacyEncodingIndexEntryVersionV3
+ LegacyEncodingIndexEntryVersionV1 LegacyEncodingIndexEntryVersion = iota
+ LegacyEncodingIndexEntryVersionV2
+ LegacyEncodingIndexEntryVersionV3
+)
+
+// LegacyEncodingOptions allows you to specify the version to use when encoding/decoding
+// index info and index files
+type LegacyEncodingOptions struct {
+ EncodeLegacyIndexInfoVersion LegacyEncodingIndexInfoVersion
+ DecodeLegacyIndexInfoVersion LegacyEncodingIndexInfoVersion
+
+ EncodeLegacyIndexEntryVersion LegacyEncodingIndexEntryVersion
+ DecodeLegacyIndexEntryVersion LegacyEncodingIndexEntryVersion
}
-var defaultlegacyEncodingOptions = legacyEncodingOptions{
- encodeLegacyIndexInfoVersion: legacyEncodingIndexVersionCurrent,
- decodeLegacyIndexInfoVersion: legacyEncodingIndexVersionCurrent,
+// DefaultLegacyEncodingOptions are the default options to use with msgpack.Encoder and msgpack.Decoder.
+var DefaultLegacyEncodingOptions = LegacyEncodingOptions{
+ EncodeLegacyIndexInfoVersion: LegacyEncodingIndexVersionCurrent,
+ DecodeLegacyIndexInfoVersion: LegacyEncodingIndexVersionCurrent,
- encodeLegacyV1IndexEntry: false,
- decodeLegacyV1IndexEntry: false,
+ EncodeLegacyIndexEntryVersion: LegacyEncodingIndexEntryVersionCurrent,
+ DecodeLegacyIndexEntryVersion: LegacyEncodingIndexEntryVersionCurrent,
}
// NewEncoder creates a new encoder.
func NewEncoder() *Encoder {
- return newEncoder(defaultlegacyEncodingOptions)
+ return newEncoder(DefaultLegacyEncodingOptions)
}
-func newEncoder(legacy legacyEncodingOptions) *Encoder {
+// NewEncoderWithOptions creates a new encoder with the specified legacy options.
+func NewEncoderWithOptions(legacy LegacyEncodingOptions) *Encoder {
+ return newEncoder(legacy)
+}
+
+func newEncoder(legacy LegacyEncodingOptions) *Encoder {
buf := bytes.NewBuffer(nil)
enc := &Encoder{
buf: buf,
@@ -99,7 +120,8 @@ func newEncoder(legacy legacyEncodingOptions) *Encoder {
enc.encodeBytesFn = enc.encodeBytes
enc.encodeArrayLenFn = enc.encodeArrayLen
- // Used primarily for testing.
+ // Used primarily for testing, however legitimate production uses exist (e.g. the addition of
+ // IndexEntryChecksum in IndexEntryV3).
enc.legacy = legacy
return enc
@@ -120,15 +142,17 @@ func (enc *Encoder) EncodeIndexInfo(info schema.IndexInfo) error {
return enc.err
}
enc.encodeRootObject(indexInfoVersion, indexInfoType)
- switch enc.legacy.encodeLegacyIndexInfoVersion {
- case legacyEncodingIndexVersionV1:
+ switch enc.legacy.EncodeLegacyIndexInfoVersion {
+ case LegacyEncodingIndexVersionV1:
enc.encodeIndexInfoV1(info)
- case legacyEncodingIndexVersionV2:
+ case LegacyEncodingIndexVersionV2:
enc.encodeIndexInfoV2(info)
- case legacyEncodingIndexVersionV3:
+ case LegacyEncodingIndexVersionV3:
enc.encodeIndexInfoV3(info)
- default:
+ case LegacyEncodingIndexVersionV4:
enc.encodeIndexInfoV4(info)
+ default:
+ enc.encodeIndexInfoV5(info)
}
return enc.err
}
@@ -138,11 +162,19 @@ func (enc *Encoder) EncodeIndexEntry(entry schema.IndexEntry) error {
if enc.err != nil {
return enc.err
}
+
+ // There's no guarantee EncodeIndexEntry is called with an empty buffer, so ensure we only
+ // checksum the bits we care about.
+ checksumStart := enc.buf.Len()
+
enc.encodeRootObject(indexEntryVersion, indexEntryType)
- if enc.legacy.encodeLegacyV1IndexEntry {
+ switch enc.legacy.EncodeLegacyIndexEntryVersion {
+ case LegacyEncodingIndexEntryVersionV1:
enc.encodeIndexEntryV1(entry)
- } else {
+ case LegacyEncodingIndexEntryVersionV2:
enc.encodeIndexEntryV2(entry)
+ default:
+ enc.encodeIndexEntryV3(entry, checksumStart)
}
return enc.err
}
@@ -232,6 +264,20 @@ func (enc *Encoder) encodeIndexInfoV3(info schema.IndexInfo) {
}
func (enc *Encoder) encodeIndexInfoV4(info schema.IndexInfo) {
+ enc.encodeArrayLenFn(10) // V4 had 10 fields.
+ enc.encodeVarintFn(info.BlockStart)
+ enc.encodeVarintFn(info.BlockSize)
+ enc.encodeVarintFn(info.Entries)
+ enc.encodeVarintFn(info.MajorVersion)
+ enc.encodeIndexSummariesInfo(info.Summaries)
+ enc.encodeIndexBloomFilterInfo(info.BloomFilter)
+ enc.encodeVarintFn(info.SnapshotTime)
+ enc.encodeVarintFn(int64(info.FileType))
+ enc.encodeBytesFn(info.SnapshotID)
+ enc.encodeVarintFn(int64(info.VolumeIndex))
+}
+
+func (enc *Encoder) encodeIndexInfoV5(info schema.IndexInfo) {
enc.encodeNumObjectFieldsForFn(indexInfoType)
enc.encodeVarintFn(info.BlockStart)
enc.encodeVarintFn(info.BlockSize)
@@ -243,6 +289,7 @@ func (enc *Encoder) encodeIndexInfoV4(info schema.IndexInfo) {
enc.encodeVarintFn(int64(info.FileType))
enc.encodeBytesFn(info.SnapshotID)
enc.encodeVarintFn(int64(info.VolumeIndex))
+ enc.encodeVarintFn(info.MinorVersion)
}
func (enc *Encoder) encodeIndexSummariesInfo(info schema.IndexSummariesInfo) {
@@ -265,17 +312,30 @@ func (enc *Encoder) encodeIndexEntryV1(entry schema.IndexEntry) {
enc.encodeBytesFn(entry.ID)
enc.encodeVarintFn(entry.Size)
enc.encodeVarintFn(entry.Offset)
- enc.encodeVarintFn(entry.Checksum)
+ enc.encodeVarintFn(entry.DataChecksum)
}
func (enc *Encoder) encodeIndexEntryV2(entry schema.IndexEntry) {
+ enc.encodeArrayLenFn(6) // V2 had 6 fields.
+ enc.encodeVarintFn(entry.Index)
+ enc.encodeBytesFn(entry.ID)
+ enc.encodeVarintFn(entry.Size)
+ enc.encodeVarintFn(entry.Offset)
+ enc.encodeVarintFn(entry.DataChecksum)
+ enc.encodeBytesFn(entry.EncodedTags)
+}
+
+func (enc *Encoder) encodeIndexEntryV3(entry schema.IndexEntry, checksumStart int) {
enc.encodeNumObjectFieldsForFn(indexEntryType)
enc.encodeVarintFn(entry.Index)
enc.encodeBytesFn(entry.ID)
enc.encodeVarintFn(entry.Size)
enc.encodeVarintFn(entry.Offset)
- enc.encodeVarintFn(entry.Checksum)
+ enc.encodeVarintFn(entry.DataChecksum)
enc.encodeBytesFn(entry.EncodedTags)
+
+ checksum := digest.Checksum(enc.Bytes()[checksumStart:])
+ enc.encodeVarintFn(int64(checksum))
}
func (enc *Encoder) encodeIndexSummary(summary schema.IndexSummary) {
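
encodeIndexEntryV3 checksums only the bytes appended for the current entry because the shared buffer may already hold previously encoded entries. A minimal sketch of that bookkeeping (hash/adler32 stands in for the digest package here):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
        "hash/adler32"
    )

    func main() {
        // The shared buffer may already contain earlier entries.
        buf := bytes.NewBufferString("previously encoded entries")

        // Remember where this entry begins so earlier bytes can't affect its checksum.
        checksumStart := buf.Len()
        buf.WriteString("fields of the current entry")

        // Checksum only this entry's bytes and append it as the final field
        // (the real encoder writes it as a msgpack varint).
        var sum [4]byte
        binary.BigEndian.PutUint32(sum[:], adler32.Checksum(buf.Bytes()[checksumStart:]))
        buf.Write(sum[:])

        fmt.Println(buf.Len())
    }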
diff --git a/src/dbnode/persist/fs/msgpack/encoder_test.go b/src/dbnode/persist/fs/msgpack/encoder_test.go
index 2b8ef0a753..8acf8961dd 100644
--- a/src/dbnode/persist/fs/msgpack/encoder_test.go
+++ b/src/dbnode/persist/fs/msgpack/encoder_test.go
@@ -40,19 +40,33 @@ func testCapturingEncoder(t *testing.T) (*Encoder, *[]interface{}) {
encoder := NewEncoder()
var result []interface{}
+ actualEncodeVarintFn := encoder.encodeVarintFn
encoder.encodeVarintFn = func(value int64) {
+ actualEncodeVarintFn(value)
result = append(result, value)
}
+
+ actualEncodeVarUintFn := encoder.encodeVarUintFn
encoder.encodeVarUintFn = func(value uint64) {
+ actualEncodeVarUintFn(value)
result = append(result, value)
}
+
+ actualEncodeFloat64Fn := encoder.encodeFloat64Fn
encoder.encodeFloat64Fn = func(value float64) {
+ actualEncodeFloat64Fn(value)
result = append(result, value)
}
+
+ actualEncodeBytesFn := encoder.encodeBytesFn
encoder.encodeBytesFn = func(value []byte) {
+ actualEncodeBytesFn(value)
result = append(result, value)
}
+
+ actualEncodeArrayLenFn := encoder.encodeArrayLenFn
encoder.encodeArrayLenFn = func(value int) {
+ actualEncodeArrayLenFn(value)
result = append(result, value)
}
@@ -82,6 +96,7 @@ func testExpectedResultForIndexInfo(t *testing.T, indexInfo schema.IndexInfo) []
int64(indexInfo.FileType),
indexInfo.SnapshotID,
int64(indexInfo.VolumeIndex),
+ indexInfo.MinorVersion,
}
}
@@ -97,8 +112,9 @@ func testExpectedResultForIndexEntry(t *testing.T, indexEntry schema.IndexEntry)
indexEntry.ID,
indexEntry.Size,
indexEntry.Offset,
- indexEntry.Checksum,
+ indexEntry.DataChecksum,
indexEntry.EncodedTags,
+ testIndexEntryChecksum, // Checksum auto-added to the end of the index entry
}
}
diff --git a/src/dbnode/persist/fs/msgpack/roundtrip_test.go b/src/dbnode/persist/fs/msgpack/roundtrip_test.go
index 8044b6a921..73a88eb0dd 100644
--- a/src/dbnode/persist/fs/msgpack/roundtrip_test.go
+++ b/src/dbnode/persist/fs/msgpack/roundtrip_test.go
@@ -48,16 +48,18 @@ var (
FileType: persist.FileSetSnapshotType,
SnapshotID: []byte("some_bytes"),
VolumeIndex: 1,
+ MinorVersion: schema.MinorVersion,
}
testIndexEntry = schema.IndexEntry{
- Index: 234,
- ID: []byte("testIndexEntry"),
- Size: 5456,
- Offset: 2390423,
- Checksum: 134245634534,
- EncodedTags: []byte("testEncodedTags"),
+ Index: 234,
+ ID: []byte("testIndexEntry"),
+ Size: 5456,
+ Offset: 2390423,
+ DataChecksum: 134245634534,
+ EncodedTags: []byte("testEncodedTags"),
}
+ testIndexEntryChecksum = int64(2611877657)
testIndexSummary = schema.IndexSummary{
Index: 234,
@@ -99,10 +101,10 @@ func TestIndexInfoRoundtrip(t *testing.T) {
require.Equal(t, testIndexInfo, res)
}
-// Make sure the V4 decoding code can handle the V1 file format.
+// Make sure the V5 decoding code can handle the V1 file format.
func TestIndexInfoRoundTripBackwardsCompatibilityV1(t *testing.T) {
var (
- opts = legacyEncodingOptions{encodeLegacyIndexInfoVersion: legacyEncodingIndexVersionV1}
+ opts = LegacyEncodingOptions{EncodeLegacyIndexInfoVersion: LegacyEncodingIndexVersionV1}
enc = newEncoder(opts)
dec = newDecoder(opts, nil)
)
@@ -117,16 +119,19 @@ func TestIndexInfoRoundTripBackwardsCompatibilityV1(t *testing.T) {
currFileType = testIndexInfo.FileType
currSnapshotID = testIndexInfo.SnapshotID
currVolumeIndex = testIndexInfo.VolumeIndex
+ currMinorVersion = testIndexInfo.MinorVersion
)
testIndexInfo.SnapshotTime = 0
testIndexInfo.FileType = 0
testIndexInfo.SnapshotID = nil
testIndexInfo.VolumeIndex = 0
+ testIndexInfo.MinorVersion = 0
defer func() {
testIndexInfo.SnapshotTime = currSnapshotTime
testIndexInfo.FileType = currFileType
testIndexInfo.SnapshotID = currSnapshotID
testIndexInfo.VolumeIndex = currVolumeIndex
+ testIndexInfo.MinorVersion = currMinorVersion
}()
enc.EncodeIndexInfo(testIndexInfo)
@@ -136,10 +141,10 @@ func TestIndexInfoRoundTripBackwardsCompatibilityV1(t *testing.T) {
require.Equal(t, testIndexInfo, res)
}
-// Make sure the V1 decoder code can handle the V4 file format.
+// Make sure the V1 decoder code can handle the V5 file format.
func TestIndexInfoRoundTripForwardsCompatibilityV1(t *testing.T) {
var (
- opts = legacyEncodingOptions{decodeLegacyIndexInfoVersion: legacyEncodingIndexVersionV1}
+ opts = LegacyEncodingOptions{DecodeLegacyIndexInfoVersion: LegacyEncodingIndexVersionV1}
enc = newEncoder(opts)
dec = newDecoder(opts, nil)
)
@@ -152,6 +157,7 @@ func TestIndexInfoRoundTripForwardsCompatibilityV1(t *testing.T) {
currFileType = testIndexInfo.FileType
currSnapshotID = testIndexInfo.SnapshotID
currVolumeIndex = testIndexInfo.VolumeIndex
+ currMinorVersion = testIndexInfo.MinorVersion
)
enc.EncodeIndexInfo(testIndexInfo)
@@ -162,11 +168,13 @@ func TestIndexInfoRoundTripForwardsCompatibilityV1(t *testing.T) {
testIndexInfo.FileType = 0
testIndexInfo.SnapshotID = nil
testIndexInfo.VolumeIndex = 0
+ testIndexInfo.MinorVersion = 0
defer func() {
testIndexInfo.SnapshotTime = currSnapshotTime
testIndexInfo.FileType = currFileType
testIndexInfo.SnapshotID = currSnapshotID
testIndexInfo.VolumeIndex = currVolumeIndex
+ testIndexInfo.MinorVersion = currMinorVersion
}()
dec.Reset(NewByteDecoderStream(enc.Bytes()))
@@ -175,10 +183,10 @@ func TestIndexInfoRoundTripForwardsCompatibilityV1(t *testing.T) {
require.Equal(t, testIndexInfo, res)
}
-// Make sure the V4 decoding code can handle the V2 file format.
+// Make sure the V5 decoding code can handle the V2 file format.
func TestIndexInfoRoundTripBackwardsCompatibilityV2(t *testing.T) {
var (
- opts = legacyEncodingOptions{encodeLegacyIndexInfoVersion: legacyEncodingIndexVersionV2}
+ opts = LegacyEncodingOptions{EncodeLegacyIndexInfoVersion: LegacyEncodingIndexVersionV2}
enc = newEncoder(opts)
dec = newDecoder(opts, nil)
)
@@ -192,16 +200,19 @@ func TestIndexInfoRoundTripBackwardsCompatibilityV2(t *testing.T) {
currFileType = testIndexInfo.FileType
currSnapshotID = testIndexInfo.SnapshotID
currVolumeIndex = testIndexInfo.VolumeIndex
+ currMinorVersion = testIndexInfo.MinorVersion
)
testIndexInfo.SnapshotTime = 0
testIndexInfo.FileType = 0
testIndexInfo.SnapshotID = nil
testIndexInfo.VolumeIndex = 0
+ testIndexInfo.MinorVersion = 0
defer func() {
testIndexInfo.SnapshotTime = currSnapshotTime
testIndexInfo.FileType = currFileType
testIndexInfo.SnapshotID = currSnapshotID
testIndexInfo.VolumeIndex = currVolumeIndex
+ testIndexInfo.MinorVersion = currMinorVersion
}()
enc.EncodeIndexInfo(testIndexInfo)
@@ -211,10 +222,10 @@ func TestIndexInfoRoundTripBackwardsCompatibilityV2(t *testing.T) {
require.Equal(t, testIndexInfo, res)
}
-// Make sure the V2 decoder code can handle the V4 file format.
+// Make sure the V2 decoder code can handle the V5 file format.
func TestIndexInfoRoundTripForwardsCompatibilityV2(t *testing.T) {
var (
- opts = legacyEncodingOptions{decodeLegacyIndexInfoVersion: legacyEncodingIndexVersionV2}
+ opts = LegacyEncodingOptions{DecodeLegacyIndexInfoVersion: LegacyEncodingIndexVersionV2}
enc = newEncoder(opts)
dec = newDecoder(opts, nil)
)
@@ -224,6 +235,7 @@ func TestIndexInfoRoundTripForwardsCompatibilityV2(t *testing.T) {
// because the old decoder won't read the new fields.
currSnapshotID := testIndexInfo.SnapshotID
currVolumeIndex := testIndexInfo.VolumeIndex
+ currMinorVersion := testIndexInfo.MinorVersion
enc.EncodeIndexInfo(testIndexInfo)
@@ -231,9 +243,11 @@ func TestIndexInfoRoundTripForwardsCompatibilityV2(t *testing.T) {
// encoded the data.
testIndexInfo.SnapshotID = nil
testIndexInfo.VolumeIndex = 0
+ testIndexInfo.MinorVersion = 0
defer func() {
testIndexInfo.SnapshotID = currSnapshotID
testIndexInfo.VolumeIndex = currVolumeIndex
+ testIndexInfo.MinorVersion = currMinorVersion
}()
dec.Reset(NewByteDecoderStream(enc.Bytes()))
@@ -242,10 +256,10 @@ func TestIndexInfoRoundTripForwardsCompatibilityV2(t *testing.T) {
require.Equal(t, testIndexInfo, res)
}
-// Make sure the V4 decoding code can handle the V3 file format.
+// Make sure the V5 decoding code can handle the V3 file format.
func TestIndexInfoRoundTripBackwardsCompatibilityV3(t *testing.T) {
var (
- opts = legacyEncodingOptions{encodeLegacyIndexInfoVersion: legacyEncodingIndexVersionV3}
+ opts = LegacyEncodingOptions{EncodeLegacyIndexInfoVersion: LegacyEncodingIndexVersionV3}
enc = newEncoder(opts)
dec = newDecoder(opts, nil)
)
@@ -255,11 +269,14 @@ func TestIndexInfoRoundTripBackwardsCompatibilityV3(t *testing.T) {
// because the new decoder won't try and read the new fields from
// the old file format.
var (
- currVolumeIndex = testIndexInfo.VolumeIndex
+ currVolumeIndex = testIndexInfo.VolumeIndex
+ currMinorVersion = testIndexInfo.MinorVersion
)
testIndexInfo.VolumeIndex = 0
+ testIndexInfo.MinorVersion = 0
defer func() {
testIndexInfo.VolumeIndex = currVolumeIndex
+ testIndexInfo.MinorVersion = currMinorVersion
}()
enc.EncodeIndexInfo(testIndexInfo)
@@ -269,26 +286,83 @@ func TestIndexInfoRoundTripBackwardsCompatibilityV3(t *testing.T) {
require.Equal(t, testIndexInfo, res)
}
-// Make sure the V3 decoder code can handle the V4 file format.
+// Make sure the V3 decoder code can handle the V5 file format.
func TestIndexInfoRoundTripForwardsCompatibilityV3(t *testing.T) {
var (
- opts = legacyEncodingOptions{decodeLegacyIndexInfoVersion: legacyEncodingIndexVersionV3}
+ opts = LegacyEncodingOptions{DecodeLegacyIndexInfoVersion: LegacyEncodingIndexVersionV3}
enc = newEncoder(opts)
dec = newDecoder(opts, nil)
)
- // Set the default values on the fields that did not exist in V2
+ // Set the default values on the fields that did not exist in V3
// and then restore them at the end of the test - This is required
// because the old decoder won't read the new fields.
currVolumeIndex := testIndexInfo.VolumeIndex
+ currMinorVersion := testIndexInfo.MinorVersion
enc.EncodeIndexInfo(testIndexInfo)
// Make sure to zero them before we compare, but after we have
// encoded the data.
testIndexInfo.VolumeIndex = 0
+ testIndexInfo.MinorVersion = 0
defer func() {
testIndexInfo.VolumeIndex = currVolumeIndex
+ testIndexInfo.MinorVersion = currMinorVersion
+ }()
+
+ dec.Reset(NewByteDecoderStream(enc.Bytes()))
+ res, err := dec.DecodeIndexInfo()
+ require.NoError(t, err)
+ require.Equal(t, testIndexInfo, res)
+}
+
+// Make sure the V5 decoding code can handle the V4 file format.
+func TestIndexInfoRoundTripBackwardsCompatibilityV4(t *testing.T) {
+ var (
+ opts = LegacyEncodingOptions{EncodeLegacyIndexInfoVersion: LegacyEncodingIndexVersionV4}
+ enc = newEncoder(opts)
+ dec = newDecoder(opts, nil)
+ )
+
+ // Set the default values on the fields that did not exist in V4,
+ // and then restore them at the end of the test - This is required
+ // because the new decoder won't try and read the new fields from
+ // the old file format.
+ currMinorVersion := testIndexInfo.MinorVersion
+
+ testIndexInfo.MinorVersion = 0
+ defer func() {
+ testIndexInfo.MinorVersion = currMinorVersion
+ }()
+
+ enc.EncodeIndexInfo(testIndexInfo)
+ dec.Reset(NewByteDecoderStream(enc.Bytes()))
+ res, err := dec.DecodeIndexInfo()
+ require.NoError(t, err)
+ require.Equal(t, testIndexInfo, res)
+}
+
+// Make sure the V4 decoder code can handle the V5 file format.
+func TestIndexInfoRoundTripForwardsCompatibilityV4(t *testing.T) {
+ var (
+ opts = LegacyEncodingOptions{DecodeLegacyIndexInfoVersion: LegacyEncodingIndexVersionV4}
+ enc = newEncoder(opts)
+ dec = newDecoder(opts, nil)
+ )
+
+ // Set the default values on the fields that did not exist in V4
+ // and then restore them at the end of the test - This is required
+ // because the old decoder won't read the new fields.
+ currMinorVersion := testIndexInfo.MinorVersion
+
+ enc.EncodeIndexInfo(testIndexInfo)
+
+ // Make sure to zero them before we compare, but after we have
+ // encoded the data.
+ testIndexInfo.MinorVersion = 0
+ defer func() {
+ testIndexInfo.MinorVersion = currMinorVersion
}()
dec.Reset(NewByteDecoderStream(enc.Bytes()))
@@ -324,12 +398,13 @@ func TestIndexEntryRoundtripWithBytesPool(t *testing.T) {
require.Equal(t, testIndexEntry, res)
}
-// Make sure the V2 decoding code can handle the V1 file format.
+// Make sure the V3 decoding code can handle the V1 file format.
func TestIndexEntryRoundTripBackwardsCompatibilityV1(t *testing.T) {
var (
- opts = legacyEncodingOptions{encodeLegacyV1IndexEntry: true}
- enc = newEncoder(opts)
- dec = newDecoder(opts, nil)
+ opts = LegacyEncodingOptions{EncodeLegacyIndexEntryVersion: LegacyEncodingIndexEntryVersionV1,
+ DecodeLegacyIndexEntryVersion: LegacyEncodingIndexEntryVersionCurrent}
+ enc = newEncoder(opts)
+ dec = newDecoder(opts, nil)
)
// Set the default values on the fields that did not exist in V1
@@ -337,7 +412,9 @@ func TestIndexEntryRoundTripBackwardsCompatibilityV1(t *testing.T) {
// because the new decoder won't try and read the new fields from
// the old file format.
currEncodedTags := testIndexEntry.EncodedTags
+
testIndexEntry.EncodedTags = nil
+
defer func() {
testIndexEntry.EncodedTags = currEncodedTags
}()
@@ -349,10 +426,10 @@ func TestIndexEntryRoundTripBackwardsCompatibilityV1(t *testing.T) {
require.Equal(t, testIndexEntry, res)
}
-// Make sure the V1 decoder code can handle the V2 file format.
-func TestIndexEntryRoundTripForwardsCompatibilityV2(t *testing.T) {
+// Make sure the V1 decoder code can handle the V3 file format.
+func TestIndexEntryRoundTripForwardsCompatibilityV1(t *testing.T) {
var (
- opts = legacyEncodingOptions{decodeLegacyV1IndexEntry: true}
+ opts = LegacyEncodingOptions{DecodeLegacyIndexEntryVersion: LegacyEncodingIndexEntryVersionV1}
enc = newEncoder(opts)
dec = newDecoder(opts, nil)
)
@@ -377,6 +454,45 @@ func TestIndexEntryRoundTripForwardsCompatibilityV2(t *testing.T) {
require.Equal(t, testIndexEntry, res)
}
+// Make sure the V3 decoding code can handle the V2 file format.
+func TestIndexEntryRoundTripBackwardsCompatibilityV2(t *testing.T) {
+ var (
+ opts = LegacyEncodingOptions{EncodeLegacyIndexEntryVersion: LegacyEncodingIndexEntryVersionV2,
+ DecodeLegacyIndexEntryVersion: LegacyEncodingIndexEntryVersionCurrent}
+ enc = newEncoder(opts)
+ dec = newDecoder(opts, nil)
+ )
+
+ // The additional field added to V3 is the index entry checksum that's transparently used by the encoder
+ // and decoder and is never set on the IndexEntry struct. Therefore, no need to zero out any field in the struct
+ // to make a comparison.
+
+ enc.EncodeIndexEntry(testIndexEntry)
+ dec.Reset(NewByteDecoderStream(enc.Bytes()))
+ res, err := dec.DecodeIndexEntry(nil)
+ require.NoError(t, err)
+ require.Equal(t, testIndexEntry, res)
+}
+
+// Make sure the V2 decoder code can handle the V3 file format.
+func TestIndexEntryRoundTripForwardsCompatibilityV2(t *testing.T) {
+ var (
+ opts = LegacyEncodingOptions{DecodeLegacyIndexEntryVersion: LegacyEncodingIndexEntryVersionV2}
+ enc = newEncoder(opts)
+ dec = newDecoder(opts, nil)
+ )
+
+ // The additional field added to V3 is the index entry checksum that's transparently used by the encoder
+ // and decoder and is never set on the IndexEntry struct. Therefore, no need to zero out any field in the struct
+ // to make a comparison.
+
+ enc.EncodeIndexEntry(testIndexEntry)
+ dec.Reset(NewByteDecoderStream(enc.Bytes()))
+ res, err := dec.DecodeIndexEntry(nil)
+ require.NoError(t, err)
+ require.Equal(t, testIndexEntry, res)
+}
+
func TestIndexSummaryRoundtrip(t *testing.T) {
var (
enc = NewEncoder()
diff --git a/src/dbnode/persist/fs/msgpack/schema.go b/src/dbnode/persist/fs/msgpack/schema.go
index 2ee7869c03..d455afb951 100644
--- a/src/dbnode/persist/fs/msgpack/schema.go
+++ b/src/dbnode/persist/fs/msgpack/schema.go
@@ -97,12 +97,12 @@ const (
// curr number of fields specifies the number of fields that the current
// version of the M3DB will encode. This is used to ensure that the
// correct number of fields is encoded into the files. These values need
- // to be incremened whenever we add new fields to an object.
+ // to be incremented whenever we add new fields to an object.
currNumRootObjectFields = 2
- currNumIndexInfoFields = 10
+ currNumIndexInfoFields = 11
currNumIndexSummariesInfoFields = 1
currNumIndexBloomFilterInfoFields = 2
- currNumIndexEntryFields = 6
+ currNumIndexEntryFields = 7
currNumIndexSummaryFields = 3
currNumLogInfoFields = 3
currNumLogEntryFields = 7
diff --git a/src/dbnode/persist/fs/msgpack/stream_with_digest.go b/src/dbnode/persist/fs/msgpack/stream_with_digest.go
new file mode 100644
index 0000000000..ff03e2c45d
--- /dev/null
+++ b/src/dbnode/persist/fs/msgpack/stream_with_digest.go
@@ -0,0 +1,146 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package msgpack
+
+import (
+ "errors"
+ "hash"
+ "hash/adler32"
+)
+
+var (
+ // errChecksumMismatch is returned when the calculated checksum doesn't match the stored checksum.
+ errChecksumMismatch = errors.New("calculated checksum doesn't match stored checksum")
+)
+
+var _ DecoderStream = &decoderStreamWithDigest{}
+
+// decoderStreamWithDigest calculates the digest as it processes a decoder stream.
+type decoderStreamWithDigest struct {
+ reader DecoderStream
+ readerDigest hash.Hash32
+ unreadByte bool
+ enabled bool
+ singleByteBuf []byte
+}
+
+// newDecoderStreamWithDigest returns a new decoderStreamWithDigest
+func newDecoderStreamWithDigest(reader DecoderStream) *decoderStreamWithDigest {
+ return &decoderStreamWithDigest{
+ reader: reader,
+ readerDigest: adler32.New(),
+ singleByteBuf: make([]byte, 1),
+ }
+}
+
+func (d *decoderStreamWithDigest) Read(p []byte) (n int, err error) {
+ n, err = d.reader.Read(p)
+ if err != nil {
+ return n, err
+ }
+ if n <= 0 {
+ return n, nil
+ }
+
+ start := 0
+ if d.unreadByte {
+ d.unreadByte = false
+ start++
+ }
+ if d.enabled {
+ if _, err := d.readerDigest.Write(p[start:n]); err != nil {
+ return 0, err
+ }
+ }
+ return n, err
+}
+
+func (d *decoderStreamWithDigest) ReadByte() (byte, error) {
+ b, err := d.reader.ReadByte()
+ if err != nil {
+ return 0, err
+ }
+
+ if d.unreadByte {
+ d.unreadByte = false
+ } else if d.enabled {
+ d.singleByteBuf[0] = b
+ if _, err := d.readerDigest.Write(d.singleByteBuf); err != nil {
+ return b, err
+ }
+ }
+ return b, err
+}
+
+func (d *decoderStreamWithDigest) UnreadByte() error {
+ err := d.reader.UnreadByte()
+ if err == nil {
+ d.unreadByte = true
+ }
+ return err
+}
+
+// reset resets the stream to read from a new underlying reader.
+func (d *decoderStreamWithDigest) reset(stream DecoderStream) {
+ d.reader = stream
+ d.readerDigest.Reset()
+}
+
+// digest returns the current digest.
+func (d *decoderStreamWithDigest) digest() hash.Hash32 {
+ return d.readerDigest
+}
+
+// validate compares the current digest against the expected digest and returns
+// an error if they don't match.
+func (d *decoderStreamWithDigest) validate(expectedDigest uint32) error {
+ if d.readerDigest.Sum32() != expectedDigest {
+ return errChecksumMismatch
+ }
+ return nil
+}
+
+// capture provides a mechanism for manually capturing bytes to add to the digest when the reader
+// is manipulated through atypical means (e.g. reading directly from the backing byte slice of a ByteReader).
+func (d *decoderStreamWithDigest) capture(bytes []byte) error {
+ // No-op if not actually capturing at the moment
+ if d.enabled {
+ if _, err := d.readerDigest.Write(bytes); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// setDigestReaderEnabled enables digest calculation for bytes read. When disabled, the stream behaves like a regular reader.
+func (d *decoderStreamWithDigest) setDigestReaderEnabled(enabled bool) {
+ // Reset the digest when enabling calculation so that no stale state
+ // carries over from a previous use.
+ if !d.enabled && enabled {
+ d.readerDigest.Reset()
+ }
+ d.enabled = enabled
+}
+
+// wrappedStream returns the decoder stream wrapped by this object.
+func (d *decoderStreamWithDigest) wrappedStream() DecoderStream {
+ return d.reader
+}
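
The intended call pattern mirrors what Decoder.DecodeIndexEntry does; everything here is package-internal, so this is an in-package sketch with the field decoding elided:

    stream := newDecoderStreamWithDigest(NewByteDecoderStream(encodedBytes))
    stream.setDigestReaderEnabled(true)

    // ... decode every field preceding the trailing checksum through stream ...

    // Snapshot the digest before the checksum field itself is read; reading
    // that field through the stream would fold it into the running digest.
    actual := stream.digest().Sum32()

    // ... decode the stored checksum field into expected ...
    if expected != actual {
        // checksum mismatch: fail the decode
    }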
diff --git a/src/dbnode/persist/fs/msgpack/stream_with_digest_test.go b/src/dbnode/persist/fs/msgpack/stream_with_digest_test.go
new file mode 100644
index 0000000000..87041a73fb
--- /dev/null
+++ b/src/dbnode/persist/fs/msgpack/stream_with_digest_test.go
@@ -0,0 +1,192 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package msgpack
+
+import (
+ "bufio"
+ "bytes"
+ "hash/adler32"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+const srcString = "foo bar baz qux quux corge grault"
+
+func TestDecoderStreamWithDigestRead(t *testing.T) {
+ stream := newTestDecoderStream()
+
+ // Read srcString in chunkLen size chunks
+ chunkLen := 3
+ buf := make([]byte, len(srcString))
+ for start := 0; start < len(srcString); start = start + chunkLen {
+ end := start + chunkLen
+ if end > len(srcString) {
+ end = len(srcString)
+ }
+
+ n, err := stream.Read(buf[start:end])
+ require.NoError(t, err)
+ require.Equal(t, chunkLen, n)
+ require.Equal(t, adler32.Checksum(buf[:end]), stream.digest().Sum32())
+ }
+}
+
+func TestDecoderStreamWithDigestReadByte(t *testing.T) {
+ stream := newTestDecoderStream()
+
+ buf := make([]byte, len(srcString))
+ for i := 1; i < len(srcString); i++ {
+ n, err := stream.Read(buf[i-1 : i])
+ require.NoError(t, err)
+ require.Equal(t, 1, n)
+ require.Equal(t, adler32.Checksum(buf[:i]), stream.digest().Sum32())
+ }
+}
+
+func TestDecoderStreamWithDigestUnreadByte(t *testing.T) {
+ stream := decoderStreamWithDigest{
+ reader: bufio.NewReader(bytes.NewReader([]byte(srcString))),
+ readerDigest: adler32.New(),
+ }
+
+ b, err := stream.ReadByte()
+ require.NoError(t, err)
+ require.Equal(t, srcString[0], b)
+ require.False(t, stream.unreadByte)
+
+ err = stream.UnreadByte()
+ require.NoError(t, err)
+ require.True(t, stream.unreadByte)
+}
+
+func TestDecoderStreamWithDigestReset(t *testing.T) {
+ stream := newTestDecoderStream()
+
+ b, err := stream.ReadByte()
+ require.NoError(t, err)
+ require.Equal(t, srcString[0], b)
+
+ b, err = stream.ReadByte()
+ require.NoError(t, err)
+ require.Equal(t, srcString[1], b)
+
+ stream.reset(bufio.NewReader(bytes.NewReader([]byte(srcString))))
+
+ b, err = stream.ReadByte()
+ require.NoError(t, err)
+ require.Equal(t, srcString[0], b)
+}
+
+func TestDecoderStreamWithDigestValidate(t *testing.T) {
+ stream := newTestDecoderStream()
+ buf := make([]byte, 5)
+
+ n, err := stream.Read(buf)
+ require.NoError(t, err)
+ require.Equal(t, 5, n)
+
+ require.NoError(t, stream.validate(adler32.Checksum(buf)))
+ require.Error(t, stream.validate(adler32.Checksum([]byte("asdf"))))
+}
+
+func TestDecoderStreamWithDigestCapture(t *testing.T) {
+ stream := newTestDecoderStream()
+
+ require.NoError(t, stream.validate(1))
+
+ bytes := []byte("manual capture")
+ require.NoError(t, stream.capture(bytes))
+
+ require.Equal(t, adler32.Checksum(bytes), stream.digest().Sum32())
+}
+
+func TestDecoderStreamWithDigestReadUnreadRead(t *testing.T) {
+ stream := newTestDecoderStream()
+
+ buf := make([]byte, len(srcString))
+ end := 0
+
+ b1, err := stream.ReadByte()
+ require.NoError(t, err)
+ buf[0] = b1
+ end++
+ require.Equal(t, adler32.Checksum(buf[:end]), stream.digest().Sum32())
+
+ err = stream.UnreadByte()
+ end--
+ require.NoError(t, err)
+
+ b2, err := stream.ReadByte()
+ require.NoError(t, err)
+ end++
+ require.Equal(t, b1, b2)
+ require.Equal(t, adler32.Checksum(buf[:end]), stream.digest().Sum32())
+
+ n, err := stream.Read(buf[end : end+4])
+ require.NoError(t, err)
+ require.Equal(t, 4, n)
+ end += n
+ require.Equal(t, adler32.Checksum(buf[:end]), stream.digest().Sum32())
+
+ err = stream.UnreadByte()
+ end--
+ require.NoError(t, err)
+
+ n, err = stream.Read(buf[end : end+4])
+ require.NoError(t, err)
+ require.Equal(t, 4, n)
+ end += n
+ require.Equal(t, adler32.Checksum(buf[:end]), stream.digest().Sum32())
+}
+
+func TestDecoderStreamWithDigestSetEnabled(t *testing.T) {
+ stream := newTestDecoderStream()
+
+ // Disable digest calculation
+ stream.setDigestReaderEnabled(false)
+
+ buf := make([]byte, 5)
+ _, err := stream.Read(buf)
+ require.NoError(t, err)
+ require.Equal(t, stream.digest().Sum32(), uint32(1))
+
+ _, err = stream.ReadByte()
+ require.NoError(t, err)
+ require.Equal(t, stream.digest().Sum32(), uint32(1))
+
+ // Enable digest calculation
+ stream.setDigestReaderEnabled(true)
+
+ _, err = stream.Read(buf)
+ require.NoError(t, err)
+ require.Equal(t, stream.digest().Sum32(), adler32.Checksum([]byte(srcString[6:11])))
+
+ _, err = stream.ReadByte()
+ require.NoError(t, err)
+ require.Equal(t, stream.digest().Sum32(), adler32.Checksum([]byte(srcString[6:12])))
+}
+
+func newTestDecoderStream() *decoderStreamWithDigest {
+ d := newDecoderStreamWithDigest(bufio.NewReader(bytes.NewReader([]byte(srcString))))
+ d.setDigestReaderEnabled(true)
+ return d
+}
diff --git a/src/dbnode/persist/fs/noop_merge_with.go b/src/dbnode/persist/fs/noop_merge_with.go
new file mode 100644
index 0000000000..23286974ac
--- /dev/null
+++ b/src/dbnode/persist/fs/noop_merge_with.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fs
+
+import (
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/x/context"
+ "github.com/m3db/m3/src/x/ident"
+ xtime "github.com/m3db/m3/src/x/time"
+)
+
+type noopMergeWith struct{}
+
+// NewNoopMergeWith creates a new noopMergeWith object. NoopMergeWith can be used
+// in conjunction with the merge.Merger to generate copies of filesets.
+func NewNoopMergeWith() MergeWith {
+ return &noopMergeWith{}
+}
+
+func (m *noopMergeWith) Read(
+ _ context.Context,
+ _ ident.ID,
+ _ xtime.UnixNano,
+ _ namespace.Context,
+) ([]xio.BlockReader, bool, error) {
+ return nil, false, nil
+}
+
+func (m *noopMergeWith) ForEachRemaining(
+ _ context.Context,
+ _ xtime.UnixNano,
+ _ ForEachRemainingFn,
+ _ namespace.Context,
+) error {
+ return nil
+}
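
From a caller's point of view the no-op contract looks as follows (ctx, seriesID, blockStart and nsCtx assumed to be in scope):

    mergeWith := fs.NewNoopMergeWith()

    // Read always reports that there is nothing to merge, so a Merger pass
    // degenerates into a straight copy of the source fileset.
    readers, found, err := mergeWith.Read(ctx, seriesID, blockStart, nsCtx)
    // readers == nil, found == false, err == nil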
diff --git a/src/dbnode/persist/fs/options.go b/src/dbnode/persist/fs/options.go
index 7db96c212b..6aa6d246ef 100644
--- a/src/dbnode/persist/fs/options.go
+++ b/src/dbnode/persist/fs/options.go
@@ -67,12 +67,18 @@ const (
// defaultForceIndexBloomFilterMmapMemory is the default configuration for whether the bytes for the bloom filter
// should be mmap'd as an anonymous region (forced completely into memory) or mmap'd as a file.
defaultForceIndexBloomFilterMmapMemory = false
+
+ // defaultIndexReaderAutovalidateIndexSegments is the default configuration for
+ // whether or not the index reader should autovalidate the index segments when
+ // opening segments. This is an expensive operation and should be done post-open.
+ defaultIndexReaderAutovalidateIndexSegments = false
)
var (
defaultFilePathPrefix = os.TempDir()
defaultNewFileMode = os.FileMode(0666)
defaultNewDirectoryMode = os.ModeDir | os.FileMode(0755)
+ defaultFSTWriterOptions = fst.WriterOptions{}
errTagEncoderPoolNotSet = errors.New("tag encoder pool is not set")
errTagDecoderPoolNotSet = errors.New("tag decoder pool is not set")
@@ -96,10 +102,13 @@ type options struct {
tagEncoderPool serialize.TagEncoderPool
tagDecoderPool serialize.TagDecoderPool
fstOptions fst.Options
+ fstWriterOptions fst.WriterOptions
forceIndexSummariesMmapMemory bool
forceBloomFilterMmapMemory bool
mmapEnableHugePages bool
mmapReporter mmap.Reporter
+ indexReaderAutovalidateIndexSegments bool
+ encodingOptions msgpack.LegacyEncodingOptions
}
// NewOptions creates a new set of fs options
@@ -108,7 +117,8 @@ func NewOptions() Options {
serialize.NewTagEncoderOptions(), pool.NewObjectPoolOptions())
tagEncoderPool.Init()
tagDecoderPool := serialize.NewTagDecoderPool(
- serialize.NewTagDecoderOptions(), pool.NewObjectPoolOptions())
+ serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
+ pool.NewObjectPoolOptions())
tagDecoderPool.Init()
fstOptions := fst.NewOptions()
@@ -133,6 +143,9 @@ func NewOptions() Options {
tagEncoderPool: tagEncoderPool,
tagDecoderPool: tagDecoderPool,
fstOptions: fstOptions,
+ fstWriterOptions: defaultFSTWriterOptions,
+ indexReaderAutovalidateIndexSegments: defaultIndexReaderAutovalidateIndexSegments,
+ encodingOptions: msgpack.DefaultLegacyEncodingOptions,
}
}
@@ -356,6 +369,16 @@ func (o *options) FSTOptions() fst.Options {
return o.fstOptions
}
+func (o *options) SetFSTWriterOptions(value fst.WriterOptions) Options {
+ opts := *o
+ opts.fstWriterOptions = value
+ return &opts
+}
+
+func (o *options) FSTWriterOptions() fst.WriterOptions {
+ return o.fstWriterOptions
+}
+
func (o *options) SetMmapReporter(mmapReporter mmap.Reporter) Options {
opts := *o
opts.mmapReporter = mmapReporter
@@ -365,3 +388,23 @@ func (o *options) SetMmapReporter(mmapReporter mmap.Reporter) Options {
func (o *options) MmapReporter() mmap.Reporter {
return o.mmapReporter
}
+
+func (o *options) SetIndexReaderAutovalidateIndexSegments(value bool) Options {
+ opts := *o
+ opts.indexReaderAutovalidateIndexSegments = value
+ return &opts
+}
+
+func (o *options) IndexReaderAutovalidateIndexSegments() bool {
+ return o.indexReaderAutovalidateIndexSegments
+}
+
+func (o *options) SetEncodingOptions(value msgpack.LegacyEncodingOptions) Options {
+ opts := *o
+ opts.encodingOptions = value
+ return &opts
+}
+
+func (o *options) EncodingOptions() msgpack.LegacyEncodingOptions {
+ return o.encodingOptions
+}
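
The new accessors follow the package's copy-on-write Options pattern, so they compose with the existing knobs; a sketch, assuming the usual fs, fst and msgpack imports:

    fsOpts := fs.NewOptions().
        SetIndexReaderAutovalidateIndexSegments(true). // default is false; validation is expensive
        SetEncodingOptions(msgpack.DefaultLegacyEncodingOptions).
        SetFSTWriterOptions(fst.WriterOptions{})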
diff --git a/src/dbnode/persist/fs/persist_manager.go b/src/dbnode/persist/fs/persist_manager.go
index ab343cec3c..752e48f656 100644
--- a/src/dbnode/persist/fs/persist_manager.go
+++ b/src/dbnode/persist/fs/persist_manager.go
@@ -35,7 +35,7 @@ import (
m3ninxfs "github.com/m3db/m3/src/m3ninx/index/segment/fst"
m3ninxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/checked"
- "github.com/m3db/m3/src/x/ident"
+ xclose "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/instrument"
"github.com/pborman/uuid"
@@ -92,6 +92,8 @@ type persistManager struct {
slept time.Duration
metrics persistManagerMetrics
+
+ runtimeOptsListener xclose.SimpleCloser
}
type dataPersistManager struct {
@@ -165,7 +167,9 @@ func NewPersistManager(opts Options) (persist.Manager, error) {
if err != nil {
return nil, err
}
- segmentWriter, err := m3ninxpersist.NewMutableSegmentFileSetWriter()
+
+ segmentWriter, err := m3ninxpersist.NewMutableSegmentFileSetWriter(
+ opts.FSTWriterOptions())
if err != nil {
return nil, err
}
@@ -190,7 +194,8 @@ func NewPersistManager(opts Options) (persist.Manager, error) {
}
pm.indexPM.newReaderFn = NewIndexReader
pm.indexPM.newPersistentSegmentFn = m3ninxpersist.NewSegment
- opts.RuntimeOptionsManager().RegisterListener(pm)
+ pm.runtimeOptsListener = opts.RuntimeOptionsManager().RegisterListener(pm)
+
return pm, nil
}
@@ -269,10 +274,11 @@ func (pm *persistManager) PrepareIndex(opts persist.IndexPrepareOptions) (persis
}
blockSize := nsMetadata.Options().IndexOptions().BlockSize()
idxWriterOpts := IndexWriterOpenOptions{
- BlockSize: blockSize,
- FileSetType: opts.FileSetType,
- Identifier: fileSetID,
- Shards: opts.Shards,
+ BlockSize: blockSize,
+ FileSetType: opts.FileSetType,
+ Identifier: fileSetID,
+ Shards: opts.Shards,
+ IndexVolumeType: opts.IndexVolumeType,
}
// create writer for required fileset file.
@@ -485,13 +491,13 @@ func (pm *persistManager) PrepareData(opts persist.DataPrepareOptions) (persist.
prepared.Persist = pm.persist
prepared.Close = pm.closeData
+ prepared.DeferClose = pm.deferCloseData
return prepared, nil
}
func (pm *persistManager) persist(
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
segment ts.Segment,
checksum uint32,
) error {
@@ -523,7 +529,7 @@ func (pm *persistManager) persist(
pm.dataPM.segmentHolder[0] = segment.Head
pm.dataPM.segmentHolder[1] = segment.Tail
- err := pm.dataPM.writer.WriteAll(id, tags, pm.dataPM.segmentHolder, checksum)
+ err := pm.dataPM.writer.WriteAll(metadata, pm.dataPM.segmentHolder, checksum)
pm.count++
pm.bytesWritten += int64(segment.Len())
@@ -539,6 +545,10 @@ func (pm *persistManager) closeData() error {
return pm.dataPM.writer.Close()
}
+func (pm *persistManager) deferCloseData() (persist.DataCloser, error) {
+ return pm.dataPM.writer.DeferClose()
+}
+
// DoneFlush is called by the databaseFlushManager to finish the data persist process.
func (pm *persistManager) DoneFlush() error {
pm.Lock()
@@ -592,6 +602,11 @@ func (pm *persistManager) DoneSnapshot(
return pm.doneShared()
}
+// Close releases all resources held by the persist manager.
+func (pm *persistManager) Close() {
+ pm.runtimeOptsListener.Close()
+}
+
func (pm *persistManager) doneShared() error {
// Emit timing metrics
pm.metrics.writeDurationMs.Update(float64(pm.worked / time.Millisecond))
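
A notable fix in this file: the return value of RegisterListener was previously discarded, so the persist manager had no way to deregister itself. Storing the returned closer and releasing it in Close() follows the register/close lifecycle sketched below (a toy registry, not the m3 runtime options API):

```go
package main

import "fmt"

// Closer mirrors the xclose.SimpleCloser shape: anything with Close().
type Closer interface{ Close() }

type listenerRegistry struct{ listeners map[int]func(opts string) }

type registration struct {
	registry *listenerRegistry
	id       int
}

// Close deregisters the listener so the registry no longer retains it.
func (r registration) Close() { delete(r.registry.listeners, r.id) }

func (l *listenerRegistry) RegisterListener(fn func(string)) Closer {
	id := len(l.listeners)
	l.listeners[id] = fn
	return registration{registry: l, id: id}
}

type manager struct{ runtimeOptsListener Closer }

// Close releases resources held by the manager, including the
// runtime options listener registration.
func (m *manager) Close() { m.runtimeOptsListener.Close() }

func main() {
	reg := &listenerRegistry{listeners: map[int]func(string){}}
	m := &manager{}
	m.runtimeOptsListener = reg.RegisterListener(func(opts string) { fmt.Println("update:", opts) })
	m.Close()
	fmt.Println("remaining listeners:", len(reg.listeners)) // 0
}
```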
diff --git a/src/dbnode/persist/fs/persist_manager_test.go b/src/dbnode/persist/fs/persist_manager_test.go
index dcdea6ce54..b582680671 100644
--- a/src/dbnode/persist/fs/persist_manager_test.go
+++ b/src/dbnode/persist/fs/persist_manager_test.go
@@ -193,10 +193,12 @@ func TestPersistenceManagerPrepareSuccess(t *testing.T) {
tags = ident.NewTags(ident.StringTag("bar", "baz"))
head = checked.NewBytes([]byte{0x1, 0x2}, nil)
tail = checked.NewBytes([]byte{0x3, 0x4}, nil)
- segment = ts.NewSegment(head, tail, ts.FinalizeNone)
- checksum = digest.SegmentChecksum(segment)
+ segment = ts.NewSegment(head, tail, 0, ts.FinalizeNone)
+ checksum = segment.CalculateChecksum()
)
- writer.EXPECT().WriteAll(id, tags, gomock.Any(), checksum).Return(nil)
+ metadata := persist.NewMetadataFromIDAndTags(id, tags,
+ persist.MetadataOptions{})
+ writer.EXPECT().WriteAll(metadata, gomock.Any(), checksum).Return(nil)
writer.EXPECT().Close()
flush, err := pm.StartFlushPersist()
@@ -221,7 +223,7 @@ func TestPersistenceManagerPrepareSuccess(t *testing.T) {
require.Nil(t, err)
- require.Nil(t, prepared.Persist(id, tags, segment, checksum))
+ require.Nil(t, prepared.Persist(metadata, segment, checksum))
require.True(t, pm.start.Equal(now))
require.Equal(t, 124, pm.count)
@@ -263,10 +265,12 @@ func TestPersistenceManagerPrepareSnapshotSuccess(t *testing.T) {
tags = ident.NewTags(ident.StringTag("bar", "baz"))
head = checked.NewBytes([]byte{0x1, 0x2}, nil)
tail = checked.NewBytes([]byte{0x3, 0x4}, nil)
- segment = ts.NewSegment(head, tail, ts.FinalizeNone)
- checksum = digest.SegmentChecksum(segment)
+ segment = ts.NewSegment(head, tail, 0, ts.FinalizeNone)
+ checksum = segment.CalculateChecksum()
)
- writer.EXPECT().WriteAll(id, tags, gomock.Any(), checksum).Return(nil)
+ metadata := persist.NewMetadataFromIDAndTags(id, tags,
+ persist.MetadataOptions{})
+ writer.EXPECT().WriteAll(metadata, gomock.Any(), checksum).Return(nil)
writer.EXPECT().Close()
flush, err := pm.StartSnapshotPersist(testSnapshotID)
@@ -291,7 +295,7 @@ func TestPersistenceManagerPrepareSnapshotSuccess(t *testing.T) {
require.Nil(t, err)
- require.Nil(t, prepared.Persist(id, tags, segment, checksum))
+ require.Nil(t, prepared.Persist(metadata, segment, checksum))
require.True(t, pm.start.Equal(now))
require.Equal(t, 124, pm.count)
@@ -497,14 +501,19 @@ func TestPersistenceManagerNoRateLimit(t *testing.T) {
tags = ident.NewTags(ident.StringTag("bar", "baz"))
head = checked.NewBytes([]byte{0x1, 0x2}, nil)
tail = checked.NewBytes([]byte{0x3}, nil)
- segment = ts.NewSegment(head, tail, ts.FinalizeNone)
- checksum = digest.SegmentChecksum(segment)
+ segment = ts.NewSegment(head, tail, 0, ts.FinalizeNone)
+ checksum = segment.CalculateChecksum()
)
pm.nowFn = func() time.Time { return now }
pm.sleepFn = func(d time.Duration) { slept += d }
- writer.EXPECT().WriteAll(id, tags, pm.dataPM.segmentHolder, checksum).Return(nil).Times(2)
+ metadata := persist.NewMetadataFromIDAndTags(id, tags,
+ persist.MetadataOptions{})
+ writer.EXPECT().
+ WriteAll(metadata, pm.dataPM.segmentHolder, checksum).
+ Return(nil).
+ Times(2)
flush, err := pm.StartFlushPersist()
require.NoError(t, err)
@@ -524,11 +533,11 @@ func TestPersistenceManagerNoRateLimit(t *testing.T) {
// Start persistence
now = time.Now()
- require.NoError(t, prepared.Persist(id, tags, segment, checksum))
+ require.NoError(t, prepared.Persist(metadata, segment, checksum))
// Advance time and write again
now = now.Add(time.Millisecond)
- require.NoError(t, prepared.Persist(id, tags, segment, checksum))
+ require.NoError(t, prepared.Persist(metadata, segment, checksum))
// Check there is no rate limiting
require.Equal(t, time.Duration(0), slept)
@@ -552,8 +561,8 @@ func TestPersistenceManagerWithRateLimit(t *testing.T) {
id = ident.StringID("foo")
head = checked.NewBytes([]byte{0x1, 0x2}, nil)
tail = checked.NewBytes([]byte{0x3}, nil)
- segment = ts.NewSegment(head, tail, ts.FinalizeNone)
- checksum = digest.SegmentChecksum(segment)
+ segment = ts.NewSegment(head, tail, 0, ts.FinalizeNone)
+ checksum = segment.CalculateChecksum()
)
pm.nowFn = func() time.Time { return now }
@@ -567,8 +576,13 @@ func TestPersistenceManagerWithRateLimit(t *testing.T) {
},
BlockSize: testBlockSize,
}, m3test.IdentTransformer)
+ metadata := persist.NewMetadataFromIDAndTags(id, ident.Tags{},
+ persist.MetadataOptions{})
writer.EXPECT().Open(writerOpts).Return(nil).Times(iter)
- writer.EXPECT().WriteAll(id, ident.Tags{}, pm.dataPM.segmentHolder, checksum).Return(nil).AnyTimes()
+ writer.EXPECT().
+ WriteAll(metadata, pm.dataPM.segmentHolder, checksum).
+ Return(nil).
+ AnyTimes()
writer.EXPECT().Close().Times(iter)
// Enable rate limiting
@@ -607,21 +621,21 @@ func TestPersistenceManagerWithRateLimit(t *testing.T) {
// Start persistence
now = time.Now()
- require.NoError(t, prepared.Persist(id, ident.Tags{}, segment, checksum))
+ require.NoError(t, prepared.Persist(metadata, segment, checksum))
// Assert we don't rate limit if the count is not enough yet
- require.NoError(t, prepared.Persist(id, ident.Tags{}, segment, checksum))
+ require.NoError(t, prepared.Persist(metadata, segment, checksum))
require.Equal(t, time.Duration(0), slept)
// Advance time and check we rate limit if the disk throughput exceeds the limit
now = now.Add(time.Microsecond)
- require.NoError(t, prepared.Persist(id, ident.Tags{}, segment, checksum))
+ require.NoError(t, prepared.Persist(metadata, segment, checksum))
require.Equal(t, time.Duration(1861), slept)
// Advance time and check we don't rate limit if the disk throughput is below the limit
- require.NoError(t, prepared.Persist(id, ident.Tags{}, segment, checksum))
+ require.NoError(t, prepared.Persist(metadata, segment, checksum))
now = now.Add(time.Second - time.Microsecond)
- require.NoError(t, prepared.Persist(id, ident.Tags{}, segment, checksum))
+ require.NoError(t, prepared.Persist(metadata, segment, checksum))
require.Equal(t, time.Duration(1861), slept)
require.Equal(t, int64(15), pm.bytesWritten)
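
The tests now build a persist.Metadata once via NewMetadataFromIDAndTags and hand that single value to WriteAll and Persist, replacing the separate id/tags arguments. A toy version of the bundling (struct layout hypothetical; only the constructor shape mirrors the diff):

```go
package main

import "fmt"

type ID string
type Tags map[string]string

// Metadata bundles a series ID with its tags so persistence APIs take
// one argument instead of two, and zero-value comparison stays trivial.
type Metadata struct {
	id   ID
	tags Tags
}

type MetadataOptions struct{} // placeholder for future knobs

func NewMetadataFromIDAndTags(id ID, tags Tags, _ MetadataOptions) Metadata {
	return Metadata{id: id, tags: tags}
}

func write(md Metadata, data []byte) error {
	fmt.Printf("writing %d bytes for id=%s tags=%v\n", len(data), md.id, md.tags)
	return nil
}

func main() {
	md := NewMetadataFromIDAndTags("foo", Tags{"bar": "baz"}, MetadataOptions{})
	_ = write(md, []byte{0x1, 0x2})
}
```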
diff --git a/src/dbnode/persist/fs/read.go b/src/dbnode/persist/fs/read.go
index 274bd7c255..68bef48ef8 100644
--- a/src/dbnode/persist/fs/read.go
+++ b/src/dbnode/persist/fs/read.go
@@ -50,6 +50,9 @@ var (
// errReadNotExpectedSize returned when the size of the next read does not match size specified by the index
errReadNotExpectedSize = errors.New("next read not expected size")
+
+	// errReadMetadataOptimizedForRead is returned when a regular read is attempted on a reader optimized for reading metadata only
+ errReadMetadataOptimizedForRead = errors.New("read metadata optimized for regular read")
)
const (
@@ -99,6 +102,10 @@ type reader struct {
shard uint32
volume int
open bool
+	// NB(bodu): Informs whether or not we optimize for reading metadata
+	// only. Sorting is not needed for metadata reads, but it is
+	// required when performing regular reads.
+ optimizedReadMetadataOnly bool
}
// NewReader returns a new reader and expects all files to exist. Will read the
@@ -271,6 +278,7 @@ func (r *reader) Open(opts DataReaderOpenOptions) error {
r.open = true
r.namespace = namespace
r.shard = shard
+ r.optimizedReadMetadataOnly = opts.OptimizedReadMetadataOnly
return nil
}
@@ -337,13 +345,20 @@ func (r *reader) readIndexAndSortByOffsetAsc() error {
}
r.indexEntriesByOffsetAsc = append(r.indexEntriesByOffsetAsc, entry)
}
- // NB(r): As we decode each block we need access to each index entry
- // in the order we decode the data
- sort.Sort(indexEntriesByOffsetAsc(r.indexEntriesByOffsetAsc))
+ // This is false by default so we always sort unless otherwise specified.
+ if !r.optimizedReadMetadataOnly {
+ // NB(r): As we decode each block we need access to each index entry
+ // in the order we decode the data. This is only required for regular reads.
+ sort.Sort(indexEntriesByOffsetAsc(r.indexEntriesByOffsetAsc))
+ }
return nil
}
func (r *reader) Read() (ident.ID, ident.TagIterator, checked.Bytes, uint32, error) {
+ // NB(bodu): We cannot perform regular reads if we're optimizing for only reading metadata.
+ if r.optimizedReadMetadataOnly {
+ return nil, nil, nil, 0, errReadMetadataOptimizedForRead
+ }
if r.entries > 0 && len(r.indexEntriesByOffsetAsc) < r.entries {
// Have not read the index yet; this is required when reading
// data, as we need each index entry ordered by offset ascending
@@ -382,7 +397,7 @@ func (r *reader) Read() (ident.ID, ident.TagIterator, checked.Bytes, uint32, err
tags := r.entryClonedEncodedTagsIter(entry.EncodedTags)
r.entriesRead++
- return id, tags, data, uint32(entry.Checksum), nil
+ return id, tags, data, uint32(entry.DataChecksum), nil
}
func (r *reader) ReadMetadata() (ident.ID, ident.TagIterator, int, uint32, error) {
@@ -394,7 +409,7 @@ func (r *reader) ReadMetadata() (ident.ID, ident.TagIterator, int, uint32, error
id := r.entryClonedID(entry.ID)
tags := r.entryClonedEncodedTagsIter(entry.EncodedTags)
length := int(entry.Size)
- checksum := uint32(entry.Checksum)
+ checksum := uint32(entry.DataChecksum)
r.metadataRead++
return id, tags, length, checksum, nil
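
The reader changes combine two ideas: skip the offset sort when only metadata will be read, and fail fast with a sentinel error if a regular Read is attempted in that mode. A compact sketch of the guard, under assumed names:

```go
package main

import (
	"errors"
	"fmt"
	"sort"
)

var errMetadataOnly = errors.New("reader opened for metadata only; regular reads disabled")

type entry struct{ offset int64 }

type reader struct {
	metadataOnly bool
	entries      []entry
}

func (r *reader) readIndex(entries []entry) {
	r.entries = append(r.entries[:0], entries...)
	if !r.metadataOnly {
		// Offset-ascending order is only required when decoding the data
		// file sequentially; metadata reads can skip the sort entirely.
		sort.Slice(r.entries, func(i, j int) bool { return r.entries[i].offset < r.entries[j].offset })
	}
}

func (r *reader) Read() (entry, error) {
	if r.metadataOnly {
		return entry{}, errMetadataOnly
	}
	e := r.entries[0]
	r.entries = r.entries[1:]
	return e, nil
}

func main() {
	r := &reader{metadataOnly: true}
	r.readIndex([]entry{{offset: 42}, {offset: 7}})
	_, err := r.Read()
	fmt.Println(err) // reader opened for metadata only; regular reads disabled
}
```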
diff --git a/src/dbnode/persist/fs/read_test.go b/src/dbnode/persist/fs/read_test.go
index 573064d4d9..5f6c3ca6aa 100644
--- a/src/dbnode/persist/fs/read_test.go
+++ b/src/dbnode/persist/fs/read_test.go
@@ -31,6 +31,7 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/digest"
+ "github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs/msgpack"
"github.com/m3db/m3/src/dbnode/persist/schema"
"github.com/m3db/m3/src/x/checked"
@@ -90,7 +91,8 @@ func init() {
})
testBytesPool.Init()
testTagDecoderPool = serialize.NewTagDecoderPool(
- serialize.NewTagDecoderOptions(), pool.NewObjectPoolOptions())
+ serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
+ pool.NewObjectPoolOptions())
testTagDecoderPool.Init()
}
@@ -167,10 +169,13 @@ func TestReadDataError(t *testing.T) {
BlockStart: testWriterStart,
},
}
+ metadata := persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo"),
+ ident.Tags{},
+ persist.MetadataOptions{})
err = w.Open(writerOpts)
require.NoError(t, err)
- require.NoError(t, w.Write(
- ident.StringID("foo"), ident.Tags{},
+ require.NoError(t, w.Write(metadata,
bytesRefd([]byte{1, 2, 3}),
digest.Checksum([]byte{1, 2, 3})))
require.NoError(t, w.Close())
@@ -220,12 +225,15 @@ func TestReadDataUnexpectedSize(t *testing.T) {
BlockStart: testWriterStart,
},
}
+ metadata := persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo"),
+ ident.Tags{},
+ persist.MetadataOptions{})
err = w.Open(writerOpts)
assert.NoError(t, err)
dataFile := w.(*writer).dataFdWithDigest.Fd().Name()
- assert.NoError(t, w.Write(
- ident.StringID("foo"), ident.Tags{},
+ assert.NoError(t, w.Write(metadata,
bytesRefd([]byte{1, 2, 3}),
digest.Checksum([]byte{1, 2, 3})))
assert.NoError(t, w.Close())
@@ -307,10 +315,13 @@ func testReadOpen(t *testing.T, fileData map[string][]byte) {
BlockStart: start,
},
}
+ metadata := persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo"),
+ ident.Tags{},
+ persist.MetadataOptions{})
assert.NoError(t, w.Open(writerOpts))
- assert.NoError(t, w.Write(
- ident.StringID("foo"), ident.Tags{},
+ assert.NoError(t, w.Write(metadata,
bytesRefd([]byte{0x1}),
digest.Checksum([]byte{0x1})))
assert.NoError(t, w.Close())
@@ -400,10 +411,13 @@ func TestReadValidate(t *testing.T) {
BlockStart: start,
},
}
+ metadata := persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo"),
+ ident.Tags{},
+ persist.MetadataOptions{})
require.NoError(t, w.Open(writerOpts))
- assert.NoError(t, w.Write(
- ident.StringID("foo"), ident.Tags{},
+ assert.NoError(t, w.Write(metadata,
bytesRefd([]byte{0x1}),
digest.Checksum([]byte{0x1})))
require.NoError(t, w.Close())
diff --git a/src/dbnode/persist/fs/read_write_test.go b/src/dbnode/persist/fs/read_write_test.go
index edc10276be..c931946172 100644
--- a/src/dbnode/persist/fs/read_write_test.go
+++ b/src/dbnode/persist/fs/read_write_test.go
@@ -29,7 +29,7 @@ import (
"testing"
"time"
- "github.com/m3db/bloom"
+ "github.com/m3db/bloom/v4"
"github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/x/checked"
@@ -124,9 +124,10 @@ func writeTestDataWithVolume(
assert.NoError(t, err)
for i := range entries {
- assert.NoError(t, w.Write(
- entries[i].ID(),
+ metadata := persist.NewMetadataFromIDAndTags(entries[i].ID(),
entries[i].Tags(),
+ persist.MetadataOptions{})
+ assert.NoError(t, w.Write(metadata,
bytesRefd(entries[i].data),
digest.Checksum(entries[i].data)))
}
@@ -141,8 +142,7 @@ func writeTestDataWithVolume(
// Check every entry has ID and Tags nil
for _, elem := range slice {
- assert.Nil(t, elem.id)
- assert.Nil(t, elem.tags.Values())
+ assert.Equal(t, persist.Metadata{}, elem.metadata)
}
}
@@ -301,9 +301,10 @@ func TestDuplicateWrite(t *testing.T) {
require.NoError(t, err)
for i := range entries {
- require.NoError(t, w.Write(
- entries[i].ID(),
+ metadata := persist.NewMetadataFromIDAndTags(entries[i].ID(),
entries[i].Tags(),
+ persist.MetadataOptions{})
+ require.NoError(t, w.Write(metadata,
bytesRefd(entries[i].data),
digest.Checksum(entries[i].data)))
}
@@ -354,7 +355,7 @@ func TestInfoReadWrite(t *testing.T) {
w := newTestWriter(t, filePathPrefix)
writeTestData(t, w, 0, testWriterStart, entries, persist.FileSetFlushType)
- readInfoFileResults := ReadInfoFiles(filePathPrefix, testNs1ID, 0, 16, nil)
+ readInfoFileResults := ReadInfoFiles(filePathPrefix, testNs1ID, 0, 16, nil, persist.FileSetFlushType)
require.Equal(t, 1, len(readInfoFileResults))
for _, result := range readInfoFileResults {
require.NoError(t, result.Err.Error())
@@ -379,7 +380,7 @@ func TestInfoReadWriteVolumeIndex(t *testing.T) {
writeTestDataWithVolume(t, w, 0, testWriterStart, volume, entries, persist.FileSetFlushType)
- readInfoFileResults := ReadInfoFiles(filePathPrefix, testNs1ID, 0, 16, nil)
+ readInfoFileResults := ReadInfoFiles(filePathPrefix, testNs1ID, 0, 16, nil, persist.FileSetFlushType)
require.Equal(t, 1, len(readInfoFileResults))
for _, result := range readInfoFileResults {
require.NoError(t, result.Err.Error())
@@ -457,19 +458,21 @@ func TestReusingWriterAfterWriteError(t *testing.T) {
BlockStart: testWriterStart,
},
}
+ metadata := persist.NewMetadataFromIDAndTags(entries[0].ID(),
+ entries[0].Tags(),
+ persist.MetadataOptions{})
require.NoError(t, w.Open(writerOpts))
- require.NoError(t, w.Write(
- entries[0].ID(),
- entries[0].Tags(),
+ require.NoError(t, w.Write(metadata,
bytesRefd(entries[0].data),
digest.Checksum(entries[0].data)))
// Intentionally force a writer error.
w.(*writer).err = errors.New("foo")
- require.Equal(t, "foo", w.Write(
- entries[1].ID(),
+ metadata = persist.NewMetadataFromIDAndTags(entries[1].ID(),
entries[1].Tags(),
+ persist.MetadataOptions{})
+ require.Equal(t, "foo", w.Write(metadata,
bytesRefd(entries[1].data),
digest.Checksum(entries[1].data)).Error())
w.Close()
@@ -503,15 +506,20 @@ func TestWriterOnlyWritesNonNilBytes(t *testing.T) {
BlockStart: testWriterStart,
},
}
+ metadata := persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo"),
+ ident.Tags{},
+ persist.MetadataOptions{})
require.NoError(t, w.Open(writerOpts))
- w.WriteAll(ident.StringID("foo"), ident.Tags{},
+ err := w.WriteAll(metadata,
[]checked.Bytes{
checkedBytes([]byte{1, 2, 3}),
nil,
checkedBytes([]byte{4, 5, 6}),
},
digest.Checksum([]byte{1, 2, 3, 4, 5, 6}))
+ require.NoError(t, err)
assert.NoError(t, w.Close())
diff --git a/src/dbnode/persist/fs/retriever.go b/src/dbnode/persist/fs/retriever.go
index 9b340ccf75..19e28763df 100644
--- a/src/dbnode/persist/fs/retriever.go
+++ b/src/dbnode/persist/fs/retriever.go
@@ -39,6 +39,7 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
@@ -125,7 +126,10 @@ func NewBlockRetriever(
}, nil
}
-func (r *blockRetriever) Open(ns namespace.Metadata) error {
+func (r *blockRetriever) Open(
+ ns namespace.Metadata,
+ shardSet sharding.ShardSet,
+) error {
r.Lock()
defer r.Unlock()
@@ -134,7 +138,7 @@ func (r *blockRetriever) Open(ns namespace.Metadata) error {
}
seekerMgr := r.newSeekerMgrFn(r.bytesPool, r.fsOpts, r.opts)
- if err := seekerMgr.Open(ns); err != nil {
+ if err := seekerMgr.Open(ns, shardSet); err != nil {
return err
}
@@ -168,6 +172,17 @@ func (r *blockRetriever) CacheShardIndices(shards []uint32) error {
return seekerMgr.CacheShardIndices(shards)
}
+func (r *blockRetriever) AssignShardSet(shardSet sharding.ShardSet) {
+	// NB(bodu): The block retriever will always be open before this method
+	// is called, but keep the check anyway to be safe.
+ r.RLock()
+ defer r.RUnlock()
+ if r.status != blockRetrieverOpen {
+ return
+ }
+ r.seekerMgr.AssignShardSet(shardSet)
+}
+
func (r *blockRetriever) fetchLoop(seekerMgr DataFileSetSeekerManager) {
var (
seekerResources = NewReusableSeekerResources(r.fsOpts)
@@ -320,9 +335,10 @@ func (r *blockRetriever) fetchBatch(
var (
seg, onRetrieveSeg ts.Segment
+ checksum = req.indexEntry.DataChecksum
)
if data != nil {
- seg = ts.NewSegment(data, nil, ts.FinalizeHead)
+ seg = ts.NewSegment(data, nil, checksum, ts.FinalizeHead)
}
// We don't need to call onRetrieve.OnRetrieveBlock if the ID was not found.
@@ -333,7 +349,7 @@ func (r *blockRetriever) fetchBatch(
// consequent fetches.
if data != nil {
dataCopy := r.bytesPool.Get(data.Len())
- onRetrieveSeg = ts.NewSegment(dataCopy, nil, ts.FinalizeHead)
+ onRetrieveSeg = ts.NewSegment(dataCopy, nil, checksum, ts.FinalizeHead)
dataCopy.AppendAll(data.Bytes())
}
if tags := req.indexEntry.EncodedTags; tags != nil && tags.Len() > 0 {
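
Because ts.NewSegment now accepts a checksum, the retriever threads the index entry's DataChecksum into both the returned segment and the OnRetrieveBlock copy, so downstream code can verify bytes without another index lookup. A sketch of carrying and verifying a CRC32 alongside segment bytes (plain hash/crc32 standing in for m3's digest package):

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// segment carries its expected checksum so readers can verify the bytes
// without re-deriving the checksum from an index lookup.
type segment struct {
	head, tail []byte
	checksum   uint32
}

func newSegment(head, tail []byte, checksum uint32) segment {
	return segment{head: head, tail: tail, checksum: checksum}
}

func (s segment) calculateChecksum() uint32 {
	d := crc32.ChecksumIEEE(s.head)
	return crc32.Update(d, crc32.IEEETable, s.tail)
}

func (s segment) verify() error {
	if got := s.calculateChecksum(); got != s.checksum {
		return fmt.Errorf("checksum mismatch: got %d want %d", got, s.checksum)
	}
	return nil
}

func main() {
	head, tail := []byte{0x1, 0x2}, []byte{0x3, 0x4}
	sum := crc32.Update(crc32.ChecksumIEEE(head), crc32.IEEETable, tail)
	seg := newSegment(head, tail, sum)
	fmt.Println(seg.verify()) // <nil>
}
```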
diff --git a/src/dbnode/persist/fs/retriever_test.go b/src/dbnode/persist/fs/retriever_test.go
index 2610e56a8d..b470e2af7f 100644
--- a/src/dbnode/persist/fs/retriever_test.go
+++ b/src/dbnode/persist/fs/retriever_test.go
@@ -34,7 +34,10 @@ import (
"testing"
"time"
+ "github.com/m3db/m3/src/cluster/shard"
"github.com/m3db/m3/src/dbnode/digest"
+ "github.com/m3db/m3/src/dbnode/persist"
+ "github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/index/convert"
"github.com/m3db/m3/src/dbnode/ts"
@@ -57,6 +60,7 @@ type testBlockRetrieverOptions struct {
retrieverOpts BlockRetrieverOptions
fsOpts Options
newSeekerMgrFn newSeekerMgrFn
+ shards []uint32
}
type testCleanupFn func()
@@ -76,9 +80,15 @@ func newOpenTestBlockRetriever(
retriever.newSeekerMgrFn = opts.newSeekerMgrFn
}
+ shardSet, err := sharding.NewShardSet(
+ sharding.NewShards(opts.shards, shard.Available),
+ sharding.DefaultHashFn(1),
+ )
+ require.NoError(t, err)
+
nsPath := NamespaceDataDirPath(opts.fsOpts.FilePathPrefix(), testNs1ID)
require.NoError(t, os.MkdirAll(nsPath, opts.fsOpts.NewDirectoryMode()))
- require.NoError(t, retriever.Open(testNs1Metadata(t)))
+ require.NoError(t, retriever.Open(testNs1Metadata(t), shardSet))
return retriever, func() {
require.NoError(t, retriever.Close())
@@ -198,6 +208,7 @@ func testBlockRetrieverHighConcurrentSeeks(t *testing.T, shouldCacheShardIndices
SetFetchConcurrency(fetchConcurrency).
SetRetrieveRequestPool(retrieveRequestPool),
fsOpts: fsOpts,
+ shards: shards,
}
retriever, cleanup := newOpenTestBlockRetriever(t, opts)
@@ -289,7 +300,9 @@ func testBlockRetrieverHighConcurrentSeeks(t *testing.T, shouldCacheShardIndices
}
tags := testTagsFromIDAndVolume(id.String(), volume)
- err := w.Write(id, tags, data, digest.Checksum(data.Bytes()))
+ metadata := persist.NewMetadataFromIDAndTags(id, tags,
+ persist.MetadataOptions{})
+ err := w.Write(metadata, data, digest.Checksum(data.Bytes()))
require.NoError(t, err)
}
closer()
@@ -538,6 +551,7 @@ func TestBlockRetrieverIDDoesNotExist(t *testing.T) {
opts := testBlockRetrieverOptions{
retrieverOpts: defaultTestBlockRetrieverOptions,
fsOpts: fsOpts,
+ shards: []uint32{shard},
}
retriever, cleanup := newOpenTestBlockRetriever(t, opts)
defer cleanup()
@@ -547,7 +561,9 @@ func TestBlockRetrieverIDDoesNotExist(t *testing.T) {
data := checked.NewBytes([]byte("Hello world!"), nil)
data.IncRef()
defer data.DecRef()
- err = w.Write(ident.StringID("exists"), ident.Tags{}, data, digest.Checksum(data.Bytes()))
+ metadata := persist.NewMetadataFromIDAndTags(ident.StringID("exists"), ident.Tags{},
+ persist.MetadataOptions{})
+ err = w.Write(metadata, data, digest.Checksum(data.Bytes()))
assert.NoError(t, err)
closer()
@@ -584,6 +600,7 @@ func TestBlockRetrieverOnlyCreatesTagItersIfTagsExists(t *testing.T) {
opts := testBlockRetrieverOptions{
retrieverOpts: defaultTestBlockRetrieverOptions,
fsOpts: fsOpts,
+ shards: []uint32{shard},
}
retriever, cleanup := newOpenTestBlockRetriever(t, opts)
defer cleanup()
@@ -614,7 +631,9 @@ func TestBlockRetrieverOnlyCreatesTagItersIfTagsExists(t *testing.T) {
data.IncRef()
defer data.DecRef()
- err = w.Write(ident.StringID(write.id), write.tags, data, digest.Checksum(data.Bytes()))
+ metadata := persist.NewMetadataFromIDAndTags(ident.StringID(write.id), write.tags,
+ persist.MetadataOptions{})
+ err = w.Write(metadata, data, digest.Checksum(data.Bytes()))
require.NoError(t, err)
}
closer()
@@ -701,7 +720,7 @@ func testBlockRetrieverHandlesSeekErrors(t *testing.T, ctrl *gomock.Controller,
)
mockSeekerManager := NewMockDataFileSetSeekerManager(ctrl)
- mockSeekerManager.EXPECT().Open(gomock.Any()).Return(nil)
+ mockSeekerManager.EXPECT().Open(gomock.Any(), gomock.Any()).Return(nil)
mockSeekerManager.EXPECT().Test(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil)
mockSeekerManager.EXPECT().Borrow(gomock.Any(), gomock.Any()).Return(mockSeeker, nil)
mockSeekerManager.EXPECT().Return(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
@@ -721,6 +740,7 @@ func testBlockRetrieverHandlesSeekErrors(t *testing.T, ctrl *gomock.Controller,
retrieverOpts: defaultTestBlockRetrieverOptions,
fsOpts: fsOpts,
newSeekerMgrFn: newSeekerMgr,
+ shards: []uint32{shard},
}
retriever, cleanup := newOpenTestBlockRetriever(t, opts)
defer cleanup()
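
Every test in this file now repeats the same three-step shard set construction before opening the retriever. A small helper, assuming only the constructors already shown in the diff, would keep that in one place (see the sketch below):

```go
package fs_test // hypothetical placement alongside these tests

import (
	"testing"

	"github.com/m3db/m3/src/cluster/shard"
	"github.com/m3db/m3/src/dbnode/sharding"
	"github.com/stretchr/testify/require"
)

// newTestShardSet wraps the construction repeated in the tests:
// mark every shard Available and hash into a single-shard space.
func newTestShardSet(t *testing.T, shards []uint32) sharding.ShardSet {
	shardSet, err := sharding.NewShardSet(
		sharding.NewShards(shards, shard.Available),
		sharding.DefaultHashFn(1),
	)
	require.NoError(t, err)
	return shardSet
}
```

Tests would then call, for example, `retriever.Open(testNs1Metadata(t), newTestShardSet(t, shards))`.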
diff --git a/src/dbnode/persist/fs/seek.go b/src/dbnode/persist/fs/seek.go
index 87e4bff294..24e2e5f62c 100644
--- a/src/dbnode/persist/fs/seek.go
+++ b/src/dbnode/persist/fs/seek.go
@@ -65,8 +65,9 @@ type seeker struct {
// Data read from the indexInfo file. Note that we use xtime.UnixNano
// instead of time.Time to avoid keeping an extra pointer around.
- start xtime.UnixNano
- blockSize time.Duration
+ start xtime.UnixNano
+ blockSize time.Duration
+ versionChecker schema.VersionChecker
dataFd *os.File
indexFd *os.File
@@ -85,10 +86,10 @@ type seeker struct {
// IndexEntry is an entry from the index file which can be passed to
// SeekUsingIndexEntry to seek to the data for that entry
type IndexEntry struct {
- Size uint32
- Checksum uint32
- Offset int64
- EncodedTags checked.Bytes
+ Size uint32
+ DataChecksum uint32
+ Offset int64
+ EncodedTags checked.Bytes
}
// NewSeeker returns a new seeker.
@@ -224,6 +225,7 @@ func (s *seeker) Open(
}
s.start = xtime.UnixNano(info.BlockStart)
s.blockSize = time.Duration(info.BlockSize)
+ s.versionChecker = schema.NewVersionChecker(int(info.MajorVersion), int(info.MinorVersion))
err = s.validateIndexFileDigest(
indexFdWithDigest, expectedDigests.indexDigest)
@@ -363,7 +365,7 @@ func (s *seeker) SeekByIndexEntry(
// NB(r): _must_ check the checksum against known checksum as the data
// file might not have been verified if we haven't read through the file yet.
- if entry.Checksum != digest.Checksum(underlyingBuf) {
+ if entry.DataChecksum != digest.Checksum(underlyingBuf) {
return nil, errSeekChecksumMismatch
}
@@ -403,8 +405,7 @@ func (s *seeker) SeekIndexEntry(
// this is a tight loop (scanning linearly through the index file) we want to use a
// very cheap pool until we find what we're looking for, and then we can perform a single
// copy into checked.Bytes from the more expensive pool.
- entry, err := resources.xmsgpackDecoder.DecodeIndexEntry(
- resources.decodeIndexEntryBytesPool)
+ entry, err := resources.xmsgpackDecoder.DecodeIndexEntry(resources.decodeIndexEntryBytesPool)
if err == io.EOF {
// We reached the end of the file without finding it.
return IndexEntry{}, errSeekIDNotFound
@@ -434,10 +435,10 @@ func (s *seeker) SeekIndexEntry(
}
indexEntry := IndexEntry{
- Size: uint32(entry.Size),
- Checksum: uint32(entry.Checksum),
- Offset: entry.Offset,
- EncodedTags: checkedEncodedTags,
+ Size: uint32(entry.Size),
+ DataChecksum: uint32(entry.DataChecksum),
+ Offset: entry.Offset,
+ EncodedTags: checkedEncodedTags,
}
// Safe to return resources to the pool because ID will not be
@@ -510,6 +511,8 @@ func (s *seeker) ConcurrentClone() (ConcurrentDataFileSetSeeker, error) {
// they are concurrency safe and can be shared among clones.
indexFd: s.indexFd,
dataFd: s.dataFd,
+
+ versionChecker: s.versionChecker,
}
return seeker, nil
@@ -519,6 +522,12 @@ func (s *seeker) validateIndexFileDigest(
indexFdWithDigest digest.FdWithDigestReader,
expectedDigest uint32,
) error {
+ // If piecemeal checksumming validation enabled for index entries, do not attempt to validate the
+ // checksum of the entire file
+ if s.versionChecker.IndexEntryValidationEnabled() {
+ return nil
+ }
+
buf := make([]byte, s.opts.dataBufferSize)
for {
n, err := indexFdWithDigest.Read(buf)
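
Recording a schema.VersionChecker from the info file lets the seeker skip the whole-index-file digest pass when the fileset version carries per-entry checksums, trading one upfront pass for piecemeal validation. The gating logic in isolation (the real version threshold lives in the schema package; "1.1 or newer" is an assumption here):

```go
package main

import "fmt"

// versionChecker gates behavior on the fileset's schema version, mirroring
// the "per-entry validation available since some (major, minor)" idea.
type versionChecker struct{ major, minor int }

// indexEntryValidationEnabled assumes the cutover is version 1.1; the
// actual threshold is defined by the m3 schema package.
func (v versionChecker) indexEntryValidationEnabled() bool {
	return v.major > 1 || (v.major == 1 && v.minor >= 1)
}

func validateIndexFileDigest(v versionChecker, wholeFileDigest func() error) error {
	if v.indexEntryValidationEnabled() {
		// Each entry carries its own checksum, so skip the full-file pass.
		return nil
	}
	return wholeFileDigest()
}

func main() {
	old := versionChecker{major: 1, minor: 0}
	cur := versionChecker{major: 1, minor: 1}
	full := func() error { fmt.Println("validating entire index file"); return nil }
	_ = validateIndexFileDigest(old, full) // runs the full pass
	_ = validateIndexFileDigest(cur, full) // skipped
}
```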
diff --git a/src/dbnode/persist/fs/seek_manager.go b/src/dbnode/persist/fs/seek_manager.go
index e2497c5ed4..0e8db86594 100644
--- a/src/dbnode/persist/fs/seek_manager.go
+++ b/src/dbnode/persist/fs/seek_manager.go
@@ -28,6 +28,7 @@ import (
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/retention"
+ "github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/storage/block"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
@@ -53,8 +54,10 @@ var (
errCantCloseSeekerManagerWhileSeekersAreBorrowed = errors.New("cant close seeker manager while seekers are borrowed")
errReturnedUnmanagedSeeker = errors.New("cant return a seeker not managed by the seeker manager")
errUpdateOpenLeaseSeekerManagerNotOpen = errors.New("cant update open lease because seeker manager is not open")
+ errCacheShardIndicesSeekerManagerNotOpen = errors.New("cant cache shard indices because seeker manager is not open")
errConcurrentUpdateOpenLeaseNotAllowed = errors.New("concurrent open lease updates are not allowed")
errOutOfOrderUpdateOpenLease = errors.New("received update open lease volumes out of order")
+	errShardNotExists                              = errors.New("shard does not exist")
)
type openAnyUnopenSeekersFn func(*seekersByTime) error
@@ -89,6 +92,7 @@ type seekerManager struct {
filePathPrefix string
status seekerManagerStatus
+ shardSet sharding.ShardSet
isUpdatingLease bool
cacheShardIndicesWorkers xsync.WorkerPool
@@ -123,6 +127,24 @@ type seekersAndBloom struct {
volume int
}
+// closeWithLock closes all seekers; the caller must hold the lock guarding them.
+func (s seekersAndBloom) closeWithLock() error {
+ multiErr := xerrors.NewMultiError()
+ for _, seeker := range s.seekers {
+ multiErr = multiErr.Add(seeker.seeker.Close())
+ }
+ return multiErr.FinalError()
+}
+
+// anyBorrowedWithLock returns true if any seeker is still borrowed; the caller must hold the lock.
+func (s seekersAndBloom) anyBorrowedWithLock() bool {
+ for _, seeker := range s.seekers {
+ if seeker.isBorrowed {
+ return true
+ }
+ }
+ return false
+}
+
// borrowableSeeker is just a seeker with an additional field for keeping
// track of whether or not it has been borrowed.
type borrowableSeeker struct {
@@ -195,6 +217,7 @@ func NewSeekerManager(
// through the seekers.
func (m *seekerManager) Open(
nsMetadata namespace.Metadata,
+ shardSet sharding.ShardSet,
) error {
m.Lock()
if m.status != seekerManagerNotOpen {
@@ -204,6 +227,7 @@ func (m *seekerManager) Open(
m.namespace = nsMetadata.ID()
m.namespaceMetadata = nsMetadata
+ m.shardSet = shardSet
m.status = seekerManagerOpen
go m.openCloseLoop()
m.Unlock()
@@ -219,13 +243,24 @@ func (m *seekerManager) Open(
}
func (m *seekerManager) CacheShardIndices(shards []uint32) error {
+ m.RLock()
+ if m.status == seekerManagerNotOpen {
+ m.RUnlock()
+ return errCacheShardIndicesSeekerManagerNotOpen
+ }
+ m.RUnlock()
+
var (
multiErr = xerrors.NewMultiError()
resultsLock sync.Mutex
wg sync.WaitGroup
)
for _, shard := range shards {
- byTime := m.seekersByTime(shard)
+ byTime, ok := m.seekersByTime(shard)
+ if !ok {
+ multiErr = multiErr.Add(errShardNotExists)
+ continue
+ }
byTime.Lock()
// Track accessed to precache in open/close loop
@@ -247,9 +282,18 @@ func (m *seekerManager) CacheShardIndices(shards []uint32) error {
return multiErr.FinalError()
}
+func (m *seekerManager) AssignShardSet(shardSet sharding.ShardSet) {
+ m.Lock()
+ m.shardSet = shardSet
+ m.Unlock()
+}
+
func (m *seekerManager) Test(id ident.ID, shard uint32, start time.Time) (bool, error) {
startNano := xtime.ToUnixNano(start)
- byTime := m.seekersByTime(shard)
+ byTime, ok := m.seekersByTime(shard)
+ if !ok {
+ return false, errShardNotExists
+ }
// Try fast RLock() first.
byTime.RLock()
@@ -281,7 +325,10 @@ func (m *seekerManager) Test(id ident.ID, shard uint32, start time.Time) (bool,
// Borrow returns a "borrowed" seeker which the caller has exclusive access to
// until it's returned later.
func (m *seekerManager) Borrow(shard uint32, start time.Time) (ConcurrentDataFileSetSeeker, error) {
- byTime := m.seekersByTime(shard)
+ byTime, ok := m.seekersByTime(shard)
+ if !ok {
+ return nil, errShardNotExists
+ }
byTime.Lock()
defer byTime.Unlock()
@@ -315,8 +362,19 @@ func (m *seekerManager) Borrow(shard uint32, start time.Time) (ConcurrentDataFil
return availableSeeker.seeker, nil
}
+func (m *seekerManager) shardExistsWithLock(shard uint32) bool {
+ _, err := m.shardSet.LookupStateByID(shard)
+	// NB(bodu): LookupStateByID returns ErrInvalidShardID when the shard
+	// does not exist in the shard map, which means the shard is not available.
+ return err == nil
+}
+
func (m *seekerManager) Return(shard uint32, start time.Time, seeker ConcurrentDataFileSetSeeker) error {
- byTime := m.seekersByTime(shard)
+ byTime, ok := m.seekersByTime(shard)
+ if !ok {
+ return errShardNotExists
+ }
+
byTime.Lock()
defer byTime.Unlock()
@@ -348,56 +406,43 @@ func (m *seekerManager) Return(shard uint32, start time.Time, seeker ConcurrentD
// and inactive seekers. For more details on this read the comment above the UpdateOpenLease() method.
func (m *seekerManager) returnSeekerWithLock(seekers rotatableSeekers, seeker ConcurrentDataFileSetSeeker) (bool, error) {
// Check if the seeker being returned is an active seeker first.
- for i, compareSeeker := range seekers.active.seekers {
- if seeker == compareSeeker.seeker {
- compareSeeker.isBorrowed = false
- seekers.active.seekers[i] = compareSeeker
- return true, nil
- }
+ if m.markBorrowedSeekerAsReturned(&seekers.active, seeker) {
+ // We can return right away if we've returned an active seeker.
+ return true, nil
}
// If no match was found in the active seekers, it's possible that an inactive seeker is being returned.
- for i, compareSeeker := range seekers.inactive.seekers {
- if seeker == compareSeeker.seeker {
- compareSeeker.isBorrowed = false
- seekers.inactive.seekers[i] = compareSeeker
-
- // The goroutine that returns the last outstanding inactive seeker is responsible for notifying any
- // goroutines waiting for all inactive seekers to be returned and clearing out the inactive seekers
- // state entirely.
- allAreReturned := true
- for _, inactiveSeeker := range seekers.inactive.seekers {
- if inactiveSeeker.isBorrowed {
- allAreReturned = false
- break
- }
- }
-
- if !allAreReturned {
- return true, nil
- }
-
- // All the inactive seekers have been returned so it's safe to signal and clear them out.
- multiErr := xerrors.NewMultiError()
- for _, inactiveSeeker := range seekers.inactive.seekers {
- multiErr = multiErr.Add(inactiveSeeker.seeker.Close())
- }
-
- // Clear out inactive state.
- allInactiveSeekersClosedWg := seekers.inactive.wg
- seekers.inactive = seekersAndBloom{}
- if allInactiveSeekersClosedWg != nil {
- // Signal completion regardless of any errors encountered while closing.
- allInactiveSeekersClosedWg.Done()
- }
+ if m.markBorrowedSeekerAsReturned(&seekers.inactive, seeker) {
+ // The goroutine that returns the last outstanding inactive seeker is responsible for notifying any
+ // goroutines waiting for all inactive seekers to be returned and clearing out the inactive seekers
+ // state entirely.
+ if seekers.inactive.anyBorrowedWithLock() {
+ return true, nil
+ }
- return true, multiErr.FinalError()
+ err := seekers.inactive.closeWithLock()
+ if seekers.inactive.wg != nil {
+ // Signal completion regardless of any errors encountered while closing.
+ seekers.inactive.wg.Done()
+ seekers.inactive.wg = nil
}
+ return true, err
}
return false, nil
}
+func (m *seekerManager) markBorrowedSeekerAsReturned(seekers *seekersAndBloom, seeker ConcurrentDataFileSetSeeker) bool {
+ for i, compareSeeker := range seekers.seekers {
+ if seeker == compareSeeker.seeker {
+ compareSeeker.isBorrowed = false
+ seekers.seekers[i] = compareSeeker
+ return true
+ }
+ }
+ return false
+}
+
// UpdateOpenLease() implements block.Leaser. The contract of this API is that once the function
// returns successfully any resources associated with the previous lease should have been
// released (in this case the Seeker / files for the previous volume) and the resources associated
@@ -490,8 +535,12 @@ func (m *seekerManager) updateOpenLeaseHotSwapSeekers(
return nil, 0, err
}
+ byTime, ok := m.seekersByTime(descriptor.Shard)
+ if !ok {
+ return nil, 0, errShardNotExists
+ }
+
var (
- byTime = m.seekersByTime(descriptor.Shard)
blockStartNano = xtime.ToUnixNano(descriptor.BlockStart)
updateOpenLeaseResult = block.NoOpenLease
)
@@ -508,23 +557,15 @@ func (m *seekerManager) updateOpenLeaseHotSwapSeekers(
updateOpenLeaseResult = block.UpdateOpenLease
if seekers.active.volume > state.Volume {
// Ignore any close errors because its not relevant from the callers perspective.
- m.closeSeekersAndLogError(descriptor, newActiveSeekers.seekers)
+ m.closeSeekersAndLogError(descriptor, newActiveSeekers)
return nil, 0, errOutOfOrderUpdateOpenLease
}
seekers.inactive = seekers.active
seekers.active = newActiveSeekers
- anySeekersAreBorrowed := false
- for _, seeker := range seekers.inactive.seekers {
- if seeker.isBorrowed {
- anySeekersAreBorrowed = true
- break
- }
- }
-
var wg *sync.WaitGroup
- if anySeekersAreBorrowed {
+ if seekers.inactive.anyBorrowedWithLock() {
// If any of the seekers are borrowed setup a waitgroup which will be used to
// signal when they've all been returned (the last seeker that is returned via
// the Return() API will call wg.Done()).
@@ -533,7 +574,7 @@ func (m *seekerManager) updateOpenLeaseHotSwapSeekers(
seekers.inactive.wg = wg
} else {
// If none of the existing seekers are currently borrowed then we can just close them all.
- m.closeSeekersAndLogError(descriptor, seekers.inactive.seekers)
+ m.closeSeekersAndLogError(descriptor, seekers.inactive)
seekers.inactive = seekersAndBloom{}
}
byTime.seekers[blockStartNano] = seekers
@@ -579,17 +620,13 @@ func (m *seekerManager) acquireByTimeLockWaitGroupAware(
// closeSeekersAndLogError is a helper function that closes all the seekers in a slice of borrowableSeeker
// and emits a log if any errors occurred.
-func (m *seekerManager) closeSeekersAndLogError(descriptor block.LeaseDescriptor, seekers []borrowableSeeker) {
- var multiErr = xerrors.NewMultiError()
- for _, seeker := range seekers {
- multiErr = multiErr.Add(seeker.seeker.Close())
- }
- if multiErr.FinalError() != nil {
+func (m *seekerManager) closeSeekersAndLogError(descriptor block.LeaseDescriptor, seekers seekersAndBloom) {
+ if err := seekers.closeWithLock(); err != nil {
// Log the error but don't return it since its not relevant from
// the callers perspective.
m.logger.Error(
"error closing seeker in update open lease",
- zap.Error(multiErr.FinalError()),
+ zap.Error(err),
zap.String("namespace", descriptor.Namespace.String()),
zap.Int("shard", int(descriptor.Shard)),
zap.Time("blockStart", descriptor.BlockStart))
@@ -779,23 +816,30 @@ func (m *seekerManager) newOpenSeeker(
return seeker, nil
}
-func (m *seekerManager) seekersByTime(shard uint32) *seekersByTime {
+func (m *seekerManager) seekersByTime(shard uint32) (*seekersByTime, bool) {
m.RLock()
+ if !m.shardExistsWithLock(shard) {
+ m.RUnlock()
+ return nil, false
+ }
+
if int(shard) < len(m.seekersByShardIdx) {
byTime := m.seekersByShardIdx[shard]
m.RUnlock()
- return byTime
+ return byTime, true
}
-
m.RUnlock()
m.Lock()
defer m.Unlock()
+ if !m.shardExistsWithLock(shard) {
+ return nil, false
+ }
// Check if raced with another call to this method
if int(shard) < len(m.seekersByShardIdx) {
byTime := m.seekersByShardIdx[shard]
- return byTime
+ return byTime, true
}
seekersByShardIdx := make([]*seekersByTime, shard+1)
@@ -810,7 +854,7 @@ func (m *seekerManager) seekersByTime(shard uint32) *seekersByTime {
m.seekersByShardIdx = seekersByShardIdx
byTime := m.seekersByShardIdx[shard]
- return byTime
+ return byTime, true
}
func (m *seekerManager) Close() error {
@@ -827,21 +871,17 @@ func (m *seekerManager) Close() error {
byTime.Lock()
for _, seekersForBlock := range byTime.seekers {
// Ensure active seekers are all returned.
- for _, seeker := range seekersForBlock.active.seekers {
- if seeker.isBorrowed {
- byTime.Unlock()
- m.Unlock()
- return errCantCloseSeekerManagerWhileSeekersAreBorrowed
- }
+ if seekersForBlock.active.anyBorrowedWithLock() {
+ byTime.Unlock()
+ m.Unlock()
+ return errCantCloseSeekerManagerWhileSeekersAreBorrowed
}
// Ensure inactive seekers are all returned.
- for _, seeker := range seekersForBlock.inactive.seekers {
- if seeker.isBorrowed {
- byTime.Unlock()
- m.Unlock()
- return errCantCloseSeekerManagerWhileSeekersAreBorrowed
- }
+ if seekersForBlock.inactive.anyBorrowedWithLock() {
+ byTime.Unlock()
+ m.Unlock()
+ return errCantCloseSeekerManagerWhileSeekersAreBorrowed
}
}
byTime.Unlock()
@@ -885,7 +925,7 @@ func (m *seekerManager) openCloseLoop() {
var (
shouldTryOpen []*seekersByTime
shouldClose []seekerManagerPendingClose
- closing []borrowableSeeker
+ closing []seekersAndBloom
)
resetSlices := func() {
for i := range shouldTryOpen {
@@ -897,7 +937,7 @@ func (m *seekerManager) openCloseLoop() {
}
shouldClose = shouldClose[:0]
for i := range closing {
- closing[i] = borrowableSeeker{}
+ closing[i] = seekersAndBloom{}
}
closing = closing[:0]
}
@@ -933,7 +973,10 @@ func (m *seekerManager) openCloseLoop() {
byTime.RLock()
for blockStartNano := range byTime.seekers {
blockStart := blockStartNano.ToTime()
- if blockStart.Before(earliestSeekableBlockStart) {
+ if blockStart.Before(earliestSeekableBlockStart) ||
+	// Close seekers for shards that are no longer available. This
+	// ensures that seekers are eventually consistent with shard state.
+ !m.shardExistsWithLock(uint32(shard)) {
shouldClose = append(shouldClose, seekerManagerPendingClose{
shard: uint32(shard),
blockStart: blockStart,
@@ -952,26 +995,21 @@ func (m *seekerManager) openCloseLoop() {
allSeekersAreReturned := true
// Ensure no active seekers are still borrowed.
- for _, seeker := range seekers.active.seekers {
- if seeker.isBorrowed {
- allSeekersAreReturned = false
- break
- }
+ if seekers.active.anyBorrowedWithLock() {
+ allSeekersAreReturned = false
}
- // Ensure no ianctive seekers are still borrowed.
- for _, seeker := range seekers.inactive.seekers {
- if seeker.isBorrowed {
- allSeekersAreReturned = false
- break
- }
+ // Ensure no inactive seekers are still borrowed.
+ if seekers.inactive.anyBorrowedWithLock() {
+ allSeekersAreReturned = false
}
+
// Never close seekers unless they've all been returned because
// some of them are clones of the original and can't be used once
// the parent is closed (because they share underlying resources)
if allSeekersAreReturned {
- closing = append(closing, seekers.active.seekers...)
- closing = append(closing, seekers.inactive.seekers...)
+ closing = append(closing, seekers.active)
+ closing = append(closing, seekers.inactive)
delete(byTime.seekers, blockStartNano)
}
byTime.Unlock()
@@ -980,10 +1018,9 @@ func (m *seekerManager) openCloseLoop() {
m.RUnlock()
// Close after releasing lock so any IO is done out of lock
- for _, seeker := range closing {
- err := seeker.seeker.Close()
- if err != nil {
- m.logger.Error("err closing seeker in SeekerManager openCloseLoop", zap.Error(err))
+ for _, seekersAndBloom := range closing {
+ if err := seekersAndBloom.closeWithLock(); err != nil {
+ m.logger.Error("err closing seekersAndBloom in SeekerManager openCloseLoop", zap.Error(err))
}
}
@@ -998,23 +1035,13 @@ func (m *seekerManager) openCloseLoop() {
byTime.Lock()
for _, seekersForBlock := range byTime.seekers {
// Close the active seekers.
- for _, seeker := range seekersForBlock.active.seekers {
- // We don't need to check if the seeker is borrowed here because we don't allow the
- // SeekerManager to be closed if any seekers are still outstanding.
- err := seeker.seeker.Close()
- if err != nil {
- m.logger.Error("err closing seeker in SeekerManager at end of openCloseLoop", zap.Error(err))
- }
+ if err := seekersForBlock.active.closeWithLock(); err != nil {
+ m.logger.Error("err closing seeker in SeekerManager at end of openCloseLoop", zap.Error(err))
}
// Close the inactive seekers.
- for _, seeker := range seekersForBlock.inactive.seekers {
- // We don't need to check if the seeker is borrowed here because we don't allow the
- // SeekerManager to be closed if any seekers are still outstanding.
- err := seeker.seeker.Close()
- if err != nil {
- m.logger.Error("err closing seeker in SeekerManager at end of openCloseLoop", zap.Error(err))
- }
+ if err := seekersForBlock.inactive.closeWithLock(); err != nil {
+ m.logger.Error("err closing seeker in SeekerManager at end of openCloseLoop", zap.Error(err))
}
}
byTime.seekers = nil
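
Most of this refactor collapses four near-identical loops into the two seekersAndBloom helpers, anyBorrowedWithLock and closeWithLock. Distilled below, with errors.Join standing in for m3's xerrors multi-error:

```go
package main

import (
	"errors"
	"fmt"
)

type seeker struct{ name string }

func (s *seeker) Close() error { return nil }

type borrowableSeeker struct {
	seeker     *seeker
	isBorrowed bool
}

type seekersAndBloom struct{ seekers []borrowableSeeker }

// anyBorrowedWithLock reports whether any seeker is still out on loan.
// Callers must hold the lock guarding the seekers slice.
func (s seekersAndBloom) anyBorrowedWithLock() bool {
	for _, sk := range s.seekers {
		if sk.isBorrowed {
			return true
		}
	}
	return false
}

// closeWithLock closes every seeker, accumulating errors instead of
// stopping at the first failure.
func (s seekersAndBloom) closeWithLock() error {
	var errs []error
	for _, sk := range s.seekers {
		if err := sk.seeker.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	return errors.Join(errs...)
}

func main() {
	group := seekersAndBloom{seekers: []borrowableSeeker{
		{seeker: &seeker{"a"}}, {seeker: &seeker{"b"}, isBorrowed: true},
	}}
	fmt.Println(group.anyBorrowedWithLock()) // true
	fmt.Println(group.closeWithLock())       // <nil>
}
```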
diff --git a/src/dbnode/persist/fs/seek_manager_test.go b/src/dbnode/persist/fs/seek_manager_test.go
index 1f63ddde33..8d27e2cb63 100644
--- a/src/dbnode/persist/fs/seek_manager_test.go
+++ b/src/dbnode/persist/fs/seek_manager_test.go
@@ -25,6 +25,8 @@ import (
"testing"
"time"
+ "github.com/m3db/m3/src/cluster/shard"
+ "github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
@@ -50,7 +52,14 @@ func TestSeekerManagerCacheShardIndices(t *testing.T) {
defer leaktest.CheckTimeout(t, 1*time.Minute)()
shards := []uint32{2, 5, 9, 478, 1023}
+ metadata := testNs1Metadata(t)
+ shardSet, err := sharding.NewShardSet(
+ sharding.NewShards(shards, shard.Available),
+ sharding.DefaultHashFn(1),
+ )
+ require.NoError(t, err)
m := NewSeekerManager(nil, testDefaultOpts, defaultTestBlockRetrieverOptions).(*seekerManager)
+ require.NoError(t, m.Open(metadata, shardSet))
byTimes := make(map[uint32]*seekersByTime)
var mu sync.Mutex
m.openAnyUnopenSeekersFn = func(byTime *seekersByTime) error {
@@ -68,13 +77,13 @@ func TestSeekerManagerCacheShardIndices(t *testing.T) {
}
// Assert seeksByShardIdx match expectations
- shardSet := make(map[uint32]struct{}, len(shards))
+ shardSetMap := make(map[uint32]struct{}, len(shards))
for _, shard := range shards {
- shardSet[shard] = struct{}{}
+ shardSetMap[shard] = struct{}{}
}
for shard, byTime := range m.seekersByShardIdx {
- _, exists := shardSet[uint32(shard)]
+ _, exists := shardSetMap[uint32(shard)]
if !exists {
require.False(t, byTime.accessed)
} else {
@@ -82,6 +91,8 @@ func TestSeekerManagerCacheShardIndices(t *testing.T) {
require.Equal(t, int(shard), int(byTime.shard))
}
}
+
+ require.NoError(t, m.Close())
}
func TestSeekerManagerUpdateOpenLease(t *testing.T) {
@@ -125,14 +136,20 @@ func TestSeekerManagerUpdateOpenLease(t *testing.T) {
}
metadata := testNs1Metadata(t)
+ shardSet, err := sharding.NewShardSet(
+ sharding.NewShards(shards, shard.Available),
+ sharding.DefaultHashFn(1),
+ )
+ require.NoError(t, err)
// Pick a start time that's within retention so the background loop doesn't close
// the seeker.
blockStart := time.Now().Truncate(metadata.Options().RetentionOptions().BlockSize())
- require.NoError(t, m.Open(metadata))
+ require.NoError(t, m.Open(metadata, shardSet))
for _, shard := range shards {
seeker, err := m.Borrow(shard, blockStart)
require.NoError(t, err)
- byTime := m.seekersByTime(shard)
+ byTime, ok := m.seekersByTime(shard)
+ require.True(t, ok)
byTime.RLock()
seekers := byTime.seekers[xtime.ToUnixNano(blockStart)]
require.Equal(t, defaultTestingFetchConcurrency, len(seekers.active.seekers))
@@ -151,7 +168,8 @@ func TestSeekerManagerUpdateOpenLease(t *testing.T) {
require.NoError(t, err)
require.Equal(t, block.UpdateOpenLease, updateResult)
- byTime := m.seekersByTime(shard)
+ byTime, ok := m.seekersByTime(shard)
+ require.True(t, ok)
byTime.RLock()
seekers := byTime.seekers[xtime.ToUnixNano(blockStart)]
require.Equal(t, defaultTestingFetchConcurrency, len(seekers.active.seekers))
@@ -173,7 +191,8 @@ func TestSeekerManagerUpdateOpenLease(t *testing.T) {
require.NoError(t, err)
require.Equal(t, block.NoOpenLease, updateResult)
- byTime := m.seekersByTime(shard)
+ byTime, ok := m.seekersByTime(shard)
+ require.True(t, ok)
byTime.RLock()
seekers := byTime.seekers[xtime.ToUnixNano(blockStart)]
require.Equal(t, defaultTestingFetchConcurrency, len(seekers.active.seekers))
@@ -223,11 +242,17 @@ func TestSeekerManagerBorrowOpenSeekersLazy(t *testing.T) {
}
metadata := testNs1Metadata(t)
- require.NoError(t, m.Open(metadata))
+ shardSet, err := sharding.NewShardSet(
+ sharding.NewShards(shards, shard.Available),
+ sharding.DefaultHashFn(1),
+ )
+ require.NoError(t, err)
+ require.NoError(t, m.Open(metadata, shardSet))
for _, shard := range shards {
seeker, err := m.Borrow(shard, time.Time{})
require.NoError(t, err)
- byTime := m.seekersByTime(shard)
+ byTime, ok := m.seekersByTime(shard)
+ require.True(t, ok)
byTime.RLock()
seekers := byTime.seekers[xtime.ToUnixNano(time.Time{})]
require.Equal(t, defaultTestingFetchConcurrency, len(seekers.active.seekers))
@@ -245,8 +270,6 @@ func TestSeekerManagerOpenCloseLoop(t *testing.T) {
defer leaktest.CheckTimeout(t, 1*time.Minute)()
ctrl := gomock.NewController(t)
-
- shards := []uint32{2, 5, 9, 478, 1023}
m := NewSeekerManager(nil, testDefaultOpts, defaultTestBlockRetrieverOptions).(*seekerManager)
clockOpts := m.opts.ClockOptions()
now := clockOpts.NowFn()()
@@ -294,20 +317,28 @@ func TestSeekerManagerOpenCloseLoop(t *testing.T) {
return nil
}
- // Force all the seekers to be opened
- require.NoError(t, m.CacheShardIndices(shards))
-
// Notified everytime the openCloseLoop ticks
tickCh := make(chan struct{})
cleanupCh := make(chan struct{})
+
m.sleepFn = func(_ time.Duration) {
tickCh <- struct{}{}
}
+ shards := []uint32{2, 5, 9, 478, 1023}
metadata := testNs1Metadata(t)
+ shardSet, err := sharding.NewShardSet(
+ sharding.NewShards(shards, shard.Available),
+ sharding.DefaultHashFn(1),
+ )
+ require.NoError(t, err)
+ require.NoError(t, m.Open(metadata, shardSet))
+
+ // Force all the seekers to be opened
+ require.NoError(t, m.CacheShardIndices(shards))
+
seekers := []ConcurrentDataFileSetSeeker{}
- require.NoError(t, m.Open(metadata))
// Steps is a series of steps for the test. It is guaranteed that at least
// one (not exactly one!) tick of the openCloseLoop will occur between every step.
steps := []struct {
@@ -319,7 +350,10 @@ func TestSeekerManagerOpenCloseLoop(t *testing.T) {
step: func() {
m.RLock()
for _, shard := range shards {
- require.Equal(t, 1, len(m.seekersByTime(shard).seekers[startNano].active.seekers))
+ byTime, ok := m.seekersByTime(shard)
+ require.True(t, ok)
+
+ require.Equal(t, 1, len(byTime.seekers[startNano].active.seekers))
}
m.RUnlock()
},
@@ -350,7 +384,9 @@ func TestSeekerManagerOpenCloseLoop(t *testing.T) {
step: func() {
m.RLock()
for _, shard := range shards {
- require.Equal(t, 1, len(m.seekersByTime(shard).seekers[startNano].active.seekers))
+ byTime, ok := m.seekersByTime(shard)
+ require.True(t, ok)
+ require.Equal(t, 1, len(byTime.seekers[startNano].active.seekers))
}
m.RUnlock()
},
@@ -368,9 +404,10 @@ func TestSeekerManagerOpenCloseLoop(t *testing.T) {
step: func() {
m.RLock()
for _, shard := range shards {
- byTime := m.seekersByTime(shard)
+ byTime, ok := m.seekersByTime(shard)
+ require.True(t, ok)
byTime.RLock()
- _, ok := byTime.seekers[startNano]
+ _, ok = byTime.seekers[startNano]
byTime.RUnlock()
require.False(t, ok)
}
@@ -406,3 +443,131 @@ func TestSeekerManagerOpenCloseLoop(t *testing.T) {
// to prevent the test itself from interfering with the goroutine leak test
close(cleanupCh)
}
+
+func TestSeekerManagerAssignShardSet(t *testing.T) {
+ defer leaktest.CheckTimeout(t, 1*time.Minute)()
+
+ var (
+ ctrl = gomock.NewController(t)
+ shards = []uint32{1, 2}
+ m = NewSeekerManager(nil, testDefaultOpts, defaultTestBlockRetrieverOptions).(*seekerManager)
+ )
+ defer ctrl.Finish()
+
+ var (
+ wg sync.WaitGroup
+ mockSeekerStatsLock sync.Mutex
+ numMockSeekerClosesByShardAndBlockStart = make(map[uint32]map[xtime.UnixNano]int)
+ )
+ m.newOpenSeekerFn = func(
+ shard uint32,
+ blockStart time.Time,
+ volume int,
+ ) (DataFileSetSeeker, error) {
+ // We expect `defaultTestingFetchConcurrency` number of calls to Close because we return this
+ // many numbers of clones and each clone will need to be closed.
+ wg.Add(defaultTestingFetchConcurrency)
+
+ mock := NewMockDataFileSetSeeker(ctrl)
+ // ConcurrentClone() will be called fetchConcurrency-1 times because the original can be used
+ // as one of the clones.
+ mock.EXPECT().ConcurrentClone().Times(defaultTestingFetchConcurrency-1).Return(mock, nil)
+ mock.EXPECT().Close().Times(defaultTestingFetchConcurrency).DoAndReturn(func() error {
+ mockSeekerStatsLock.Lock()
+ numMockSeekerClosesByBlockStart, ok := numMockSeekerClosesByShardAndBlockStart[shard]
+ if !ok {
+ numMockSeekerClosesByBlockStart = make(map[xtime.UnixNano]int)
+ numMockSeekerClosesByShardAndBlockStart[shard] = numMockSeekerClosesByBlockStart
+ }
+ numMockSeekerClosesByBlockStart[xtime.ToUnixNano(blockStart)]++
+ mockSeekerStatsLock.Unlock()
+ wg.Done()
+ return nil
+ })
+ mock.EXPECT().ConcurrentIDBloomFilter().Return(nil).AnyTimes()
+ return mock, nil
+ }
+ m.sleepFn = func(_ time.Duration) {
+ time.Sleep(time.Millisecond)
+ }
+
+ metadata := testNs1Metadata(t)
+ shardSet, err := sharding.NewShardSet(
+ sharding.NewShards(shards, shard.Available),
+ sharding.DefaultHashFn(1),
+ )
+ require.NoError(t, err)
+	// Pick a start time that's within retention so the background loop doesn't close
+ // the seeker.
+ blockStart := time.Now().Truncate(metadata.Options().RetentionOptions().BlockSize())
+ require.NoError(t, m.Open(metadata, shardSet))
+
+ for _, shard := range shards {
+ seeker, err := m.Borrow(shard, blockStart)
+ require.NoError(t, err)
+ require.NoError(t, m.Return(shard, blockStart, seeker))
+ }
+
+ // Ensure that UpdateOpenLease() updates the volumes.
+ for _, shard := range shards {
+ updateResult, err := m.UpdateOpenLease(block.LeaseDescriptor{
+ Namespace: metadata.ID(),
+ Shard: shard,
+ BlockStart: blockStart,
+ }, block.LeaseState{Volume: 1})
+ require.NoError(t, err)
+ require.Equal(t, block.UpdateOpenLease, updateResult)
+
+ byTime, ok := m.seekersByTime(shard)
+ require.True(t, ok)
+ byTime.RLock()
+ byTime.RUnlock()
+ }
+
+ mockSeekerStatsLock.Lock()
+ for _, numMockSeekerClosesByBlockStart := range numMockSeekerClosesByShardAndBlockStart {
+ require.Equal(t,
+ defaultTestingFetchConcurrency,
+ numMockSeekerClosesByBlockStart[xtime.ToUnixNano(blockStart)])
+ }
+ mockSeekerStatsLock.Unlock()
+
+ // Shards have moved off the node so we assign an empty shard set.
+ m.AssignShardSet(sharding.NewEmptyShardSet(sharding.DefaultHashFn(1)))
+ // Wait until the open/close loop has finished closing all the shards marked to be closed.
+ wg.Wait()
+
+ // Verify that shards are no longer available.
+ for _, shard := range shards {
+ ok, err := m.Test(nil, shard, blockStart)
+ require.Equal(t, errShardNotExists, err)
+ require.False(t, ok)
+ _, err = m.Borrow(shard, blockStart)
+ require.Equal(t, errShardNotExists, err)
+ }
+
+ // Verify that we see the expected # of closes per block start.
+ mockSeekerStatsLock.Lock()
+ for _, numMockSeekerClosesByBlockStart := range numMockSeekerClosesByShardAndBlockStart {
+ for start, numMockSeekerCloses := range numMockSeekerClosesByBlockStart {
+ if xtime.ToUnixNano(blockStart) == start {
+			// NB(bodu): These get closed twice since they were already closed once when their block lease was updated.
+ require.Equal(t, defaultTestingFetchConcurrency*2, numMockSeekerCloses)
+ continue
+ }
+ require.Equal(t, defaultTestingFetchConcurrency, numMockSeekerCloses)
+ }
+ }
+ mockSeekerStatsLock.Unlock()
+
+ // Shards have moved back to the node so we assign a populated shard set again.
+ m.AssignShardSet(shardSet)
+ // Ensure that we can (once again) borrow the shards.
+ for _, shard := range shards {
+ seeker, err := m.Borrow(shard, blockStart)
+ require.NoError(t, err)
+ require.NoError(t, m.Return(shard, blockStart, seeker))
+ }
+
+ require.NoError(t, m.Close())
+}
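
The new test synchronizes with the background open/close loop using a WaitGroup plus a mutex-guarded map of close counts. That pattern in isolation (a goroutine stands in for the loop):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		wg     sync.WaitGroup
		mu     sync.Mutex
		closes = make(map[uint32]int) // close count per shard
	)

	closeSeeker := func(shard uint32) {
		mu.Lock()
		closes[shard]++
		mu.Unlock()
		wg.Done()
	}

	shards := []uint32{1, 2}
	wg.Add(len(shards)) // expect one async close per shard
	for _, s := range shards {
		go closeSeeker(s) // stands in for the background open/close loop
	}

	// Block until every expected close has happened, then inspect counts.
	wg.Wait()
	mu.Lock()
	defer mu.Unlock()
	for _, s := range shards {
		fmt.Printf("shard %d closed %d time(s)\n", s, closes[s])
	}
}
```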
diff --git a/src/dbnode/persist/fs/seek_test.go b/src/dbnode/persist/fs/seek_test.go
index e57962e00f..f74dadadae 100644
--- a/src/dbnode/persist/fs/seek_test.go
+++ b/src/dbnode/persist/fs/seek_test.go
@@ -29,8 +29,9 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/digest"
+ "github.com/m3db/m3/src/dbnode/persist"
+ "github.com/m3db/m3/src/dbnode/persist/schema"
"github.com/m3db/m3/src/x/ident"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -89,12 +90,15 @@ func TestSeekDataUnexpectedSize(t *testing.T) {
BlockStart: testWriterStart,
},
}
+ metadata := persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo"),
+ ident.Tags{},
+ persist.MetadataOptions{})
err = w.Open(writerOpts)
assert.NoError(t, err)
dataFile := w.(*writer).dataFdWithDigest.Fd().Name()
- assert.NoError(t, w.Write(
- ident.StringID("foo"), ident.Tags{},
+ assert.NoError(t, w.Write(metadata,
bytesRefd([]byte{1, 2, 3}),
digest.Checksum([]byte{1, 2, 3})))
assert.NoError(t, w.Close())
@@ -136,7 +140,10 @@ func TestSeekBadChecksum(t *testing.T) {
// Write data with wrong checksum
assert.NoError(t, w.Write(
- ident.StringID("foo"), ident.Tags{},
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo"),
+ ident.Tags{},
+ persist.MetadataOptions{}),
bytesRefd([]byte{1, 2, 3}),
digest.Checksum([]byte{1, 2, 4})))
assert.NoError(t, w.Close())
@@ -175,18 +182,24 @@ func TestSeek(t *testing.T) {
err = w.Open(writerOpts)
assert.NoError(t, err)
assert.NoError(t, w.Write(
- ident.StringID("foo1"),
- ident.NewTags(ident.StringTag("num", "1")),
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo1"),
+ ident.NewTags(ident.StringTag("num", "1")),
+ persist.MetadataOptions{}),
bytesRefd([]byte{1, 2, 1}),
digest.Checksum([]byte{1, 2, 1})))
assert.NoError(t, w.Write(
- ident.StringID("foo2"),
- ident.NewTags(ident.StringTag("num", "2")),
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo2"),
+ ident.NewTags(ident.StringTag("num", "2")),
+ persist.MetadataOptions{}),
bytesRefd([]byte{1, 2, 2}),
digest.Checksum([]byte{1, 2, 2})))
assert.NoError(t, w.Write(
- ident.StringID("foo3"),
- ident.NewTags(ident.StringTag("num", "3")),
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo3"),
+ ident.NewTags(ident.StringTag("num", "3")),
+ persist.MetadataOptions{}),
bytesRefd([]byte{1, 2, 3}),
digest.Checksum([]byte{1, 2, 3})))
assert.NoError(t, w.Close())
@@ -246,15 +259,24 @@ func TestSeekIDNotExists(t *testing.T) {
err = w.Open(writerOpts)
assert.NoError(t, err)
assert.NoError(t, w.Write(
- ident.StringID("foo10"), ident.Tags{},
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo10"),
+ ident.Tags{},
+ persist.MetadataOptions{}),
bytesRefd([]byte{1, 2, 1}),
digest.Checksum([]byte{1, 2, 1})))
assert.NoError(t, w.Write(
- ident.StringID("foo20"), ident.Tags{},
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo20"),
+ ident.Tags{},
+ persist.MetadataOptions{}),
bytesRefd([]byte{1, 2, 2}),
digest.Checksum([]byte{1, 2, 2})))
assert.NoError(t, w.Write(
- ident.StringID("foo30"), ident.Tags{},
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo30"),
+ ident.Tags{},
+ persist.MetadataOptions{}),
bytesRefd([]byte{1, 2, 3}),
digest.Checksum([]byte{1, 2, 3})))
assert.NoError(t, w.Close())
@@ -300,7 +322,10 @@ func TestReuseSeeker(t *testing.T) {
err = w.Open(writerOpts)
assert.NoError(t, err)
assert.NoError(t, w.Write(
- ident.StringID("foo"), ident.Tags{},
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo"),
+ ident.Tags{},
+ persist.MetadataOptions{}),
bytesRefd([]byte{1, 2, 1}),
digest.Checksum([]byte{1, 2, 1})))
assert.NoError(t, w.Close())
@@ -316,7 +341,10 @@ func TestReuseSeeker(t *testing.T) {
err = w.Open(writerOpts)
assert.NoError(t, err)
assert.NoError(t, w.Write(
- ident.StringID("foo"), ident.Tags{},
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo"),
+ ident.Tags{},
+ persist.MetadataOptions{}),
bytesRefd([]byte{1, 2, 3}),
digest.Checksum([]byte{1, 2, 3})))
assert.NoError(t, w.Close())
@@ -365,7 +393,10 @@ func TestCloneSeeker(t *testing.T) {
err = w.Open(writerOpts)
assert.NoError(t, err)
assert.NoError(t, w.Write(
- ident.StringID("foo"), ident.Tags{},
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo"),
+ ident.Tags{},
+ persist.MetadataOptions{}),
bytesRefd([]byte{1, 2, 1}),
digest.Checksum([]byte{1, 2, 1})))
assert.NoError(t, w.Close())
@@ -381,7 +412,10 @@ func TestCloneSeeker(t *testing.T) {
err = w.Open(writerOpts)
assert.NoError(t, err)
assert.NoError(t, w.Write(
- ident.StringID("foo"), ident.Tags{},
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo"),
+ ident.Tags{},
+ persist.MetadataOptions{}),
bytesRefd([]byte{1, 2, 3}),
digest.Checksum([]byte{1, 2, 3})))
assert.NoError(t, w.Close())
@@ -402,6 +436,71 @@ func TestCloneSeeker(t *testing.T) {
assert.Equal(t, []byte{1, 2, 1}, data.Bytes())
}
+func TestSeekValidateIndexEntriesFile(t *testing.T) {
+ dir, err := ioutil.TempDir("", "testdb")
+ if err != nil {
+ t.Fatal(err)
+ }
+ filePathPrefix := filepath.Join(dir, "")
+ defer os.RemoveAll(dir)
+
+ w := newTestWriter(t, filePathPrefix)
+ writerOpts := DataWriterOpenOptions{
+ BlockSize: testBlockSize,
+ Identifier: FileSetFileIdentifier{
+ Namespace: testNs1ID,
+ Shard: 0,
+ BlockStart: testWriterStart,
+ },
+ }
+ err = w.Open(writerOpts)
+ assert.NoError(t, err)
+
+ // Write data
+ assert.NoError(t, w.Write(
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("foo"),
+ ident.Tags{},
+ persist.MetadataOptions{}),
+ bytesRefd([]byte{1, 2, 3}),
+ digest.Checksum([]byte{1, 2, 3})))
+ assert.NoError(t, w.Close())
+
+ shardDir := ShardDataDirPath(filePathPrefix, testNs1ID, 0)
+
+ // With full file validation disabled
+ s := seeker{opts: seekerOpts{
+ filePathPrefix: filePathPrefix,
+ dataBufferSize: testReaderBufferSize,
+ infoBufferSize: testReaderBufferSize,
+ bytesPool: testBytesPool,
+ keepUnreadBuf: false,
+ opts: testDefaultOpts,
+ }}
+ s.versionChecker = schema.NewVersionChecker(1, 1)
+
+ indexFilePath := dataFilesetPathFromTimeAndIndex(shardDir, testWriterStart, 0, indexFileSuffix, false)
+ indexFd, err := os.Open(indexFilePath)
+ assert.NoError(t, err)
+ indexReader := digest.NewFdWithDigestReader(defaultInfoReaderBufferSize)
+ indexReader.Reset(indexFd)
+
+ assert.NoError(t, s.validateIndexFileDigest(indexReader, 0))
+
+ // With full file validation enabled
+ s.versionChecker = schema.NewVersionChecker(1, 0)
+ _, err = indexFd.Seek(0, 0)
+ assert.NoError(t, err)
+ indexReader.Reset(indexFd)
+
+ assert.Error(t, s.validateIndexFileDigest(indexReader, 0))
+
+ // Sanity check -- call seeker#Open and ensure VersionChecker is set correctly
+ err = s.Open(testNs1ID, 0, testWriterStart, 0, newTestReusableSeekerResources())
+ assert.NoError(t, err)
+ assert.True(t, s.versionChecker.IndexEntryValidationEnabled())
+}
+
func newTestReusableSeekerResources() ReusableSeekerResources {
return NewReusableSeekerResources(testDefaultOpts)
}
diff --git a/src/dbnode/persist/fs/segments.go b/src/dbnode/persist/fs/segments.go
new file mode 100644
index 0000000000..a568a62dad
--- /dev/null
+++ b/src/dbnode/persist/fs/segments.go
@@ -0,0 +1,91 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fs
+
+import (
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/generated/proto/index"
+ "github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
+ xtime "github.com/m3db/m3/src/x/time"
+)
+
+type segments struct {
+ absoluteFilepaths []string
+ shardRanges result.ShardTimeRanges
+ volumeType idxpersist.IndexVolumeType
+ volumeIndex int
+ blockStart time.Time
+}
+
+// NewSegments returns the on-disk segments for an index info file.
+func NewSegments(
+ info index.IndexVolumeInfo,
+ volumeIndex int,
+ absoluteFilepaths []string,
+) Segments {
+ sr := result.NewShardTimeRanges()
+ indexBlockStart := xtime.UnixNano(info.BlockStart).ToTime()
+ indexBlockRange := xtime.Range{
+ Start: indexBlockStart,
+ End: indexBlockStart.Add(time.Duration(info.BlockSize)),
+ }
+ for _, shard := range info.Shards {
+ ranges, ok := sr.Get(shard)
+ if !ok {
+ ranges = xtime.NewRanges()
+ sr.Set(shard, ranges)
+ }
+ ranges.AddRange(indexBlockRange)
+ }
+ volumeType := idxpersist.DefaultIndexVolumeType
+ if info.IndexVolumeType != nil {
+ volumeType = idxpersist.IndexVolumeType(info.IndexVolumeType.Value)
+ }
+ return &segments{
+ shardRanges: sr,
+ volumeType: volumeType,
+ volumeIndex: volumeIndex,
+ absoluteFilepaths: absoluteFilepaths,
+ blockStart: indexBlockStart,
+ }
+}
+
+func (o *segments) ShardTimeRanges() result.ShardTimeRanges {
+ return o.shardRanges
+}
+
+func (o *segments) VolumeType() idxpersist.IndexVolumeType {
+ return o.volumeType
+}
+
+func (o *segments) AbsoluteFilePaths() []string {
+ return o.absoluteFilepaths
+}
+
+func (o *segments) VolumeIndex() int {
+ return o.volumeIndex
+}
+
+func (o *segments) BlockStart() time.Time {
+ return o.blockStart
+}
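
For reviewers, a minimal usage sketch of the new accessor follows (not part of the diff); the info values and file path are invented for illustration:

    package main

    import (
        "fmt"
        "time"

        "github.com/m3db/m3/src/dbnode/generated/proto/index"
        "github.com/m3db/m3/src/dbnode/persist/fs"
    )

    func main() {
        // Hypothetical decoded index info file for a 2h block covering shards 0-2.
        info := index.IndexVolumeInfo{
            BlockStart: time.Now().Truncate(2 * time.Hour).UnixNano(),
            BlockSize:  int64(2 * time.Hour),
            Shards:     []uint32{0, 1, 2},
        }
        segs := fs.NewSegments(info, 0, []string{"/var/lib/m3db/index/segment-0.db"})
        fmt.Println(segs.BlockStart(), segs.VolumeIndex(), segs.VolumeType())
    }
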
diff --git a/src/dbnode/persist/fs/types.go b/src/dbnode/persist/fs/types.go
index b187240c72..2c28c7e359 100644
--- a/src/dbnode/persist/fs/types.go
+++ b/src/dbnode/persist/fs/types.go
@@ -31,8 +31,11 @@ import (
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs/msgpack"
"github.com/m3db/m3/src/dbnode/runtime"
+ "github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/storage/block"
+ "github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/checked"
@@ -85,11 +88,14 @@ type DataFileSetWriter interface {
// Write will write the id and data pair and returns an error on a write error. Callers
// must not call this method with a given ID more than once.
- Write(id ident.ID, tags ident.Tags, data checked.Bytes, checksum uint32) error
+ Write(metadata persist.Metadata, data checked.Bytes, checksum uint32) error
// WriteAll will write the id and all byte slices and returns an error on a write error.
// Callers must not call this method with a given ID more than once.
- WriteAll(id ident.ID, tags ident.Tags, data []checked.Bytes, checksum uint32) error
+ WriteAll(metadata persist.Metadata, data []checked.Bytes, checksum uint32) error
+
+ // DeferClose returns a DataCloser that defers writing of a checkpoint file.
+ DeferClose() (persist.DataCloser, error)
}
// SnapshotMetadataFileWriter writes out snapshot metadata files.
@@ -116,6 +122,10 @@ type DataFileSetReaderStatus struct {
type DataReaderOpenOptions struct {
Identifier FileSetFileIdentifier
FileSetType persist.FileSetType
+	// NB(bodu): This option can inform the reader to optimize for reading
+	// only metadata by not sorting index entries. When this option is set,
+	// a regular `Read()` returns an error.
+ OptimizedReadMetadataOnly bool
}
// DataFileSetReader provides an unsynchronized reader for a TSDB file set
@@ -238,12 +248,18 @@ type DataFileSetSeekerManager interface {
io.Closer
// Open opens the seekers for a given namespace.
- Open(md namespace.Metadata) error
+ Open(
+ md namespace.Metadata,
+ shardSet sharding.ShardSet,
+ ) error
// CacheShardIndices will pre-parse the indexes for given shards
// to improve times when seeking to a block.
CacheShardIndices(shards []uint32) error
+	// AssignShardSet assigns the current per-namespace shard set.
+ AssignShardSet(shardSet sharding.ShardSet)
+
// Borrow returns an open seeker for a given shard, block start time, and
// volume.
Borrow(shard uint32, start time.Time) (ConcurrentDataFileSetSeeker, error)
@@ -263,7 +279,10 @@ type DataBlockRetriever interface {
block.DatabaseBlockRetriever
// Open the block retriever to retrieve from a namespace
- Open(md namespace.Metadata) error
+ Open(
+ md namespace.Metadata,
+ shardSet sharding.ShardSet,
+ ) error
}
// RetrievableDataBlockSegmentReader is a retrievable block reader
@@ -283,7 +302,8 @@ type IndexWriterOpenOptions struct {
FileSetType persist.FileSetType
Shards map[uint32]struct{}
// Only used when writing snapshot files
- Snapshot IndexWriterSnapshotOptions
+ Snapshot IndexWriterSnapshotOptions
+ IndexVolumeType idxpersist.IndexVolumeType
}
// IndexFileSetWriter is a index file set writer.
@@ -461,17 +481,37 @@ type Options interface {
// TagDecoderPool returns the tag decoder pool.
TagDecoderPool() serialize.TagDecoderPool
- // SetFStOptions sets the fst options.
+ // SetFSTOptions sets the fst options.
SetFSTOptions(value fst.Options) Options
// FSTOptions returns the fst options.
FSTOptions() fst.Options
+ // SetFStWriterOptions sets the fst writer options.
+ SetFSTWriterOptions(value fst.WriterOptions) Options
+
+ // FSTWriterOptions returns the fst writer options.
+ FSTWriterOptions() fst.WriterOptions
+
// SetMmapReporter sets the mmap reporter.
- SetMmapReporter(mmapReporter mmap.Reporter) Options
+ SetMmapReporter(value mmap.Reporter) Options
// MmapReporter returns the mmap reporter.
MmapReporter() mmap.Reporter
+
+ // SetIndexReaderAutovalidateIndexSegments sets the index reader to
+ // autovalidate index segments data integrity on file open.
+ SetIndexReaderAutovalidateIndexSegments(value bool) Options
+
+ // IndexReaderAutovalidateIndexSegments returns the index reader to
+ // autovalidate index segments data integrity on file open.
+ IndexReaderAutovalidateIndexSegments() bool
+
+ // SetEncodingOptions sets the encoder options used by the encoder.
+ SetEncodingOptions(value msgpack.LegacyEncodingOptions) Options
+
+ // EncodingOptions returns the encoder options used by the encoder.
+ EncodingOptions() msgpack.LegacyEncodingOptions
}
// BlockRetrieverOptions represents the options for block retrieval
@@ -512,7 +552,7 @@ type BlockRetrieverOptions interface {
// ForEachRemainingFn is the function that is run on each of the remaining
// series of the merge target that did not intersect with the fileset.
-type ForEachRemainingFn func(seriesID ident.ID, tags ident.Tags, data []xio.BlockReader) error
+type ForEachRemainingFn func(seriesMetadata doc.Document, data block.FetchBlockResult) error
// MergeWith is an interface that the fs merger uses to merge data with.
type MergeWith interface {
@@ -544,6 +584,20 @@ type Merger interface {
nextVolumeIndex int,
flushPreparer persist.FlushPreparer,
nsCtx namespace.Context,
+ onFlush persist.OnFlushSeries,
+ ) (persist.DataCloser, error)
+
+ // MergeAndCleanup merges the specified fileset file with a merge target and removes the previous version of the
+ // fileset. This should only be called within the bootstrapper. Any other file deletions outside of the bootstrapper
+ // should be handled by the CleanupManager.
+ MergeAndCleanup(
+ fileID FileSetFileIdentifier,
+ mergeWith MergeWith,
+ nextVolumeIndex int,
+ flushPreparer persist.FlushPreparer,
+ nsCtx namespace.Context,
+ onFlush persist.OnFlushSeries,
+ isBootstrapped bool,
) error
}
@@ -556,5 +610,21 @@ type NewMergerFn func(
identPool ident.Pool,
encoderPool encoding.EncoderPool,
contextPool context.Pool,
+ filePathPrefix string,
nsOpts namespace.Options,
) Merger
+
+// Segments represents on index segments on disk for an index volume.
+type Segments interface {
+ ShardTimeRanges() result.ShardTimeRanges
+ VolumeType() idxpersist.IndexVolumeType
+ VolumeIndex() int
+ AbsoluteFilePaths() []string
+ BlockStart() time.Time
+}
+
+// InfoFileResultsPerShard maps shards to info files.
+type InfoFileResultsPerShard map[uint32][]ReadInfoFileResult
+
+// InfoFilesByNamespace maps a namespace to info files grouped by shard.
+type InfoFilesByNamespace map[namespace.Metadata]InfoFileResultsPerShard
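
A hypothetical consumer of the two new map types might look like the sketch below; the helper name is invented:

    package example

    import "github.com/m3db/m3/src/dbnode/persist/fs"

    // countInfoFiles tallies info file results across all namespaces and
    // shards -- a sketch of walking the new InfoFilesByNamespace grouping.
    func countInfoFiles(infoFiles fs.InfoFilesByNamespace) int {
        total := 0
        for _, byShard := range infoFiles {
            for _, results := range byShard {
                total += len(results)
            }
        }
        return total
    }
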
diff --git a/src/dbnode/persist/fs/write.go b/src/dbnode/persist/fs/write.go
index cb05a82c3b..b7b6b3574a 100644
--- a/src/dbnode/persist/fs/write.go
+++ b/src/dbnode/persist/fs/write.go
@@ -29,7 +29,7 @@ import (
"sort"
"time"
- "github.com/m3db/bloom"
+ "github.com/m3db/bloom/v4"
"github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs/msgpack"
@@ -60,6 +60,7 @@ type writer struct {
summariesPercent float64
bloomFilterFalsePositivePercent float64
+ bufferSize int
infoFdWithDigest digest.FdWithDigestWriter
indexFdWithDigest digest.FdWithDigestWriter
@@ -80,24 +81,28 @@ type writer struct {
encoder *msgpack.Encoder
digestBuf digest.Buffer
singleCheckedBytes []checked.Bytes
+ tagsIterator ident.TagsIterator
tagEncoderPool serialize.TagEncoderPool
err error
}
type indexEntry struct {
index int64
- id ident.ID
- tags ident.Tags
+ metadata persist.Metadata
dataFileOffset int64
indexFileOffset int64
size uint32
- checksum uint32
+ dataChecksum uint32
}
type indexEntries []indexEntry
func (e indexEntries) releaseRefs() {
- // memset zero loop optimization
+ // Close any metadata.
+ for _, elem := range e {
+ elem.metadata.Finalize()
+ }
+ // Apply memset zero loop optimization.
var zeroed indexEntry
for i := range e {
e[i] = zeroed
@@ -109,7 +114,7 @@ func (e indexEntries) Len() int {
}
func (e indexEntries) Less(i, j int) bool {
- return bytes.Compare(e[i].id.Bytes(), e[j].id.Bytes()) < 0
+ return bytes.Compare(e[i].metadata.BytesID(), e[j].metadata.BytesID()) < 0
}
func (e indexEntries) Swap(i, j int) {
@@ -128,15 +133,17 @@ func NewWriter(opts Options) (DataFileSetWriter, error) {
newDirectoryMode: opts.NewDirectoryMode(),
summariesPercent: opts.IndexSummariesPercent(),
bloomFilterFalsePositivePercent: opts.IndexBloomFilterFalsePositivePercent(),
+ bufferSize: bufferSize,
infoFdWithDigest: digest.NewFdWithDigestWriter(bufferSize),
indexFdWithDigest: digest.NewFdWithDigestWriter(bufferSize),
summariesFdWithDigest: digest.NewFdWithDigestWriter(bufferSize),
bloomFilterFdWithDigest: digest.NewFdWithDigestWriter(bufferSize),
dataFdWithDigest: digest.NewFdWithDigestWriter(bufferSize),
digestFdWithDigestContents: digest.NewFdWithDigestContentsWriter(bufferSize),
- encoder: msgpack.NewEncoder(),
+ encoder: msgpack.NewEncoderWithOptions(opts.EncodingOptions()),
digestBuf: digest.NewBuffer(),
singleCheckedBytes: make([]checked.Bytes, 1),
+ tagsIterator: ident.NewTagsIterator(ident.Tags{}),
tagEncoderPool: opts.TagEncoderPool(),
}, nil
}
@@ -250,26 +257,24 @@ func (w *writer) writeData(data []byte) error {
}
func (w *writer) Write(
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
data checked.Bytes,
- checksum uint32,
+ dataChecksum uint32,
) error {
w.singleCheckedBytes[0] = data
- return w.WriteAll(id, tags, w.singleCheckedBytes, checksum)
+ return w.WriteAll(metadata, w.singleCheckedBytes, dataChecksum)
}
func (w *writer) WriteAll(
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
data []checked.Bytes,
- checksum uint32,
+ dataChecksum uint32,
) error {
if w.err != nil {
return w.err
}
- if err := w.writeAll(id, tags, data, checksum); err != nil {
+ if err := w.writeAll(metadata, data, dataChecksum); err != nil {
w.err = err
return err
}
@@ -277,10 +282,9 @@ func (w *writer) WriteAll(
}
func (w *writer) writeAll(
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
data []checked.Bytes,
- checksum uint32,
+ dataChecksum uint32,
) error {
var size int64
for _, d := range data {
@@ -295,11 +299,10 @@ func (w *writer) writeAll(
entry := indexEntry{
index: w.currIdx,
- id: id,
- tags: tags,
+ metadata: metadata,
dataFileOffset: w.currOffset,
size: uint32(size),
- checksum: checksum,
+ dataChecksum: dataChecksum,
}
for _, d := range data {
if d == nil {
@@ -327,13 +330,40 @@ func (w *writer) Close() error {
}
// NB(xichen): only write out the checkpoint file if there are no errors
// encountered between calling writer.Open() and writer.Close().
- if err := w.writeCheckpointFile(); err != nil {
+ if err := writeCheckpointFile(
+ w.checkpointFilePath,
+ w.digestFdWithDigestContents.Digest().Sum32(),
+ w.digestBuf,
+ w.newFileMode,
+ ); err != nil {
w.err = err
return err
}
return nil
}
+func (w *writer) DeferClose() (persist.DataCloser, error) {
+ err := w.close()
+ if w.err != nil {
+ return nil, w.err
+ }
+ if err != nil {
+ w.err = err
+ return nil, err
+ }
+ checkpointFilePath := w.checkpointFilePath
+ digestChecksum := w.digestFdWithDigestContents.Digest().Sum32()
+ newFileMode := w.newFileMode
+ return func() error {
+ return writeCheckpointFile(
+ checkpointFilePath,
+ digestChecksum,
+ digest.NewBuffer(),
+ newFileMode,
+ )
+ }, nil
+}
+
func (w *writer) close() error {
if err := w.writeIndexRelatedFiles(); err != nil {
return err
@@ -359,21 +389,6 @@ func (w *writer) close() error {
)
}
-func (w *writer) writeCheckpointFile() error {
- fd, err := w.openWritable(w.checkpointFilePath)
- if err != nil {
- return err
- }
- digestChecksum := w.digestFdWithDigestContents.Digest().Sum32()
- if err := w.digestBuf.WriteDigestToFile(fd, digestChecksum); err != nil {
- // NB(prateek): intentionally skipping fd.Close() error, as failure
- // to write takes precedence over failure to close the file
- fd.Close()
- return err
- }
- return fd.Close()
-}
-
func (w *writer) openWritable(filePath string) (*os.File, error) {
return OpenWritable(filePath, w.newFileMode)
}
@@ -429,41 +444,48 @@ func (w *writer) writeIndexFileContents(
sort.Sort(w.indexEntries)
var (
- offset int64
- prevID []byte
- tagsIter = ident.NewTagsIterator(ident.Tags{})
- tagsEncoder = w.tagEncoderPool.Get()
+ offset int64
+ prevID []byte
+ tagsReuseable = w.tagsIterator
+ tagsEncoder = w.tagEncoderPool.Get()
)
defer tagsEncoder.Finalize()
- for i := range w.indexEntries {
- id := w.indexEntries[i].id.Bytes()
+ for i, entry := range w.indexEntries {
+ metadata := entry.metadata
+ id := metadata.BytesID()
// Need to check if i > 0 or we can never write an empty string ID
if i > 0 && bytes.Equal(id, prevID) {
// Should never happen, Write() should only be called once per ID
return fmt.Errorf("encountered duplicate ID: %s", id)
}
+ tagsIter, err := metadata.ResetOrReturnProvidedTagIterator(tagsReuseable)
+ if err != nil {
+ return err
+ }
+
var encodedTags []byte
- if tags := w.indexEntries[i].tags; tags.Values() != nil {
- tagsIter.Reset(tags)
+ if numTags := tagsIter.Remaining(); numTags > 0 {
tagsEncoder.Reset()
if err := tagsEncoder.Encode(tagsIter); err != nil {
return err
}
- data, ok := tagsEncoder.Data()
+
+ encodedTagsData, ok := tagsEncoder.Data()
if !ok {
return errWriterEncodeTagsDataNotAccessible
}
- encodedTags = data.Bytes()
+
+ encodedTags = encodedTagsData.Bytes()
}
entry := schema.IndexEntry{
- Index: w.indexEntries[i].index,
- ID: id,
- Size: int64(w.indexEntries[i].size),
- Offset: w.indexEntries[i].dataFileOffset,
- Checksum: int64(w.indexEntries[i].checksum),
- EncodedTags: encodedTags,
+ Index: entry.index,
+ ID: id,
+ Size: int64(entry.size),
+ Offset: entry.dataFileOffset,
+ DataChecksum: int64(entry.dataChecksum),
+ EncodedTags: encodedTags,
}
w.encoder.Reset()
@@ -506,7 +528,7 @@ func (w *writer) writeSummariesFileContents(
summary := schema.IndexSummary{
Index: w.indexEntries[i].index,
- ID: w.indexEntries[i].id.Bytes(),
+ ID: w.indexEntries[i].metadata.BytesID(),
IndexEntryOffset: w.indexEntries[i].indexFileOffset,
}
@@ -549,6 +571,7 @@ func (w *writer) writeInfoFileContents(
BlockSize: int64(w.blockSize),
Entries: w.currIdx,
MajorVersion: schema.MajorVersion,
+ MinorVersion: schema.MinorVersion,
Summaries: schema.IndexSummariesInfo{
Summaries: int64(summaries),
},
@@ -566,3 +589,22 @@ func (w *writer) writeInfoFileContents(
_, err = w.infoFdWithDigest.Write(w.encoder.Bytes())
return err
}
+
+func writeCheckpointFile(
+ checkpointFilePath string,
+ digestChecksum uint32,
+ digestBuf digest.Buffer,
+ newFileMode os.FileMode,
+) error {
+ fd, err := OpenWritable(checkpointFilePath, newFileMode)
+ if err != nil {
+ return err
+ }
+ if err := digestBuf.WriteDigestToFile(fd, digestChecksum); err != nil {
+ // NB(prateek): intentionally skipping fd.Close() error, as failure
+ // to write takes precedence over failure to close the file
+ fd.Close()
+ return err
+ }
+ return fd.Close()
+}
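
To make the deferred-checkpoint flow concrete, here is a hedged sketch of a caller that splits closing from checkpointing; the function name is invented and the writes are elided:

    package example

    import (
        "github.com/m3db/m3/src/dbnode/persist"
        "github.com/m3db/m3/src/dbnode/persist/fs"
    )

    // writeWithDeferredCheckpoint opens a writer and returns a closer that
    // writes the checkpoint file when invoked later, once dependent state
    // (e.g. an index flush) is itself durable.
    func writeWithDeferredCheckpoint(
        w fs.DataFileSetWriter,
        openOpts fs.DataWriterOpenOptions,
    ) (persist.DataCloser, error) {
        if err := w.Open(openOpts); err != nil {
            return nil, err
        }
        // Calls to w.Write(metadata, data, checksum) elided.
        // DeferClose flushes and closes every file except the checkpoint.
        return w.DeferClose()
    }
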
diff --git a/src/dbnode/persist/fs/write_test.go b/src/dbnode/persist/fs/write_test.go
index 2c8d718f1b..57ca954db8 100644
--- a/src/dbnode/persist/fs/write_test.go
+++ b/src/dbnode/persist/fs/write_test.go
@@ -43,7 +43,6 @@ func TestWriteReuseAfterError(t *testing.T) {
filePathPrefix := filepath.Join(dir, "")
defer os.RemoveAll(dir)
- seriesID := ident.StringID("series1")
w := newTestWriter(t, filePathPrefix)
writerOpts := DataWriterOpenOptions{
Identifier: FileSetFileIdentifier{
@@ -58,8 +57,20 @@ func TestWriteReuseAfterError(t *testing.T) {
data := checkedBytes([]byte{1, 2, 3})
require.NoError(t, w.Open(writerOpts))
- require.NoError(t, w.Write(seriesID, ident.Tags{}, data, 0))
- require.NoError(t, w.Write(seriesID, ident.Tags{}, data, 0))
+ require.NoError(t, w.Write(
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("series1"),
+ ident.Tags{},
+ persist.MetadataOptions{}),
+ data,
+ 0))
+ require.NoError(t, w.Write(
+ persist.NewMetadataFromIDAndTags(
+ ident.StringID("series1"),
+ ident.Tags{},
+ persist.MetadataOptions{}),
+ data,
+ 0))
require.Error(t, w.Close())
require.NoError(t, w.Open(writerOpts))
diff --git a/src/dbnode/persist/persist_mock.go b/src/dbnode/persist/persist_mock.go
index 5de4408b0e..1ec52dcd02 100644
--- a/src/dbnode/persist/persist_mock.go
+++ b/src/dbnode/persist/persist_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/dbnode/persist/types.go
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -99,6 +99,18 @@ func (mr *MockManagerMockRecorder) StartIndexPersist() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartIndexPersist", reflect.TypeOf((*MockManager)(nil).StartIndexPersist))
}
+// Close mocks base method
+func (m *MockManager) Close() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Close")
+}
+
+// Close indicates an expected call of Close
+func (mr *MockManagerMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockManager)(nil).Close))
+}
+
// MockPreparer is a mock of Preparer interface
type MockPreparer struct {
ctrl *gomock.Controller
@@ -292,3 +304,40 @@ func (mr *MockIndexFlushMockRecorder) DoneIndex() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoneIndex", reflect.TypeOf((*MockIndexFlush)(nil).DoneIndex))
}
+
+// MockOnFlushSeries is a mock of OnFlushSeries interface
+type MockOnFlushSeries struct {
+ ctrl *gomock.Controller
+ recorder *MockOnFlushSeriesMockRecorder
+}
+
+// MockOnFlushSeriesMockRecorder is the mock recorder for MockOnFlushSeries
+type MockOnFlushSeriesMockRecorder struct {
+ mock *MockOnFlushSeries
+}
+
+// NewMockOnFlushSeries creates a new mock instance
+func NewMockOnFlushSeries(ctrl *gomock.Controller) *MockOnFlushSeries {
+ mock := &MockOnFlushSeries{ctrl: ctrl}
+ mock.recorder = &MockOnFlushSeriesMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockOnFlushSeries) EXPECT() *MockOnFlushSeriesMockRecorder {
+ return m.recorder
+}
+
+// OnFlushNewSeries mocks base method
+func (m *MockOnFlushSeries) OnFlushNewSeries(arg0 OnFlushNewSeriesEvent) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "OnFlushNewSeries", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// OnFlushNewSeries indicates an expected call of OnFlushNewSeries
+func (mr *MockOnFlushSeriesMockRecorder) OnFlushNewSeries(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnFlushNewSeries", reflect.TypeOf((*MockOnFlushSeries)(nil).OnFlushNewSeries), arg0)
+}
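
A small hypothetical test shows the generated mock in use via gomock's standard controller/EXPECT flow; the test name and assertion are illustrative only:

    package persist_test

    import (
        "testing"

        "github.com/golang/mock/gomock"
        "github.com/stretchr/testify/require"

        "github.com/m3db/m3/src/dbnode/persist"
    )

    func TestOnFlushSeriesMock(t *testing.T) {
        ctrl := gomock.NewController(t)
        defer ctrl.Finish()

        onFlush := persist.NewMockOnFlushSeries(ctrl)
        onFlush.EXPECT().
            OnFlushNewSeries(gomock.Any()).
            Return(nil).
            Times(1)

        // Code under test would receive onFlush and invoke it during flush.
        require.NoError(t, onFlush.OnFlushNewSeries(persist.OnFlushNewSeriesEvent{}))
    }
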
diff --git a/src/dbnode/persist/schema/types.go b/src/dbnode/persist/schema/types.go
index d46a0a0ea0..1579fe0e78 100644
--- a/src/dbnode/persist/schema/types.go
+++ b/src/dbnode/persist/schema/types.go
@@ -29,6 +29,11 @@ import (
// tooling needs to upgrade older files to newer files before a server restart
const MajorVersion = 1
+// MinorVersion is the minor schema version for a set of fileset files.
+// This is only incremented for *non-breaking* changes whose rollout we
+// want some level of control over.
+const MinorVersion = 1
+
// IndexInfo stores metadata information about block filesets
type IndexInfo struct {
MajorVersion int64
@@ -41,6 +46,7 @@ type IndexInfo struct {
FileType persist.FileSetType
SnapshotID []byte
VolumeIndex int
+ MinorVersion int64
}
// IndexSummariesInfo stores metadata about the summaries
@@ -55,13 +61,17 @@ type IndexBloomFilterInfo struct {
}
// IndexEntry stores entry-level data indexing
+//
+// When serialized to disk, the encoder will automatically append the IndexEntryChecksum, a checksum
+// to validate the index entry itself, to the end of the entry. That field is not exposed on this struct
+// as it is handled transparently by the encoder and decoder. Checksum appending starts in V3.
type IndexEntry struct {
- Index int64
- ID []byte
- Size int64
- Offset int64
- Checksum int64
- EncodedTags []byte
+ Index int64
+ ID []byte
+ Size int64
+ Offset int64
+ DataChecksum int64
+ EncodedTags []byte
}
// IndexSummary stores a summary of an index entry to lookup
diff --git a/src/dbnode/persist/schema/version_checker.go b/src/dbnode/persist/schema/version_checker.go
new file mode 100644
index 0000000000..ac14edec07
--- /dev/null
+++ b/src/dbnode/persist/schema/version_checker.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package schema
+
+// VersionChecker centralizes logic for checking whether a given
+// major/minor version combination supports specific functionality.
+type VersionChecker struct {
+ majorVersion int
+ minorVersion int
+}
+
+// NewVersionChecker creates a new VersionChecker
+func NewVersionChecker(majorVersion int, minorVersion int) VersionChecker {
+ return VersionChecker{
+ majorVersion: majorVersion,
+ minorVersion: minorVersion,
+ }
+}
+
+// IndexEntryValidationEnabled checks the version to determine whether
+// fileset files of the specified version support checksum validation
+// of individual index entries.
+func (v *VersionChecker) IndexEntryValidationEnabled() bool {
+ return v.majorVersion >= 2 || v.majorVersion == 1 && v.minorVersion >= 1
+}
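
A minimal sketch of the intended gating, assuming a decoded schema.IndexInfo; the helper name is invented:

    package example

    import "github.com/m3db/m3/src/dbnode/persist/schema"

    // entryValidationEnabled reports whether per-entry checksum validation
    // applies to the fileset described by info; the real wiring lives in
    // the seeker's Open path.
    func entryValidationEnabled(info schema.IndexInfo) bool {
        checker := schema.NewVersionChecker(
            int(info.MajorVersion), int(info.MinorVersion))
        return checker.IndexEntryValidationEnabled()
    }
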
diff --git a/src/dbnode/persist/schema/version_checker_test.go b/src/dbnode/persist/schema/version_checker_test.go
new file mode 100644
index 0000000000..217c38e1d5
--- /dev/null
+++ b/src/dbnode/persist/schema/version_checker_test.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package schema
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestIndexEntryValidationEnabled(t *testing.T) {
+ checker := NewVersionChecker(1, 1)
+ require.True(t, checker.IndexEntryValidationEnabled())
+
+ checker = NewVersionChecker(1, 2)
+ require.True(t, checker.IndexEntryValidationEnabled())
+
+ checker = NewVersionChecker(2, 1)
+ require.True(t, checker.IndexEntryValidationEnabled())
+
+ checker = NewVersionChecker(2, 0)
+ require.True(t, checker.IndexEntryValidationEnabled())
+}
+
+func TestIndexEntryValidationDisabled(t *testing.T) {
+ checker := NewVersionChecker(1, 0)
+ require.False(t, checker.IndexEntryValidationEnabled())
+}
diff --git a/src/dbnode/persist/types.go b/src/dbnode/persist/types.go
index 4912883225..0568e0099c 100644
--- a/src/dbnode/persist/types.go
+++ b/src/dbnode/persist/types.go
@@ -21,28 +21,143 @@
package persist
import (
+ "errors"
"fmt"
"time"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/index/segment"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/ident"
"github.com/pborman/uuid"
)
+var (
+	errReuseableTagIteratorRequired = errors.New("reusable tags iterator is required")
+)
+
+// Metadata is metadata for a time series, it can
+// have several underlying sources.
+type Metadata struct {
+ metadata doc.Document
+ id ident.ID
+ tags ident.Tags
+ tagsIter ident.TagIterator
+ opts MetadataOptions
+}
+
+// MetadataOptions is options to use when creating metadata.
+type MetadataOptions struct {
+ FinalizeID bool
+ FinalizeTags bool
+ FinalizeTagIterator bool
+}
+
+// NewMetadata returns a new metadata struct from series metadata.
+// Note: because doc.Document has no pools for finalization we do not
+// take MetadataOptions here, in future if we have pools or
+// some other shared options that Metadata needs we will add it to this
+// constructor as well.
+func NewMetadata(metadata doc.Document) Metadata {
+ return Metadata{metadata: metadata}
+}
+
+// NewMetadataFromIDAndTags returns a new metadata struct from
+// explicit ID and tags.
+func NewMetadataFromIDAndTags(
+ id ident.ID,
+ tags ident.Tags,
+ opts MetadataOptions,
+) Metadata {
+ return Metadata{
+ id: id,
+ tags: tags,
+ opts: opts,
+ }
+}
+
+// NewMetadataFromIDAndTagIterator returns a new metadata struct from
+// explicit ID and tag iterator.
+func NewMetadataFromIDAndTagIterator(
+ id ident.ID,
+ tagsIter ident.TagIterator,
+ opts MetadataOptions,
+) Metadata {
+ return Metadata{
+ id: id,
+ tagsIter: tagsIter,
+ opts: opts,
+ }
+}
+
+// BytesID returns the bytes ID of the series.
+func (m Metadata) BytesID() []byte {
+ if m.id != nil {
+ return m.id.Bytes()
+ }
+ return m.metadata.ID
+}
+
+// ResetOrReturnProvidedTagIterator returns a tag iterator for the
+// series: either a direct ref to a tag iterator that was provided, or
+// the reusable tag iterator supplied by the callsite, reset to iterate
+// over the tags or fields.
+func (m Metadata) ResetOrReturnProvidedTagIterator(
+ reuseableTagsIterator ident.TagsIterator,
+) (ident.TagIterator, error) {
+ if reuseableTagsIterator == nil {
+		// Always require a reusable iterator so that callsites don't
+		// fall into a bad allocation pattern of creating one inline
+		// here if the metadata they pass in changes from tagsIter to
+		// tags or fields.
+ return nil, errReuseableTagIteratorRequired
+ }
+ if m.tagsIter != nil {
+ return m.tagsIter, nil
+ }
+
+ if len(m.tags.Values()) > 0 {
+ reuseableTagsIterator.Reset(m.tags)
+ return reuseableTagsIterator, reuseableTagsIterator.Err()
+ }
+
+ reuseableTagsIterator.ResetFields(m.metadata.Fields)
+ return reuseableTagsIterator, reuseableTagsIterator.Err()
+}
+
+// Finalize will finalize any resources that requested
+// to be finalized.
+func (m Metadata) Finalize() {
+ if m.opts.FinalizeID && m.id != nil {
+ m.id.Finalize()
+ }
+ if m.opts.FinalizeTags && m.tags.Values() != nil {
+ m.tags.Finalize()
+ }
+ if m.opts.FinalizeTagIterator && m.tagsIter != nil {
+ m.tagsIter.Close()
+ }
+}
+
// DataFn is a function that persists a m3db segment for a given ID.
-type DataFn func(id ident.ID, tags ident.Tags, segment ts.Segment, checksum uint32) error
+type DataFn func(metadata Metadata, segment ts.Segment, checksum uint32) error
// DataCloser is a function that performs cleanup after persisting the data
// blocks for a (shard, blockStart) combination.
type DataCloser func() error
+// DeferCloser returns a DataCloser that persists the data checkpoint file when called.
+type DeferCloser func() (DataCloser, error)
+
// PreparedDataPersist is an object that holds a persist function and a closer.
type PreparedDataPersist struct {
- Persist DataFn
- Close DataCloser
+ Persist DataFn
+ Close DataCloser
+ DeferClose DeferCloser
}
// CommitLogFiles represents a slice of commitlog files.
@@ -89,6 +204,8 @@ type Manager interface {
// StartIndexPersist begins a flush for index data.
StartIndexPersist() (IndexFlush, error)
+
+ Close()
}
// Preparer can generate a PreparedDataPersist object for writing data for
@@ -153,6 +270,7 @@ type IndexPrepareOptions struct {
BlockStart time.Time
FileSetType FileSetType
Shards map[uint32]struct{}
+ IndexVolumeType idxpersist.IndexVolumeType
}
// DataPrepareSnapshotOptions is the options struct for the Prepare method that contains
@@ -202,3 +320,27 @@ const (
// FileSetIndexContentType indicates that the fileset files contain time series index metadata
FileSetIndexContentType
)
+
+// OnFlushNewSeriesEvent contains the fields related to a flush of a new series.
+type OnFlushNewSeriesEvent struct {
+ Shard uint32
+ BlockStart time.Time
+ FirstWrite time.Time
+ SeriesMetadata doc.Document
+}
+
+// OnFlushSeries performs work on a per series level.
+type OnFlushSeries interface {
+ OnFlushNewSeries(OnFlushNewSeriesEvent) error
+}
+
+// NoOpColdFlushNamespace is a no-op impl of OnFlushSeries.
+type NoOpColdFlushNamespace struct{}
+
+// OnFlushNewSeries is a no-op.
+func (n *NoOpColdFlushNamespace) OnFlushNewSeries(event OnFlushNewSeriesEvent) error {
+ return nil
+}
+
+// Done is a no-op.
+func (n *NoOpColdFlushNamespace) Done() error { return nil }
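
To illustrate the new Metadata API end to end, a hedged sketch follows showing construction from an explicit ID and tags plus tag encoding through a reusable iterator; both function names are invented:

    package example

    import (
        "github.com/m3db/m3/src/dbnode/persist"
        "github.com/m3db/m3/src/x/ident"
        "github.com/m3db/m3/src/x/serialize"
    )

    // newSeriesMetadata builds Metadata that owns its ID and tags, so
    // Finalize() releases both.
    func newSeriesMetadata() persist.Metadata {
        return persist.NewMetadataFromIDAndTags(
            ident.StringID("series1"),
            ident.NewTags(ident.StringTag("city", "nyc")),
            persist.MetadataOptions{FinalizeID: true, FinalizeTags: true})
    }

    // encodeTags encodes a series' tags via a caller-provided reusable
    // iterator, mirroring the writer's use of the new accessor.
    func encodeTags(
        m persist.Metadata,
        reuseable ident.TagsIterator,
        enc serialize.TagEncoder,
    ) error {
        iter, err := m.ResetOrReturnProvidedTagIterator(reuseable)
        if err != nil {
            return err
        }
        enc.Reset()
        return enc.Encode(iter)
    }
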
diff --git a/src/dbnode/runtime/runtime_mock.go b/src/dbnode/runtime/runtime_mock.go
index e896f18120..aa45644751 100644
--- a/src/dbnode/runtime/runtime_mock.go
+++ b/src/dbnode/runtime/runtime_mock.go
@@ -184,6 +184,34 @@ func (mr *MockOptionsMockRecorder) WriteNewSeriesLimitPerShardPerSecond() *gomoc
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteNewSeriesLimitPerShardPerSecond", reflect.TypeOf((*MockOptions)(nil).WriteNewSeriesLimitPerShardPerSecond))
}
+// SetEncodersPerBlockLimit mocks base method
+func (m *MockOptions) SetEncodersPerBlockLimit(value int) Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetEncodersPerBlockLimit", value)
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// SetEncodersPerBlockLimit indicates an expected call of SetEncodersPerBlockLimit
+func (mr *MockOptionsMockRecorder) SetEncodersPerBlockLimit(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEncodersPerBlockLimit", reflect.TypeOf((*MockOptions)(nil).SetEncodersPerBlockLimit), value)
+}
+
+// EncodersPerBlockLimit mocks base method
+func (m *MockOptions) EncodersPerBlockLimit() int {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "EncodersPerBlockLimit")
+ ret0, _ := ret[0].(int)
+ return ret0
+}
+
+// EncodersPerBlockLimit indicates an expected call of EncodersPerBlockLimit
+func (mr *MockOptionsMockRecorder) EncodersPerBlockLimit() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EncodersPerBlockLimit", reflect.TypeOf((*MockOptions)(nil).EncodersPerBlockLimit))
+}
+
// SetTickSeriesBatchSize mocks base method
func (m *MockOptions) SetTickSeriesBatchSize(value int) Options {
m.ctrl.T.Helper()
diff --git a/src/dbnode/runtime/runtime_options.go b/src/dbnode/runtime/runtime_options.go
index 4df8c965dd..8baf6f15d0 100644
--- a/src/dbnode/runtime/runtime_options.go
+++ b/src/dbnode/runtime/runtime_options.go
@@ -48,7 +48,7 @@ const (
defaultTickSeriesBatchSize = 512
defaultTickPerSeriesSleepDuration = 100 * time.Microsecond
defaultTickMinimumInterval = 10 * time.Second
- defaultMaxWiredBlocks = uint(1 << 18) // 262,144
+ defaultMaxWiredBlocks = uint(1 << 16) // 65,536
)
var (
@@ -67,6 +67,7 @@ type options struct {
writeNewSeriesAsync bool
writeNewSeriesBackoffDuration time.Duration
writeNewSeriesLimitPerShardPerSecond int
+ encodersPerBlockLimit int
tickSeriesBatchSize int
tickPerSeriesSleepDuration time.Duration
tickMinimumInterval time.Duration
@@ -160,6 +161,16 @@ func (o *options) WriteNewSeriesLimitPerShardPerSecond() int {
return o.writeNewSeriesLimitPerShardPerSecond
}
+func (o *options) SetEncodersPerBlockLimit(value int) Options {
+ opts := *o
+ opts.encodersPerBlockLimit = value
+ return &opts
+}
+
+func (o *options) EncodersPerBlockLimit() int {
+ return o.encodersPerBlockLimit
+}
+
func (o *options) SetTickSeriesBatchSize(value int) Options {
opts := *o
opts.tickSeriesBatchSize = value
diff --git a/src/dbnode/runtime/types.go b/src/dbnode/runtime/types.go
index e10681391c..cafe9f8046 100644
--- a/src/dbnode/runtime/types.go
+++ b/src/dbnode/runtime/types.go
@@ -77,6 +77,20 @@ type Options interface {
// time series being inserted.
WriteNewSeriesLimitPerShardPerSecond() int
+ // SetEncodersPerBlockLimit sets the maximum number of encoders per block
+ // allowed. Setting to zero means an unlimited number of encoders are
+ // permitted. This rate limit is primarily offered to defend against
+	// bursts of out of order writes, which create many encoders and can
+	// subsequently cause a large burst in CPU load when merging them.
+ SetEncodersPerBlockLimit(value int) Options
+
+	// EncodersPerBlockLimit returns the maximum number of encoders per block
+	// allowed. Zero means an unlimited number of encoders are
+	// permitted. This rate limit is primarily offered to defend against
+	// bursts of out of order writes, which create many encoders and can
+	// subsequently cause a large burst in CPU load when merging them.
+ EncodersPerBlockLimit() int
+
// SetTickSeriesBatchSize sets the batch size to process series together
// during a tick before yielding and sleeping the per series duration
// multiplied by the batch size.
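
A short sketch of applying the limit through the runtime options manager, mirroring what the KV watch in server.go does; the helper name is invented:

    package example

    import "github.com/m3db/m3/src/dbnode/runtime"

    // applyEncoderLimit pushes a new per-block encoder limit to all
    // subscribers of the runtime options manager; zero disables the limit.
    func applyEncoderLimit(mgr runtime.OptionsManager, limit int) error {
        opts := mgr.Get().SetEncodersPerBlockLimit(limit)
        return mgr.Update(opts)
    }
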
diff --git a/src/dbnode/server/options.go b/src/dbnode/server/options.go
new file mode 100644
index 0000000000..36dc8ccf79
--- /dev/null
+++ b/src/dbnode/server/options.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package server
+
+import (
+ "github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/node"
+ "github.com/m3db/m3/src/dbnode/storage"
+)
+
+// StorageOptions are options to apply to the database storage.
+type StorageOptions struct {
+ OnColdFlush storage.OnColdFlush
+ ForceColdWritesEnabled bool
+ TChanNodeServerFn node.NewTChanNodeServerFn
+}
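
A hypothetical embedding binary wiring these options into Run might look like the sketch below; the config path is a placeholder:

    package main

    import "github.com/m3db/m3/src/dbnode/server"

    func main() {
        server.Run(server.RunOptions{
            ConfigFile: "/etc/m3dbnode/m3dbnode.yml", // placeholder path
            StorageOptions: server.StorageOptions{
                // Assumption: forcing cold writes on is useful for testing
                // regardless of namespace configuration.
                ForceColdWritesEnabled: true,
            },
        })
    }
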
diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go
index 3a072f4bf1..da343e8a86 100644
--- a/src/dbnode/server/server.go
+++ b/src/dbnode/server/server.go
@@ -59,14 +59,17 @@ import (
"github.com/m3db/m3/src/dbnode/ratelimit"
"github.com/m3db/m3/src/dbnode/retention"
m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
+ "github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/storage"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/cluster"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/series"
+ "github.com/m3db/m3/src/dbnode/storage/stats"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
xtchannel "github.com/m3db/m3/src/dbnode/x/tchannel"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
@@ -134,6 +137,15 @@ type RunOptions struct {
// InterruptCh is a programmatic interrupt channel to supply to
// interrupt and shutdown the server.
InterruptCh <-chan error
+
+ // QueryStatsTrackerFn returns a tracker for tracking query stats.
+ QueryStatsTrackerFn func(instrument.Options, stats.QueryStatsOptions) stats.QueryStatsTracker
+
+ // CustomOptions are custom options to apply to the session.
+ CustomOptions []client.CustomAdminOption
+
+	// StorageOptions are options to apply to the database storage.
+ StorageOptions StorageOptions
}
// Run runs the server programmatically given a filename for the
@@ -273,7 +285,7 @@ func Run(runOpts RunOptions) {
logger.Info("using seed nodes etcd cluster",
zap.String("zone", zone), zap.Strings("endpoints", endpoints))
- service.Service.ETCDClusters = []etcd.ClusterConfig{etcd.ClusterConfig{
+ service.Service.ETCDClusters = []etcd.ClusterConfig{{
Zone: zone,
Endpoints: endpoints,
}}
@@ -311,12 +323,18 @@ func Run(runOpts RunOptions) {
}
}
+	// By default use histogram timers for timers that
+	// are constructed, allowing the timer type to be picked
+	// by the caller using instrument.NewTimer(...).
+ timerOpts := instrument.NewHistogramTimerOptions(instrument.HistogramTimerOptions{})
+ timerOpts.StandardSampleRate = cfg.Metrics.SampleRate()
+
var (
opts = storage.NewOptions()
iopts = opts.InstrumentOptions().
SetLogger(logger).
SetMetricsScope(scope).
- SetMetricsSamplingRate(cfg.Metrics.SampleRate()).
+ SetTimerOptions(timerOpts).
SetTracer(tracer)
)
opts = opts.SetInstrumentOptions(iopts)
@@ -391,6 +409,29 @@ func Run(runOpts RunOptions) {
}
defer stopReporting()
+ // Setup query stats tracking.
+ statsOpts := stats.QueryStatsOptions{
+ Lookback: stats.DefaultLookback,
+ }
+ if max := runOpts.Config.Limits.MaxRecentlyQueriedSeriesBlocks; max != nil {
+ statsOpts = stats.QueryStatsOptions{
+ MaxDocs: max.Value,
+ Lookback: max.Lookback,
+ }
+ }
+ if err := statsOpts.Validate(); err != nil {
+ logger.Fatal("could not construct query stats options from config", zap.Error(err))
+ }
+
+ tracker := stats.DefaultQueryStatsTracker(iopts, statsOpts)
+ if runOpts.QueryStatsTrackerFn != nil {
+ tracker = runOpts.QueryStatsTrackerFn(iopts, statsOpts)
+ }
+
+ queryStats := stats.NewQueryStats(tracker)
+ queryStats.Start()
+ defer queryStats.Stop()
+
// FOLLOWUP(prateek): remove this once we have the runtime options<->index wiring done
indexOpts := opts.IndexOptions()
insertMode := index.InsertSync
@@ -403,7 +444,8 @@ func Run(runOpts RunOptions) {
CacheRegexp: plCacheConfig.CacheRegexpOrDefault(),
CacheTerms: plCacheConfig.CacheTermsOrDefault(),
}).
- SetMmapReporter(mmapReporter)
+ SetMmapReporter(mmapReporter).
+ SetQueryStats(queryStats)
opts = opts.SetIndexOptions(indexOpts)
if tick := cfg.Tick; tick != nil {
@@ -429,7 +471,7 @@ func Run(runOpts RunOptions) {
scope.SubScope("tag-encoder-pool")))
tagEncoderPool.Init()
tagDecoderPool := serialize.NewTagDecoderPool(
- serialize.NewTagDecoderOptions(),
+ serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
poolOptions(
policy.TagDecoderPool,
scope.SubScope("tag-decoder-pool")))
@@ -523,12 +565,12 @@ func Run(runOpts RunOptions) {
SetFetchConcurrency(blockRetrieveCfg.FetchConcurrency)
}
blockRetrieverMgr := block.NewDatabaseBlockRetrieverManager(
- func(md namespace.Metadata) (block.DatabaseBlockRetriever, error) {
+ func(md namespace.Metadata, shardSet sharding.ShardSet) (block.DatabaseBlockRetriever, error) {
retriever, err := fs.NewBlockRetriever(retrieverOpts, fsopts)
if err != nil {
return nil, err
}
- if err := retriever.Open(md); err != nil {
+ if err := retriever.Open(md, shardSet); err != nil {
return nil, err
}
return retriever, nil
@@ -550,9 +592,10 @@ func Run(runOpts RunOptions) {
logger.Info("creating dynamic config service client with m3cluster")
envCfg, err = cfg.EnvironmentConfig.Configure(environment.ConfigurationParameters{
- InstrumentOpts: iopts,
- HashingSeed: cfg.Hashing.Seed,
- NewDirectoryMode: newDirectoryMode,
+ InstrumentOpts: iopts,
+ HashingSeed: cfg.Hashing.Seed,
+ NewDirectoryMode: newDirectoryMode,
+ ForceColdWritesEnabled: runOpts.StorageOptions.ForceColdWritesEnabled,
})
if err != nil {
logger.Fatal("could not initialize dynamic config", zap.Error(err))
@@ -561,8 +604,9 @@ func Run(runOpts RunOptions) {
logger.Info("creating static config service client with m3cluster")
envCfg, err = cfg.EnvironmentConfig.Configure(environment.ConfigurationParameters{
- InstrumentOpts: iopts,
- HostID: hostID,
+ InstrumentOpts: iopts,
+ HostID: hostID,
+ ForceColdWritesEnabled: runOpts.StorageOptions.ForceColdWritesEnabled,
})
if err != nil {
logger.Fatal("could not initialize static config", zap.Error(err))
@@ -604,8 +648,13 @@ func Run(runOpts RunOptions) {
tchannelOpts.MaxIdleTime = cfg.TChannel.MaxIdleTime
tchannelOpts.IdleCheckInterval = cfg.TChannel.IdleCheckInterval
}
+ tchanOpts := ttnode.NewOptions(tchannelOpts).
+ SetInstrumentOptions(opts.InstrumentOptions())
+ if fn := runOpts.StorageOptions.TChanNodeServerFn; fn != nil {
+ tchanOpts = tchanOpts.SetTChanNodeServerFn(fn)
+ }
tchannelthriftNodeClose, err := ttnode.NewServer(service,
- cfg.ListenAddress, contextPool, tchannelOpts).ListenAndServe()
+ cfg.ListenAddress, contextPool, tchanOpts).ListenAndServe()
if err != nil {
logger.Fatal("could not open tchannelthrift interface",
zap.String("address", cfg.ListenAddress), zap.Error(err))
@@ -699,8 +748,10 @@ func Run(runOpts RunOptions) {
origin := topology.NewHost(hostID, "")
m3dbClient, err := newAdminClient(
- cfg.Client, iopts, tchannelOpts, syncCfg.TopologyInitializer, runtimeOptsMgr,
- origin, protoEnabled, schemaRegistry, syncCfg.KVStore, logger)
+ cfg.Client, iopts, tchannelOpts, syncCfg.TopologyInitializer,
+ runtimeOptsMgr, origin, protoEnabled, schemaRegistry,
+ syncCfg.KVStore, logger, runOpts.CustomOptions)
+
if err != nil {
logger.Fatal("could not create m3db client", zap.Error(err))
}
@@ -734,8 +785,9 @@ func Run(runOpts RunOptions) {
// Guaranteed to not be nil if repair is enabled by config validation.
clientCfg := *cluster.Client
clusterClient, err := newAdminClient(
- clientCfg, iopts, tchannelOpts, topologyInitializer, runtimeOptsMgr,
- origin, protoEnabled, schemaRegistry, syncCfg.KVStore, logger)
+ clientCfg, iopts, tchannelOpts, topologyInitializer,
+ runtimeOptsMgr, origin, protoEnabled, schemaRegistry,
+ syncCfg.KVStore, logger, runOpts.CustomOptions)
if err != nil {
logger.Fatal(
"unable to create client for replicated cluster",
@@ -773,6 +825,10 @@ func Run(runOpts RunOptions) {
opts = opts.SetRepairEnabled(false)
}
+ if runOpts.StorageOptions.OnColdFlush != nil {
+ opts = opts.SetOnColdFlush(runOpts.StorageOptions.OnColdFlush)
+ }
+
// Set bootstrap options - We need to create a topology map provider from the
// same topology that will be passed to the cluster so that when we make
// bootstrapping decisions they are in sync with the clustered database
@@ -889,6 +945,8 @@ func Run(runOpts RunOptions) {
// Only set the write new series limit after bootstrapping
kvWatchNewSeriesLimitPerShard(syncCfg.KVStore, logger, topo,
runtimeOptsMgr, cfg.WriteNewSeriesLimitPerSecond)
+ kvWatchEncodersPerBlockLimit(syncCfg.KVStore, logger,
+ runtimeOptsMgr, cfg.Limits.MaxEncodersPerBlock)
}()
// Wait for process interrupt.
@@ -1005,6 +1063,62 @@ func kvWatchNewSeriesLimitPerShard(
}()
}
+func kvWatchEncodersPerBlockLimit(
+ store kv.Store,
+ logger *zap.Logger,
+ runtimeOptsMgr m3dbruntime.OptionsManager,
+ defaultEncodersPerBlockLimit int,
+) {
+ var initEncoderLimit int
+
+ value, err := store.Get(kvconfig.EncodersPerBlockLimitKey)
+ if err == nil {
+ protoValue := &commonpb.Int64Proto{}
+ err = value.Unmarshal(protoValue)
+ if err == nil {
+ initEncoderLimit = int(protoValue.Value)
+ }
+ }
+
+ if err != nil {
+ if err != kv.ErrNotFound {
+ logger.Warn("error resolving encoder per block limit", zap.Error(err))
+ }
+ initEncoderLimit = defaultEncodersPerBlockLimit
+ }
+
+ err = setEncodersPerBlockLimitOnChange(runtimeOptsMgr, initEncoderLimit)
+ if err != nil {
+ logger.Warn("unable to set encoder per block limit", zap.Error(err))
+ }
+
+ watch, err := store.Watch(kvconfig.EncodersPerBlockLimitKey)
+ if err != nil {
+ logger.Error("could not watch encoder per block limit", zap.Error(err))
+ return
+ }
+
+ go func() {
+ protoValue := &commonpb.Int64Proto{}
+ for range watch.C() {
+ value := defaultEncodersPerBlockLimit
+ if newValue := watch.Get(); newValue != nil {
+ if err := newValue.Unmarshal(protoValue); err != nil {
+ logger.Warn("unable to parse new encoder per block limit", zap.Error(err))
+ continue
+ }
+ value = int(protoValue.Value)
+ }
+
+ err = setEncodersPerBlockLimitOnChange(runtimeOptsMgr, value)
+ if err != nil {
+ logger.Warn("unable to set encoder per block limit", zap.Error(err))
+ continue
+ }
+ }
+ }()
+}
+
func kvWatchClientConsistencyLevels(
store kv.Store,
logger *zap.Logger,
@@ -1164,6 +1278,21 @@ func clusterLimitToPlacedShardLimit(topo topology.Topology, clusterLimit int) in
return nodeLimit
}
+func setEncodersPerBlockLimitOnChange(
+ runtimeOptsMgr m3dbruntime.OptionsManager,
+ encoderLimit int,
+) error {
+ runtimeOpts := runtimeOptsMgr.Get()
+ if runtimeOpts.EncodersPerBlockLimit() == encoderLimit {
+ // Not changed, no need to set the value and trigger a runtime options update
+ return nil
+ }
+
+ newRuntimeOpts := runtimeOpts.
+ SetEncodersPerBlockLimit(encoderLimit)
+ return runtimeOptsMgr.Update(newRuntimeOpts)
+}
+
// this function will block for at most waitTimeout to try to get an initial value
// before we kick off the bootstrap
func kvWatchBootstrappers(
@@ -1347,7 +1476,7 @@ func withEncodingAndPoolingOptions(
InstrumentOptions().
SetMetricsScope(scope.SubScope("write-batch-pool")))
- writeBatchPool := ts.NewWriteBatchPool(
+ writeBatchPool := writes.NewWriteBatchPool(
writeBatchPoolOpts,
writeBatchPoolInitialBatchSize,
writeBatchPoolMaxBatchSize)
@@ -1499,6 +1628,8 @@ func withEncodingAndPoolingOptions(
poolOptions(policy.IndexResultsPool, scope.SubScope("index-query-results-pool")))
aggregateQueryResultsPool := index.NewAggregateResultsPool(
poolOptions(policy.IndexResultsPool, scope.SubScope("index-aggregate-results-pool")))
+ aggregateQueryValuesPool := index.NewAggregateValuesPool(
+ poolOptions(policy.IndexResultsPool, scope.SubScope("index-aggregate-values-pool")))
// Set value transformation options.
opts = opts.SetTruncateType(cfg.Transforms.TruncateBy)
@@ -1529,6 +1660,7 @@ func withEncodingAndPoolingOptions(
SetCheckedBytesPool(bytesPool).
SetQueryResultsPool(queryResultsPool).
SetAggregateResultsPool(aggregateQueryResultsPool).
+ SetAggregateValuesPool(aggregateQueryValuesPool).
SetForwardIndexProbability(cfg.Index.ForwardIndexProbability).
SetForwardIndexThreshold(cfg.Index.ForwardIndexThreshold)
@@ -1542,6 +1674,11 @@ func withEncodingAndPoolingOptions(
// it sees the same reference of the options as is set for the DB.
return index.NewAggregateResults(nil, index.AggregateResultsOptions{}, indexOpts)
})
+ aggregateQueryValuesPool.Init(func() index.AggregateValues {
+ // NB(r): Need to initialize after setting the index opts so
+ // it sees the same reference of the options as is set for the DB.
+ return index.NewAggregateValues(indexOpts)
+ })
return opts.SetIndexOptions(indexOpts)
}
@@ -1557,6 +1694,7 @@ func newAdminClient(
schemaRegistry namespace.SchemaRegistry,
kvStore kv.Store,
logger *zap.Logger,
+ custom []client.CustomAdminOption,
) (client.AdminClient, error) {
if config.EnvironmentConfig != nil {
// If the user has provided an override for the dynamic client configuration
@@ -1564,12 +1702,8 @@ func newAdminClient(
topologyInitializer = nil
}
- m3dbClient, err := config.NewAdminClient(
- client.ConfigurationParameters{
- InstrumentOptions: iopts.
- SetMetricsScope(iopts.MetricsScope().SubScope("m3dbclient")),
- TopologyInitializer: topologyInitializer,
- },
+ // NB: append custom options coming from run options to existing options.
+ options := []client.CustomAdminOption{
func(opts client.AdminOptions) client.AdminOptions {
return opts.SetChannelOptions(tchannelOpts).(client.AdminOptions)
},
@@ -1591,6 +1725,16 @@ func newAdminClient(
func(opts client.AdminOptions) client.AdminOptions {
return opts.SetSchemaRegistry(schemaRegistry).(client.AdminOptions)
},
+ }
+
+ options = append(options, custom...)
+ m3dbClient, err := config.NewAdminClient(
+ client.ConfigurationParameters{
+ InstrumentOptions: iopts.
+ SetMetricsScope(iopts.MetricsScope().SubScope("m3dbclient")),
+ TopologyInitializer: topologyInitializer,
+ },
+ options...,
)
if err != nil {
return nil, err
diff --git a/src/dbnode/sharding/shardset.go b/src/dbnode/sharding/shardset.go
index e890e4db4d..5bf836a1d2 100644
--- a/src/dbnode/sharding/shardset.go
+++ b/src/dbnode/sharding/shardset.go
@@ -26,8 +26,7 @@ import (
"github.com/m3db/m3/src/cluster/shard"
"github.com/m3db/m3/src/x/ident"
-
- "github.com/spaolacci/murmur3"
+ "github.com/m3db/stackmurmur3/v2"
)
var (
@@ -161,6 +160,6 @@ func NewHashGenWithSeed(seed uint32) HashGen {
// NewHashFn generates a HashFN based on murmur32 with a given seed
func NewHashFn(length int, seed uint32) HashFn {
return func(id ident.ID) uint32 {
- return murmur3.Sum32WithSeed(id.Bytes(), seed) % uint32(length)
+ return murmur3.SeedSum32(seed, id.Bytes()) % uint32(length)
}
}
diff --git a/src/dbnode/sharding/shardset_test.go b/src/dbnode/sharding/shardset_test.go
index af978877f1..1b16193a74 100644
--- a/src/dbnode/sharding/shardset_test.go
+++ b/src/dbnode/sharding/shardset_test.go
@@ -25,7 +25,6 @@ import (
"github.com/m3db/m3/src/cluster/shard"
"github.com/m3db/m3/src/x/ident"
-
"github.com/stretchr/testify/require"
)
diff --git a/src/dbnode/storage/README.md b/src/dbnode/storage/README.md
new file mode 100644
index 0000000000..0c54d521f1
--- /dev/null
+++ b/src/dbnode/storage/README.md
@@ -0,0 +1,20 @@
+# storage
+
+Storage-related documentation.
+
+## Flush consistency model
+
+Flush occurs in the following steps:
+ - data warm flush
+ - rotate commit log
+ - data cold flush
+ - rotate cold mutable index segments
+ - flush cold tsdb data and write most files to disk (except checkpoint files)
+ - flush cold index data to disk and reload
+ - evict rotated cold mutable index segments
+ - write tsdb checkpoint files (completes the tsdb cold flush lifecycle)
+ - data snapshot
+ - drop the rotated commit log when done
+ - index flush
+
+Since we rotate the commit log before performing the data cold flush and only drop the rotated commit log after data snapshotting is done, we guarantee that no writes are lost if the node crashes. After the data cold flush completes, any new cold writes land in the active commit log (and are not dropped) when data snapshotting finishes. This is why data snapshotting only needs to snapshot warm data blocks (those that still need to be flushed).
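+
+A minimal sketch of this ordering follows; the `flusher` interface and its method names are invented for illustration and are not the real types in this package:
+
+```go
+package storage
+
+// flusher is a hypothetical view of the database used only to
+// illustrate the flush ordering described above.
+type flusher interface {
+    warmFlushData() error
+    rotateCommitLog() (rotatedCommitLog, error)
+    coldFlushData() error
+    snapshotWarmBlocks() error
+    flushIndex() error
+}
+
+// rotatedCommitLog may be dropped once its contents are durable elsewhere.
+type rotatedCommitLog interface{ Drop() }
+
+// flushOnce runs one flush cycle: the commit log is rotated before the
+// data cold flush and only dropped after the data snapshot completes,
+// so a crash at any point loses no writes.
+func flushOnce(db flusher) error {
+    if err := db.warmFlushData(); err != nil { // data warm flush
+        return err
+    }
+    rotated, err := db.rotateCommitLog() // rotate commit log
+    if err != nil {
+        return err
+    }
+    if err := db.coldFlushData(); err != nil { // data cold flush
+        return err
+    }
+    if err := db.snapshotWarmBlocks(); err != nil { // data snapshot
+        return err
+    }
+    rotated.Drop() // safe: snapshot + active commit log cover all unflushed data
+    return db.flushIndex() // index flush
+}
+```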
diff --git a/src/dbnode/storage/block/block.go b/src/dbnode/storage/block/block.go
index 7ab908aca2..0fdb4c9ba2 100644
--- a/src/dbnode/storage/block/block.go
+++ b/src/dbnode/storage/block/block.go
@@ -26,7 +26,6 @@ import (
"sync/atomic"
"time"
- "github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
@@ -288,7 +287,8 @@ func (b *dbBlock) streamWithRLock(ctx context.Context) (xio.BlockReader, error)
data.AppendAll(b.segment.Tail.Bytes())
}
data.DecRef()
- segmentReader.Reset(ts.NewSegment(data, nil, ts.FinalizeHead))
+ checksum := b.segment.CalculateChecksum()
+ segmentReader.Reset(ts.NewSegment(data, nil, checksum, ts.FinalizeHead))
ctx.RegisterFinalizer(segmentReader)
blockReader := xio.BlockReader{
@@ -331,7 +331,7 @@ func (b *dbBlock) resetNewBlockStartWithLock(start time.Time, blockSize time.Dur
func (b *dbBlock) resetSegmentWithLock(seg ts.Segment) {
b.segment = seg
b.length = seg.Len()
- b.checksum = digest.SegmentChecksum(seg)
+ b.checksum = seg.CalculateChecksum()
b.seriesID = nil
b.wasRetrievedFromDisk = false
}
diff --git a/src/dbnode/storage/block/block_mock.go b/src/dbnode/storage/block/block_mock.go
index 186ffde470..fef13847af 100644
--- a/src/dbnode/storage/block/block_mock.go
+++ b/src/dbnode/storage/block/block_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/dbnode/storage/block/types.go
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -30,6 +30,7 @@ import (
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/x/clock"
@@ -883,6 +884,18 @@ func (mr *MockDatabaseBlockRetrieverMockRecorder) Stream(ctx, shard, id, blockSt
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stream", reflect.TypeOf((*MockDatabaseBlockRetriever)(nil).Stream), ctx, shard, id, blockStart, onRetrieve, nsCtx)
}
+// AssignShardSet mocks base method
+func (m *MockDatabaseBlockRetriever) AssignShardSet(shardSet sharding.ShardSet) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "AssignShardSet", shardSet)
+}
+
+// AssignShardSet indicates an expected call of AssignShardSet
+func (mr *MockDatabaseBlockRetrieverMockRecorder) AssignShardSet(shardSet interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssignShardSet", reflect.TypeOf((*MockDatabaseBlockRetriever)(nil).AssignShardSet), shardSet)
+}
+
// MockDatabaseShardBlockRetriever is a mock of DatabaseShardBlockRetriever interface
type MockDatabaseShardBlockRetriever struct {
ctrl *gomock.Controller
@@ -945,18 +958,18 @@ func (m *MockDatabaseBlockRetrieverManager) EXPECT() *MockDatabaseBlockRetriever
}
// Retriever mocks base method
-func (m *MockDatabaseBlockRetrieverManager) Retriever(nsMetadata namespace.Metadata) (DatabaseBlockRetriever, error) {
+func (m *MockDatabaseBlockRetrieverManager) Retriever(nsMetadata namespace.Metadata, shardSet sharding.ShardSet) (DatabaseBlockRetriever, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Retriever", nsMetadata)
+ ret := m.ctrl.Call(m, "Retriever", nsMetadata, shardSet)
ret0, _ := ret[0].(DatabaseBlockRetriever)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Retriever indicates an expected call of Retriever
-func (mr *MockDatabaseBlockRetrieverManagerMockRecorder) Retriever(nsMetadata interface{}) *gomock.Call {
+func (mr *MockDatabaseBlockRetrieverManagerMockRecorder) Retriever(nsMetadata, shardSet interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Retriever", reflect.TypeOf((*MockDatabaseBlockRetrieverManager)(nil).Retriever), nsMetadata)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Retriever", reflect.TypeOf((*MockDatabaseBlockRetrieverManager)(nil).Retriever), nsMetadata, shardSet)
}
// MockDatabaseShardBlockRetrieverManager is a mock of DatabaseShardBlockRetrieverManager interface
diff --git a/src/dbnode/storage/block/block_proto_test.go b/src/dbnode/storage/block/block_proto_test.go
index 2408735b6c..3c758fe0dd 100644
--- a/src/dbnode/storage/block/block_proto_test.go
+++ b/src/dbnode/storage/block/block_proto_test.go
@@ -25,7 +25,6 @@ import (
"time"
"github.com/golang/mock/gomock"
- "github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/testdata/prototest"
"github.com/m3db/m3/src/dbnode/ts"
@@ -124,7 +123,7 @@ func TestDatabaseBlockMergeProto(t *testing.T) {
// Make sure the checksum was updated
mergedChecksum, err := block1.Checksum()
require.NoError(t, err)
- require.Equal(t, digest.SegmentChecksum(seg), mergedChecksum)
+ require.Equal(t, seg.CalculateChecksum(), mergedChecksum)
depCtx.BlockingClose()
block1.Close()
diff --git a/src/dbnode/storage/block/block_test.go b/src/dbnode/storage/block/block_test.go
index 195235a6e0..5f82923186 100644
--- a/src/dbnode/storage/block/block_test.go
+++ b/src/dbnode/storage/block/block_test.go
@@ -25,7 +25,6 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/dbnode/ts"
@@ -35,9 +34,9 @@ import (
xtime "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
+ "github.com/m3db/m3/src/dbnode/namespace"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/m3db/m3/src/dbnode/namespace"
)
func testDatabaseBlock(ctrl *gomock.Controller) *dbBlock {
@@ -189,7 +188,7 @@ func TestDatabaseBlockMerge(t *testing.T) {
// Make sure the checksum was updated
mergedChecksum, err := block1.Checksum()
require.NoError(t, err)
- require.Equal(t, digest.SegmentChecksum(seg), mergedChecksum)
+ require.Equal(t, seg.CalculateChecksum(), mergedChecksum)
// Make sure each segment reader was only finalized once
require.Equal(t, 3, len(segmentReaders))
@@ -383,7 +382,7 @@ func TestDatabaseBlockMergeChained(t *testing.T) {
// Make sure the checksum was updated
mergedChecksum, err := block1.Checksum()
require.NoError(t, err)
- require.Equal(t, digest.SegmentChecksum(seg), mergedChecksum)
+ require.Equal(t, seg.CalculateChecksum(), mergedChecksum)
// Make sure each segment reader was only finalized once
require.Equal(t, 5, len(segmentReaders))
@@ -492,7 +491,7 @@ func TestDatabaseBlockChecksumMergesAndRecalculates(t *testing.T) {
// Make sure the new checksum is correct
mergedChecksum, err := block1.Checksum()
require.NoError(t, err)
- require.Equal(t, digest.SegmentChecksum(seg), mergedChecksum)
+ require.Equal(t, seg.CalculateChecksum(), mergedChecksum)
}
func TestDatabaseBlockStreamMergePerformsCopy(t *testing.T) {
diff --git a/src/dbnode/storage/block/retriever_manager.go b/src/dbnode/storage/block/retriever_manager.go
index db10ba443b..6fcc8327a5 100644
--- a/src/dbnode/storage/block/retriever_manager.go
+++ b/src/dbnode/storage/block/retriever_manager.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
@@ -34,6 +35,7 @@ import (
// new database block retrievers
type NewDatabaseBlockRetrieverFn func(
md namespace.Metadata,
+ shardSet sharding.ShardSet,
) (DatabaseBlockRetriever, error)
// NewDatabaseBlockRetrieverManager creates a new manager
@@ -55,6 +57,7 @@ type blockRetrieverManager struct {
func (m *blockRetrieverManager) Retriever(
md namespace.Metadata,
+ shardSet sharding.ShardSet,
) (DatabaseBlockRetriever, error) {
m.RLock()
retriever, ok := m.retrievers.Get(md.ID())
@@ -72,7 +75,7 @@ func (m *blockRetrieverManager) Retriever(
}
var err error
- retriever, err = m.newRetrieverFn(md)
+ retriever, err = m.newRetrieverFn(md, shardSet)
if err != nil {
return nil, err
}
@@ -94,7 +97,7 @@ func NewDatabaseShardBlockRetriever(
) DatabaseShardBlockRetriever {
return &shardBlockRetriever{
DatabaseBlockRetriever: r,
- shard: shard,
+ shard: shard,
}
}
diff --git a/src/dbnode/storage/block/retriever_new_map_gen.go b/src/dbnode/storage/block/retriever_new_map_gen.go
index 95123b98b8..fa56e28d10 100644
--- a/src/dbnode/storage/block/retriever_new_map_gen.go
+++ b/src/dbnode/storage/block/retriever_new_map_gen.go
@@ -28,7 +28,7 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/dbnode/storage/block/types.go b/src/dbnode/storage/block/types.go
index 132b1fb20c..937126f112 100644
--- a/src/dbnode/storage/block/types.go
+++ b/src/dbnode/storage/block/types.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -25,6 +25,7 @@ import (
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
@@ -69,9 +70,10 @@ type FilteredBlocksMetadataIter interface {
// FetchBlockResult captures the block start time, the readers for the underlying streams, the
// corresponding checksum and any errors encountered.
type FetchBlockResult struct {
- Start time.Time
- Blocks []xio.BlockReader
- Err error
+ Start time.Time
+ FirstWrite time.Time
+ Blocks []xio.BlockReader
+ Err error
}
// FetchBlocksMetadataOptions are options used when fetching blocks metadata.
@@ -79,6 +81,7 @@ type FetchBlocksMetadataOptions struct {
IncludeSizes bool
IncludeChecksums bool
IncludeLastRead bool
+ OnlyDisk bool
}
// FetchBlockMetadataResult captures the block start time, the block size, and any errors encountered
@@ -279,6 +282,8 @@ type DatabaseBlockRetriever interface {
onRetrieve OnRetrieveBlock,
nsCtx namespace.Context,
) (xio.BlockReader, error)
+
+ AssignShardSet(shardSet sharding.ShardSet)
}
// DatabaseShardBlockRetriever is a block retriever bound to a shard.
@@ -297,7 +302,10 @@ type DatabaseShardBlockRetriever interface {
// for different namespaces.
type DatabaseBlockRetrieverManager interface {
// Retriever provides the DatabaseBlockRetriever for the given namespace.
- Retriever(nsMetadata namespace.Metadata) (DatabaseBlockRetriever, error)
+ Retriever(
+ nsMetadata namespace.Metadata,
+ shardSet sharding.ShardSet,
+ ) (DatabaseBlockRetriever, error)
}
// DatabaseShardBlockRetrieverManager creates and holds shard block
diff --git a/src/dbnode/storage/block/wired_list.go b/src/dbnode/storage/block/wired_list.go
index 5e4f133080..e804188b88 100644
--- a/src/dbnode/storage/block/wired_list.go
+++ b/src/dbnode/storage/block/wired_list.go
@@ -75,7 +75,7 @@ var (
// WiredList is a database block wired list.
type WiredList struct {
- sync.Mutex
+ mu sync.RWMutex
nowFn clock.NowFn
@@ -155,8 +155,8 @@ func (l *WiredList) SetRuntimeOptions(value runtime.Options) {
// Start starts processing the wired list
func (l *WiredList) Start() error {
- l.Lock()
- defer l.Unlock()
+ l.mu.Lock()
+ defer l.mu.Unlock()
if l.updatesCh != nil {
return errAlreadyStarted
}
@@ -181,8 +181,8 @@ func (l *WiredList) Start() error {
// Stop stops processing the wired list
func (l *WiredList) Stop() error {
- l.Lock()
- defer l.Unlock()
+ l.mu.Lock()
+ defer l.mu.Unlock()
if l.updatesCh == nil {
return errAlreadyStopped
@@ -204,13 +204,28 @@ func (l *WiredList) Stop() error {
//
// We use a channel and a background processing goroutine to reduce blocking / lock contention.
func (l *WiredList) BlockingUpdate(v DatabaseBlock) {
+ // Fast path, don't use defer (in Go 1.14 this won't matter anymore since
+ // defer is nearly free for simple call sites).
+ l.mu.RLock()
+ if l.updatesCh == nil {
+ l.mu.RUnlock()
+ return
+ }
l.updatesCh <- v
+ l.mu.RUnlock()
}
// NonBlockingUpdate will attempt to put the block in the events channel, but will not block
// if the channel is full. Used in cases where a blocking update could trigger deadlock with
// the WiredList itself.
func (l *WiredList) NonBlockingUpdate(v DatabaseBlock) bool {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+
+ if l.updatesCh == nil {
+ return false
+ }
+
select {
case l.updatesCh <- v:
return true
diff --git a/src/dbnode/storage/block/wired_list_test.go b/src/dbnode/storage/block/wired_list_test.go
index 8537112b9f..7a473ec0ce 100644
--- a/src/dbnode/storage/block/wired_list_test.go
+++ b/src/dbnode/storage/block/wired_list_test.go
@@ -27,6 +27,7 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/clock"
+ "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/x/checked"
@@ -36,7 +37,6 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"github.com/uber-go/tally"
- "github.com/m3db/m3/src/dbnode/namespace"
)
// The tests in this file use Start and Stop a lot to ensure
@@ -176,3 +176,36 @@ func wiredListTestWiredBlocksString(l *WiredList) string { // nolint: unused
}
return b.String()
}
+
+func TestWiredListUpdateNoopsAfterStop(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ l, _ := newTestWiredList(nil, nil)
+
+ opts := testOptions.SetWiredList(l)
+
+ require.NoError(t, l.Start())
+
+ var blocks []*dbBlock
+ for i := 0; i < 2; i++ {
+ bl := newTestUnwireableBlock(ctrl, fmt.Sprintf("foo.%d", i), opts)
+ blocks = append(blocks, bl)
+ }
+
+ l.BlockingUpdate(blocks[0])
+ l.BlockingUpdate(blocks[1])
+ require.NoError(t, l.Stop())
+ l.BlockingUpdate(blocks[0])
+ l.NonBlockingUpdate(blocks[0])
+
+ // Order due to LRU should be: 0, 1, since updates after Stop are rejected
+ require.Equal(t, blocks[0], l.root.next())
+ require.Equal(t, blocks[1], l.root.next().next())
+
+ // Assert end
+ require.Equal(t, &l.root, l.root.next().next().next())
+
+ // Assert tail
+ require.Equal(t, blocks[1], l.root.prev())
+}
diff --git a/src/dbnode/storage/bootstrap.go b/src/dbnode/storage/bootstrap.go
index 9c2d3047f8..2c569aee3a 100644
--- a/src/dbnode/storage/bootstrap.go
+++ b/src/dbnode/storage/bootstrap.go
@@ -28,8 +28,10 @@ import (
"github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
+ "github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/instrument"
+ xtime "github.com/m3db/m3/src/x/time"
"github.com/uber-go/tally"
"go.uber.org/zap"
@@ -83,7 +85,8 @@ type bootstrapManager struct {
hasPending bool
status tally.Gauge
bootstrapDuration tally.Timer
- lastBootstrapCompletionTime time.Time
+ durableStatus tally.Gauge
+ lastBootstrapCompletionTime xtime.UnixNano
}
func newBootstrapManager(
@@ -102,6 +105,7 @@ func newBootstrapManager(
processProvider: opts.BootstrapProcessProvider(),
status: scope.Gauge("bootstrapped"),
bootstrapDuration: scope.Timer("bootstrap-duration"),
+ durableStatus: scope.Gauge("bootstrapped-durable"),
}
m.bootstrapFn = m.bootstrap
return m
@@ -114,8 +118,11 @@ func (m *bootstrapManager) IsBootstrapped() bool {
return state == Bootstrapped
}
-func (m *bootstrapManager) LastBootstrapCompletionTime() (time.Time, bool) {
- return m.lastBootstrapCompletionTime, !m.lastBootstrapCompletionTime.IsZero()
+func (m *bootstrapManager) LastBootstrapCompletionTime() (xtime.UnixNano, bool) {
+ m.RLock()
+ bsTime := m.lastBootstrapCompletionTime
+ m.RUnlock()
+ return bsTime, bsTime > 0
}
func (m *bootstrapManager) Bootstrap() (BootstrapResult, error) {
@@ -138,7 +145,7 @@ func (m *bootstrapManager) Bootstrap() (BootstrapResult, error) {
// NB(xichen): disable filesystem manager before we bootstrap to minimize
// the impact of file operations on bootstrapping performance
- m.mediator.DisableFileOps()
+ m.mediator.DisableFileOpsAndWait()
defer m.mediator.EnableFileOps()
// Keep performing bootstraps until none pending and no error returned.
@@ -188,8 +195,9 @@ func (m *bootstrapManager) Bootstrap() (BootstrapResult, error) {
// load to the cluster. It turns out to be better to let ticking happen naturally
// on its own course so that the load of ticking and flushing is more spread out
// across the cluster.
-
- m.lastBootstrapCompletionTime = m.nowFn()
+ m.Lock()
+ m.lastBootstrapCompletionTime = xtime.ToUnixNano(m.nowFn())
+ m.Unlock()
return result, nil
}
@@ -199,6 +207,12 @@ func (m *bootstrapManager) Report() {
} else {
m.status.Update(0)
}
+
+ if m.database.IsBootstrappedAndDurable() {
+ m.durableStatus.Update(1)
+ } else {
+ m.durableStatus.Update(0)
+ }
}
type bootstrapNamespace struct {
@@ -207,6 +221,9 @@ type bootstrapNamespace struct {
}
func (m *bootstrapManager) bootstrap() error {
+ ctx := context.NewContext()
+ defer ctx.Close()
+
// NB(r): construct new instance of the bootstrap process to avoid
// state being kept around by bootstrappers.
process, err := m.processProvider.Provide()
@@ -214,7 +231,7 @@ func (m *bootstrapManager) bootstrap() error {
return err
}
- namespaces, err := m.database.GetOwnedNamespaces()
+ namespaces, err := m.database.OwnedNamespaces()
if err != nil {
return err
}
@@ -248,7 +265,7 @@ func (m *bootstrapManager) bootstrap() error {
i, namespace := i, namespace
prepareWg.Add(1)
go func() {
- shards, err := namespace.PrepareBootstrap()
+ shards, err := namespace.PrepareBootstrap(ctx)
prepareLock.Lock()
defer func() {
@@ -298,19 +315,7 @@ func (m *bootstrapManager) bootstrap() error {
// actually exist on disk (since bootstrappers can write
// new blocks to disk).
hooks := bootstrap.NewNamespaceHooks(bootstrap.NamespaceHooksOptions{
- BootstrapSourceEnd: func() error {
- var wg sync.WaitGroup
- for _, shard := range ns.shards {
- shard := shard
- wg.Add(1)
- go func() {
- shard.UpdateFlushStates()
- wg.Done()
- }()
- }
- wg.Wait()
- return nil
- },
+ BootstrapSourceEnd: newBootstrapSourceEndHook(ns.shards),
})
accumulator := NewDatabaseNamespaceDataAccumulator(ns.namespace)
@@ -330,7 +335,7 @@ func (m *bootstrapManager) bootstrap() error {
m.log.Info("bootstrap started", logFields...)
// Run the bootstrap.
- bootstrapResult, err := process.Run(start, targets)
+ bootstrapResult, err := process.Run(ctx, start, targets)
bootstrapDuration := m.nowFn().Sub(start)
m.bootstrapDuration.Record(bootstrapDuration)
@@ -361,7 +366,7 @@ func (m *bootstrapManager) bootstrap() error {
return err
}
- if err := namespace.Bootstrap(result); err != nil {
+ if err := namespace.Bootstrap(ctx, result); err != nil {
m.log.Info("bootstrap error", append(logFields, []zapcore.Field{
zap.String("namespace", id.String()),
zap.Error(err),
diff --git a/src/dbnode/storage/bootstrap/bootstrap_mock.go b/src/dbnode/storage/bootstrap/bootstrap_mock.go
index 374b951130..f3e2297922 100644
--- a/src/dbnode/storage/bootstrap/bootstrap_mock.go
+++ b/src/dbnode/storage/bootstrap/bootstrap_mock.go
@@ -31,6 +31,7 @@ import (
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/topology"
+ "github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/golang/mock/gomock"
@@ -124,18 +125,55 @@ func (m *MockProcess) EXPECT() *MockProcessMockRecorder {
}
// Run mocks base method
-func (m *MockProcess) Run(start time.Time, namespaces []ProcessNamespace) (NamespaceResults, error) {
+func (m *MockProcess) Run(ctx context.Context, start time.Time, namespaces []ProcessNamespace) (NamespaceResults, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Run", start, namespaces)
+ ret := m.ctrl.Call(m, "Run", ctx, start, namespaces)
ret0, _ := ret[0].(NamespaceResults)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Run indicates an expected call of Run
-func (mr *MockProcessMockRecorder) Run(start, namespaces interface{}) *gomock.Call {
+func (mr *MockProcessMockRecorder) Run(ctx, start, namespaces interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockProcess)(nil).Run), start, namespaces)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockProcess)(nil).Run), ctx, start, namespaces)
+}
+
+// MockHook is a mock of Hook interface
+type MockHook struct {
+ ctrl *gomock.Controller
+ recorder *MockHookMockRecorder
+}
+
+// MockHookMockRecorder is the mock recorder for MockHook
+type MockHookMockRecorder struct {
+ mock *MockHook
+}
+
+// NewMockHook creates a new mock instance
+func NewMockHook(ctrl *gomock.Controller) *MockHook {
+ mock := &MockHook{ctrl: ctrl}
+ mock.recorder = &MockHookMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockHook) EXPECT() *MockHookMockRecorder {
+ return m.recorder
+}
+
+// Run mocks base method
+func (m *MockHook) Run() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Run")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Run indicates an expected call of Run
+func (mr *MockHookMockRecorder) Run() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockHook)(nil).Run))
}
// MockNamespaceDataAccumulator is a mock of NamespaceDataAccumulator interface
@@ -525,18 +563,18 @@ func (mr *MockBootstrapperMockRecorder) String() *gomock.Call {
}
// Bootstrap mocks base method
-func (m *MockBootstrapper) Bootstrap(namespaces Namespaces) (NamespaceResults, error) {
+func (m *MockBootstrapper) Bootstrap(ctx context.Context, namespaces Namespaces) (NamespaceResults, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Bootstrap", namespaces)
+ ret := m.ctrl.Call(m, "Bootstrap", ctx, namespaces)
ret0, _ := ret[0].(NamespaceResults)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Bootstrap indicates an expected call of Bootstrap
-func (mr *MockBootstrapperMockRecorder) Bootstrap(namespaces interface{}) *gomock.Call {
+func (mr *MockBootstrapperMockRecorder) Bootstrap(ctx, namespaces interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrap", reflect.TypeOf((*MockBootstrapper)(nil).Bootstrap), namespaces)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrap", reflect.TypeOf((*MockBootstrapper)(nil).Bootstrap), ctx, namespaces)
}
// MockSource is a mock of Source interface
@@ -593,16 +631,16 @@ func (mr *MockSourceMockRecorder) AvailableIndex(ns, shardsTimeRanges, opts inte
}
// Read mocks base method
-func (m *MockSource) Read(namespaces Namespaces) (NamespaceResults, error) {
+func (m *MockSource) Read(ctx context.Context, namespaces Namespaces) (NamespaceResults, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Read", namespaces)
+ ret := m.ctrl.Call(m, "Read", ctx, namespaces)
ret0, _ := ret[0].(NamespaceResults)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Read indicates an expected call of Read
-func (mr *MockSourceMockRecorder) Read(namespaces interface{}) *gomock.Call {
+func (mr *MockSourceMockRecorder) Read(ctx, namespaces interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockSource)(nil).Read), namespaces)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockSource)(nil).Read), ctx, namespaces)
}
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/README.md b/src/dbnode/storage/bootstrap/bootstrapper/README.md
index 2be0917566..b062aaac3d 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/README.md
+++ b/src/dbnode/storage/bootstrap/bootstrapper/README.md
@@ -7,6 +7,7 @@ The collection of bootstrappers comprise the task executed when bootstrapping a
- `fs`: The filesystem bootstrapper, used to bootstrap as much data as possible from the local filesystem.
- `peers`: The peers bootstrapper, used to bootstrap any remaining data from peers. This is used for a full node join too.
- `commitlog`: The commit log bootstrapper, currently only used in the case that peers bootstrapping fails. Once the current block is being snapshotted frequently to disk it might be faster and make more sense to not actively use the peers bootstrapper and just use a combination of the filesystem bootstrapper and the minimal time range required from the commit log bootstrapper.
+ - *NOTE*: the commitlog bootstrapper is special-cased in that it runs for the *entire* bootstrappable range per shard, whereas other bootstrappers fill in the unfulfilled gaps as bootstrapping progresses (see the sketch below).
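+
+A minimal sketch of this special case (illustrative only, not code from this package): most sources consume the unfulfilled `ShardTimeRanges` handed down by earlier bootstrappers, while the commitlog source reads `TargetShardTimeRanges`, which covers the entire original target range per shard.
+
+```go
+package bootstrapper
+
+import (
+    "github.com/m3db/m3/src/dbnode/storage/bootstrap"
+    "github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
+)
+
+// rangesToRead is a hypothetical helper showing which data ranges a
+// bootstrap source would consume for a given namespace.
+func rangesToRead(isCommitLog bool, ns bootstrap.Namespace) result.ShardTimeRanges {
+    if isCommitLog {
+        // The commitlog bootstrapper runs for the whole bootstrappable range.
+        return ns.DataRunOptions.TargetShardTimeRanges
+    }
+    // Other bootstrappers only fill the remaining unfulfilled gaps.
+    return ns.DataRunOptions.ShardTimeRanges
+}
+```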
## Cache policies
@@ -23,3 +24,26 @@ The peers bootstrapper similarly bootstraps all the data from peers that the fil
For the recently read policy the filesystem bootstrapper will simply fulfill the requested time ranges without actually loading the series and blocks from the files it discovers. This relies on data being fetched lazily from the filesystem when data is required for a series that does not live on heap.
The peers bootstrapper will bootstrap all time ranges requested, and if performing a bootstrap with persistence enabled for a time range, will write the data to disk and then remove the results from memory. A bootstrap with persistence enabled is used for any data that is immutable at the time that bootstrapping commences. For time ranges that are mutable the peer bootstrapper will still write the data out to disk in a durable manner, but in the form of a snapshot, and the series and blocks will still be returned directly as a result from the bootstrapper. This enables the commit log bootstrapper to recover the data in case the node shuts down before the in-memory data can be flushed.
+
+## Topology Changes
+
+When nodes are added to a replication group, shards are given away to the joining node. Those shards are closed, and we re-bootstrap with the shards that we still own.
+When nodes are removed from a replication group, shards from the removed node are given to the remaining nodes in the replication group, which then bootstrap the "new" shards assigned to them.
+Note that we still take writes for shards that we own while bootstrapping; however, we do not allow warm/cold flushes to happen while bootstrapping.
+
+For example, see the following sequences:
+(Node add)
+- Node 1:
+ - Initial bootstrap (256 shards)
+ - Node add
+ - Bootstrap (128 shards) // These are the remaining shards it owns.
+- Node 2:
+ - Node add
+ - Initial bootstrap (128 shards) // These are received from Node 1
+
+(Node remove)
+- Node 1:
+ - Node remove
+ - Bootstrap (128 shards) // These are received from Node 2; it now owns 256.
+- Node 2:
+ - Node remove
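+
+The shard counts above follow from spreading a fixed total number of shards evenly across the nodes in the replication group. A tiny sketch of the arithmetic (assuming shards divide evenly, which real placements do not require):
+
+```go
+package main
+
+import "fmt"
+
+// shardsPerNode returns how many shards each node owns when a fixed
+// total is spread evenly across a replication group.
+func shardsPerNode(totalShards, numNodes int) int {
+    return totalShards / numNodes
+}
+
+func main() {
+    fmt.Println(shardsPerNode(256, 1)) // single node: 256 shards
+    fmt.Println(shardsPerNode(256, 2)) // after node add: 128 shards each
+}
+```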
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/base.go b/src/dbnode/storage/bootstrap/bootstrapper/base.go
index f1df77cbff..c6e78d36cb 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/base.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/base.go
@@ -25,6 +25,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
+ "github.com/m3db/m3/src/x/context"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
@@ -75,6 +76,7 @@ func (b baseBootstrapper) String() string {
}
func (b baseBootstrapper) Bootstrap(
+ ctx context.Context,
namespaces bootstrap.Namespaces,
) (bootstrap.NamespaceResults, error) {
logFields := []zapcore.Field{
@@ -135,7 +137,7 @@ func (b baseBootstrapper) Bootstrap(
b.log.Info("bootstrap from source started", logFields...)
// Run the bootstrap source.
- currResults, err := b.src.Read(curr)
+ currResults, err := b.src.Read(ctx, curr)
logFields = append(logFields, zap.Duration("took", nowFn().Sub(begin)))
if err != nil {
@@ -164,7 +166,7 @@ func (b baseBootstrapper) Bootstrap(
// If there are some time ranges the current bootstrapper could not fulfill,
// that we can attempt then pass it along to the next bootstrapper.
if next.Namespaces.Len() > 0 {
- nextResults, err := b.next.Bootstrap(next)
+ nextResults, err := b.next.Bootstrap(ctx, next)
if err != nil {
return bootstrap.NamespaceResults{}, err
}
@@ -240,9 +242,9 @@ func (b baseBootstrapper) logSuccessAndDetermineCurrResultsUnfulfilledAndNextBoo
nextNamespace.DataRunOptions.ShardTimeRanges = dataUnfulfilled.Copy()
var (
- indexCurrRequested = result.ShardTimeRanges{}
- indexCurrFulfilled = result.ShardTimeRanges{}
- indexUnfulfilled = result.ShardTimeRanges{}
+ indexCurrRequested = result.NewShardTimeRanges()
+ indexCurrFulfilled = result.NewShardTimeRanges()
+ indexUnfulfilled = result.NewShardTimeRanges()
)
if currNamespace.Metadata.Options().IndexOptions().Enabled() {
// Calculate bootstrap time ranges.
@@ -266,12 +268,11 @@ func (b baseBootstrapper) logSuccessAndDetermineCurrResultsUnfulfilledAndNextBoo
// Set the modified result.
currResults.Results.Set(id, currResult)
- // Set the next bootstrapper namespace run options if we need to bootstrap
- // further time ranges.
- if !nextNamespace.DataRunOptions.ShardTimeRanges.IsEmpty() ||
- !nextNamespace.IndexRunOptions.ShardTimeRanges.IsEmpty() {
- next.Namespaces.Set(id, nextNamespace)
- }
+ // Always set the next bootstrapper namespace run options regardless of
+ // whether there are unfulfilled index/data shard time ranges.
+ // NB(bodu): We short circuit directly in the peers bootstrapper, and the
+ // commitlog bootstrapper should always run for all time ranges.
+ next.Namespaces.Set(id, nextNamespace)
// Log the result.
_, _, dataRangeRequested := dataCurrRequested.MinMaxRange()
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/base_test.go b/src/dbnode/storage/bootstrap/bootstrapper/base_test.go
index 71eb94f8e2..8aa123d0f0 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/base_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/base_test.go
@@ -27,6 +27,7 @@ import (
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
+ "github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
@@ -69,7 +70,9 @@ func testTargetRanges() xtime.Ranges {
}
func testShardTimeRanges() result.ShardTimeRanges {
- return map[uint32]xtime.Ranges{testShard: testTargetRanges()}
+ r := result.NewShardTimeRanges()
+ r.Set(testShard, testTargetRanges())
+ return r
}
func testResult(
@@ -78,9 +81,8 @@ func testResult(
shard uint32,
unfulfilledRange xtime.Ranges,
) bootstrap.NamespaceResults {
- unfulfilled := result.ShardTimeRanges{
- shard: unfulfilledRange,
- }
+ unfulfilled := result.NewShardTimeRanges()
+ unfulfilled.Set(shard, unfulfilledRange)
opts := bootstrap.NamespaceResultsMapOptions{}
results := bootstrap.NewNamespaceResultsMap(opts)
@@ -127,12 +129,13 @@ func TestBaseBootstrapperEmptyRangeWithIndex(t *testing.T) {
func testBaseBootstrapperEmptyRange(t *testing.T, withIndex bool) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- src, _, base := testBaseBootstrapper(t, ctrl)
+ src, next, base := testBaseBootstrapper(t, ctrl)
testNs := testNsMetadata(t, withIndex)
- rngs := result.ShardTimeRanges{}
+ rngs := result.NewShardTimeRanges()
unfulfilled := xtime.NewRanges()
nsResults := testResult(testNs, withIndex, testShard, unfulfilled)
+ nextResult := testResult(testNs, withIndex, testShard, xtime.NewRanges())
shardRangeMatcher := bootstrap.ShardTimeRangesMatcher{Ranges: rngs}
src.EXPECT().AvailableData(testNs, shardRangeMatcher, testDefaultRunOpts).
Return(rngs, nil)
@@ -145,10 +148,15 @@ func testBaseBootstrapperEmptyRange(t *testing.T, withIndex bool) {
defer tester.Finish()
matcher := bootstrap.NamespaceMatcher{Namespaces: tester.Namespaces}
- src.EXPECT().Read(matcher).DoAndReturn(
- func(namespaces bootstrap.Namespaces) (bootstrap.NamespaceResults, error) {
+ src.EXPECT().
+ Read(gomock.Any(), matcher).
+ DoAndReturn(func(
+ ctx context.Context,
+ namespaces bootstrap.Namespaces,
+ ) (bootstrap.NamespaceResults, error) {
return nsResults, nil
})
+ next.EXPECT().Bootstrap(gomock.Any(), matcher).Return(nextResult, nil)
// Test non-nil empty range
tester.TestBootstrapWith(base)
@@ -170,11 +178,12 @@ func TestBaseBootstrapperCurrentNoUnfulfilledWithIndex(t *testing.T) {
func testBaseBootstrapperCurrentNoUnfulfilled(t *testing.T, withIndex bool) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- src, _, base := testBaseBootstrapper(t, ctrl)
+ src, next, base := testBaseBootstrapper(t, ctrl)
testNs := testNsMetadata(t, withIndex)
unfulfilled := xtime.NewRanges()
nsResults := testResult(testNs, withIndex, testShard, unfulfilled)
+ nextResult := testResult(testNs, withIndex, testShard, xtime.NewRanges())
targetRanges := testShardTimeRanges()
src.EXPECT().AvailableData(testNs, targetRanges, testDefaultRunOpts).
@@ -189,10 +198,15 @@ func testBaseBootstrapperCurrentNoUnfulfilled(t *testing.T, withIndex bool) {
defer tester.Finish()
matcher := bootstrap.NamespaceMatcher{Namespaces: tester.Namespaces}
- src.EXPECT().Read(matcher).DoAndReturn(
- func(namespaces bootstrap.Namespaces) (bootstrap.NamespaceResults, error) {
+ src.EXPECT().
+ Read(gomock.Any(), matcher).
+ DoAndReturn(func(
+ ctx context.Context,
+ namespaces bootstrap.Namespaces,
+ ) (bootstrap.NamespaceResults, error) {
return nsResults, nil
})
+ next.EXPECT().Bootstrap(gomock.Any(), matcher).Return(nextResult, nil)
tester.TestBootstrapWith(base)
assert.Equal(t, nsResults, tester.Results)
@@ -235,8 +249,8 @@ func testBaseBootstrapperCurrentSomeUnfulfilled(t *testing.T, withIndex bool) {
defer tester.Finish()
matcher := bootstrap.NamespaceMatcher{Namespaces: tester.Namespaces}
- src.EXPECT().Read(matcher).Return(currResult, nil)
- next.EXPECT().Bootstrap(matcher).Return(nextResult, nil)
+ src.EXPECT().Read(gomock.Any(), matcher).Return(currResult, nil)
+ next.EXPECT().Bootstrap(gomock.Any(), matcher).Return(nextResult, nil)
tester.TestBootstrapWith(base)
tester.TestUnfulfilledForNamespaceIsEmpty(testNs)
@@ -255,11 +269,11 @@ func testBasebootstrapperNext(
src.EXPECT().
AvailableData(testNs, targetRanges, testDefaultRunOpts).
- Return(nil, nil)
+ Return(result.NewShardTimeRanges(), nil)
if withIndex {
src.EXPECT().
AvailableIndex(testNs, targetRanges, testDefaultRunOpts).
- Return(nil, nil)
+ Return(result.NewShardTimeRanges(), nil)
}
tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, targetRanges,
@@ -269,8 +283,8 @@ func testBasebootstrapperNext(
emptyResult := testEmptyResult(testNs)
nextResult := testResult(testNs, withIndex, testShard, nextUnfulfilled)
matcher := bootstrap.NamespaceMatcher{Namespaces: tester.Namespaces}
- src.EXPECT().Read(matcher).Return(emptyResult, nil)
- next.EXPECT().Bootstrap(matcher).Return(nextResult, nil)
+ src.EXPECT().Read(gomock.Any(), matcher).Return(emptyResult, nil)
+ next.EXPECT().Bootstrap(gomock.Any(), matcher).Return(nextResult, nil)
tester.TestBootstrapWith(base)
@@ -280,7 +294,7 @@ func testBasebootstrapperNext(
expected := ex.DataResult.Unfulfilled()
expectedIdx := ex.IndexResult.Unfulfilled()
if !withIndex {
- expectedIdx = result.ShardTimeRanges{}
+ expectedIdx = result.NewShardTimeRanges()
}
tester.TestUnfulfilledForNamespace(testNs, expected, expectedIdx)
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source.go b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source.go
index 6f0ab39023..91af1ba4a3 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source.go
@@ -37,6 +37,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/topology"
+ "github.com/m3db/m3/src/dbnode/tracepoint"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/context"
@@ -168,24 +169,11 @@ type readNamespaceResult struct {
// TODO(rartoul): Make this take the SnapshotMetadata files into account to reduce the
// number of commitlogs / snapshots that we need to read.
func (s *commitLogSource) Read(
+ ctx context.Context,
namespaces bootstrap.Namespaces,
) (bootstrap.NamespaceResults, error) {
- timeRangesEmpty := true
- for _, elem := range namespaces.Namespaces.Iter() {
- namespace := elem.Value()
- dataRangesNotEmpty := !namespace.DataRunOptions.ShardTimeRanges.IsEmpty()
-
- indexEnabled := namespace.Metadata.Options().IndexOptions().Enabled()
- indexRangesNotEmpty := indexEnabled && !namespace.IndexRunOptions.ShardTimeRanges.IsEmpty()
- if dataRangesNotEmpty || indexRangesNotEmpty {
- timeRangesEmpty = false
- break
- }
- }
- if timeRangesEmpty {
- // Return empty result with no unfulfilled ranges.
- return bootstrap.NewNamespaceResults(namespaces), nil
- }
+ ctx, span, _ := ctx.StartSampledTraceSpan(tracepoint.BootstrapperCommitLogSourceRead)
+ defer span.Finish()
var (
// Emit bootstrapping gauge for duration of ReadData.
@@ -202,16 +190,20 @@ func (s *commitLogSource) Read(
startSnapshotsRead := s.nowFn()
s.log.Info("read snapshots start")
+ span.LogEvent("read_snapshots_start")
+
for _, elem := range namespaceIter {
ns := elem.Value()
accumulator := ns.DataAccumulator
// NB(r): Combine all shard time ranges across data and index
// so we can do in one go.
- shardTimeRanges := result.ShardTimeRanges{}
- shardTimeRanges.AddRanges(ns.DataRunOptions.ShardTimeRanges)
+ shardTimeRanges := result.NewShardTimeRanges()
+ // NB(bodu): Use TargetShardTimeRanges which covers the entire original target shard range
+ // since the commitlog bootstrapper should run for the entire bootstrappable range per shard.
+ shardTimeRanges.AddRanges(ns.DataRunOptions.TargetShardTimeRanges)
if ns.Metadata.Options().IndexOptions().Enabled() {
- shardTimeRanges.AddRanges(ns.IndexRunOptions.ShardTimeRanges)
+ shardTimeRanges.AddRanges(ns.IndexRunOptions.TargetShardTimeRanges)
}
namespaceResults[ns.Metadata.ID().String()] = &readNamespaceResult{
@@ -240,7 +232,7 @@ func (s *commitLogSource) Read(
// Start by reading any available snapshot files.
blockSize := ns.Metadata.Options().RetentionOptions().BlockSize()
- for shard, tr := range shardTimeRanges {
+ for shard, tr := range shardTimeRanges.Iter() {
err := s.bootstrapShardSnapshots(
ns.Metadata, accumulator, shard, tr, blockSize,
mostRecentCompleteSnapshotByBlockShard)
@@ -252,6 +244,7 @@ func (s *commitLogSource) Read(
s.log.Info("read snapshots done",
zap.Duration("took", s.nowFn().Sub(startSnapshotsRead)))
+ span.LogEvent("read_snapshots_done")
// Setup the series accumulator pipeline.
var (
@@ -294,17 +287,19 @@ func (s *commitLogSource) Read(
startCommitLogsRead = s.nowFn()
)
s.log.Info("read commit logs start")
+ span.LogEvent("read_commitlogs_start")
defer func() {
datapointsRead := 0
for _, worker := range workers {
datapointsRead += worker.datapointsRead
}
- s.log.Info("read finished",
- zap.Stringer("took", s.nowFn().Sub(startCommitLogsRead)),
+ s.log.Info("read commit logs done",
+ zap.Duration("took", s.nowFn().Sub(startCommitLogsRead)),
zap.Int("datapointsRead", datapointsRead),
zap.Int("datapointsSkippedNotBootstrappingNamespace", datapointsSkippedNotBootstrappingNamespace),
zap.Int("datapointsSkippedNotBootstrappingShard", datapointsSkippedNotBootstrappingShard),
zap.Int("datapointsSkippedShardNoLongerOwned", datapointsSkippedShardNoLongerOwned))
+ span.LogEvent("read_commitlogs_done")
}()
iter, corruptFiles, err := s.newIteratorFn(iterOpts)
@@ -425,21 +420,6 @@ func (s *commitLogSource) Read(
// Resolve the series in the accumulator.
accumulator := ns.accumulator
- // NB(r): Make sure that only series.EncodedTags are used and not
- // series.Tags (we explicitly ask for references to be returned and to
- // avoid decoding the tags if we don't have to).
- if decodedTags := len(entry.Series.Tags.Values()); decodedTags > 0 {
- msg := "commit log reader expects encoded tags"
- instrumentOpts := s.opts.ResultOptions().InstrumentOptions()
- instrument.EmitAndLogInvariantViolation(instrumentOpts, func(l *zap.Logger) {
- l.Error(msg,
- zap.Int("decodedTags", decodedTags),
- zap.Int("encodedTags", len(entry.Series.EncodedTags)))
- })
- err := instrument.InvariantErrorf(fmt.Sprintf("%s: decoded=%d", msg, decodedTags))
- return bootstrap.NamespaceResults{}, err
- }
-
var tagIter ident.TagIterator
if len(entry.Series.EncodedTags) > 0 {
tagDecoderCheckedBytes.Reset(entry.Series.EncodedTags)
@@ -495,8 +475,8 @@ func (s *commitLogSource) Read(
// NB(r): This can occur when a topology change happens then we
// bootstrap from the commit log data that the node no longer owns.
shard := seriesEntry.series.Shard
- _, bootstrapping := seriesEntry.namespace.dataAndIndexShardRanges[shard]
- if !bootstrapping {
+ _, ok = seriesEntry.namespace.dataAndIndexShardRanges.Get(shard)
+ if !ok {
datapointsSkippedNotBootstrappingShard++
continue
}
@@ -576,7 +556,7 @@ func (s *commitLogSource) snapshotFilesByShard(
shardsTimeRanges result.ShardTimeRanges,
) (map[uint32]fs.FileSetFilesSlice, error) {
snapshotFilesByShard := map[uint32]fs.FileSetFilesSlice{}
- for shard := range shardsTimeRanges {
+ for shard := range shardsTimeRanges.Iter() {
snapshotFiles, err := s.snapshotFilesFn(filePathPrefix, nsID, shard)
if err != nil {
return nil, err
@@ -604,7 +584,7 @@ func (s *commitLogSource) mostRecentCompleteSnapshotByBlockShard(
)
for currBlockStart := minBlock.Truncate(blockSize); currBlockStart.Before(maxBlock); currBlockStart = currBlockStart.Add(blockSize) {
- for shard := range shardsTimeRanges {
+ for shard := range shardsTimeRanges.Iter() {
// Anonymous func for easier clean up using defer.
func() {
var (
@@ -656,7 +636,7 @@ func (s *commitLogSource) mostRecentCompleteSnapshotByBlockShard(
zap.Time("blockStart", mostRecentSnapshot.ID.BlockStart),
zap.Uint32("shard", mostRecentSnapshot.ID.Shard),
zap.Int("index", mostRecentSnapshot.ID.VolumeIndex),
- zap.Strings("filepaths", mostRecentSnapshot.AbsoluteFilepaths),
+ zap.Strings("filepaths", mostRecentSnapshot.AbsoluteFilePaths),
zap.Error(err),
).
Error("error resolving snapshot time for snapshot file")
@@ -682,6 +662,30 @@ func (s *commitLogSource) bootstrapShardSnapshots(
blockSize time.Duration,
mostRecentCompleteSnapshotByBlockShard map[xtime.UnixNano]map[uint32]fs.FileSetFile,
) error {
+ // NB(bodu): We use info files on disk to check whether a snapshot should be loaded in as cold or warm.
+ // We do this instead of cross-referencing block starts against the current time so that we handle the
+ // case of bootstrapping a once-warm block start after a node has been shut down for a long time. We
+ // consider any block start we haven't flushed data for yet to be a warm block start.
+ fsOpts := s.opts.CommitLogOptions().FilesystemOptions()
+ readInfoFilesResults := fs.ReadInfoFiles(fsOpts.FilePathPrefix(), ns.ID(), shard,
+ fsOpts.InfoReaderBufferSize(), fsOpts.DecodingOptions(), persist.FileSetFlushType)
+ shardBlockStartsOnDisk := make(map[xtime.UnixNano]struct{})
+ for _, result := range readInfoFilesResults {
+ if err := result.Err.Error(); err != nil {
+ // If we couldn't read the info files then keep going to be consistent
+ // with the way the db shard updates its flush states in UpdateFlushStates().
+ s.log.Error("unable to read info files in commit log bootstrap",
+ zap.Uint32("shard", shard),
+ zap.Stringer("namespace", ns.ID()),
+ zap.String("filepath", result.Err.Filepath()),
+ zap.Error(err))
+ continue
+ }
+ info := result.Info
+ at := xtime.FromNanoseconds(info.BlockStart)
+ shardBlockStartsOnDisk[xtime.ToUnixNano(at)] = struct{}{}
+ }
+
rangeIter := shardTimeRanges.Iter()
for rangeIter.Next() {
var (
@@ -714,9 +718,13 @@ func (s *commitLogSource) bootstrapShardSnapshots(
continue
}
+ writeType := series.WarmWrite
+ if _, ok := shardBlockStartsOnDisk[xtime.ToUnixNano(blockStart)]; ok {
+ writeType = series.ColdWrite
+ }
if err := s.bootstrapShardBlockSnapshot(
ns, accumulator, shard, blockStart, blockSize,
- mostRecentCompleteSnapshotForShardBlock); err != nil {
+ mostRecentCompleteSnapshotForShardBlock, writeType); err != nil {
return err
}
}
@@ -732,6 +740,7 @@ func (s *commitLogSource) bootstrapShardBlockSnapshot(
blockStart time.Time,
blockSize time.Duration,
mostRecentCompleteSnapshot fs.FileSetFile,
+ writeType series.WriteType,
) error {
var (
bOpts = s.opts.ResultOptions()
@@ -787,16 +796,17 @@ func (s *commitLogSource) bootstrapShardBlockSnapshot(
dbBlock := blocksPool.Get()
dbBlock.Reset(blockStart, blockSize,
- ts.NewSegment(data, nil, ts.FinalizeHead), nsCtx)
+ ts.NewSegment(data, nil, 0, ts.FinalizeHead), nsCtx)
- // Resetting the block will trigger a checksum calculation, so use that instead
- // of calculating it twice.
+ // Resetting the block will trigger a checksum calculation, so use
+ // that instead of calculating it twice.
checksum, err := dbBlock.Checksum()
if err != nil {
return err
}
if checksum != expectedChecksum {
- return fmt.Errorf("checksum for series: %s was %d but expected %d", id, checksum, expectedChecksum)
+ return fmt.Errorf("checksum for series: %s was %d but expected %d",
+ id, checksum, expectedChecksum)
}
// NB(r): No parallelization required to checkout the series.
@@ -810,7 +820,7 @@ func (s *commitLogSource) bootstrapShardBlockSnapshot(
}
// Load into series.
- if err := ref.Series.LoadBlock(dbBlock, series.WarmWrite); err != nil {
+ if err := ref.Series.LoadBlock(dbBlock, writeType); err != nil {
return err
}
@@ -893,17 +903,11 @@ func (s *commitLogSource) startAccumulateWorker(worker *accumulateWorker) {
)
worker.datapointsRead++
- _, err := entry.Series.Write(ctx, dp.Timestamp, dp.Value,
+ _, _, err := entry.Series.Write(ctx, dp.Timestamp, dp.Value,
unit, annotation, series.WriteOptions{
- SchemaDesc: namespace.namespaceContext.Schema,
- // NB(r): Make sure this is the series we originally
- // checked out for writing too (which should be guaranteed
- // by the fact during shard tick we do not expire any
- // series unless they are bootstrapped).
- MatchUniqueIndex: true,
- MatchUniqueIndexValue: entry.UniqueIndex,
- BootstrapWrite: true,
- SkipOutOfRetention: true,
+ SchemaDesc: namespace.namespaceContext.Schema,
+ BootstrapWrite: true,
+ SkipOutOfRetention: true,
})
if err != nil {
// NB(r): Only log first error per worker since this could be very
@@ -929,7 +933,7 @@ func (s *commitLogSource) logAccumulateOutcome(
errs += worker.numErrors
}
if errs > 0 {
- s.log.Error("error bootstrapping from commit log", zap.Int("accmulateErrors", errs))
+ s.log.Error("error bootstrapping from commit log", zap.Int("accumulateErrors", errs))
}
if err := iter.Err(); err != nil {
s.log.Error("error reading commit log", zap.Error(err))
@@ -991,10 +995,10 @@ func (s *commitLogSource) availability(
) (result.ShardTimeRanges, error) {
var (
topoState = runOpts.InitialTopologyState()
- availableShardTimeRanges = result.ShardTimeRanges{}
+ availableShardTimeRanges = result.NewShardTimeRanges()
)
- for shardIDUint := range shardsTimeRanges {
+ for shardIDUint := range shardsTimeRanges.Iter() {
shardID := topology.ShardID(shardIDUint)
hostShardStates, ok := topoState.ShardStates[shardID]
if !ok {
@@ -1035,11 +1039,13 @@ func (s *commitLogSource) availability(
// to distinguish between "unfulfilled" data and "corrupt" data, then
// modify this to only say the commit log bootstrapper can fulfill
// "unfulfilled" data, but not corrupt data.
- availableShardTimeRanges[shardIDUint] = shardsTimeRanges[shardIDUint]
+ if tr, ok := shardsTimeRanges.Get(shardIDUint); ok {
+ availableShardTimeRanges.Set(shardIDUint, tr)
+ }
case shard.Unknown:
fallthrough
default:
- return result.ShardTimeRanges{}, fmt.Errorf("unknown shard state: %v", originShardState)
+ return result.NewShardTimeRanges(), fmt.Errorf("unknown shard state: %v", originShardState)
}
}
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_data_test.go b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_data_test.go
index 94a73fcbd2..b51069f090 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_data_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_data_test.go
@@ -62,10 +62,10 @@ func TestAvailableEmptyRangeError(t *testing.T) {
var (
opts = testDefaultOpts
src = newCommitLogSource(opts, fs.Inspection{})
- res, err = src.AvailableData(testNsMetadata(t), result.ShardTimeRanges{}, testDefaultRunOpts)
+ res, err = src.AvailableData(testNsMetadata(t), result.NewShardTimeRanges(), testDefaultRunOpts)
)
require.NoError(t, err)
- require.True(t, result.ShardTimeRanges{}.Equal(res))
+ require.True(t, result.NewShardTimeRanges().Equal(res))
}
func TestReadEmpty(t *testing.T) {
@@ -73,7 +73,7 @@ func TestReadEmpty(t *testing.T) {
src := newCommitLogSource(opts, fs.Inspection{})
md := testNsMetadata(t)
- target := result.ShardTimeRanges{}
+ target := result.NewShardTimeRanges()
tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, target, md)
defer tester.Finish()
@@ -97,18 +97,17 @@ func TestReadErrorOnNewIteratorError(t *testing.T) {
return nil, nil, errors.New("an error")
}
- ranges := xtime.Ranges{}
- ranges = ranges.AddRange(xtime.Range{
- Start: time.Now(),
- End: time.Now().Add(time.Hour),
- })
+ ranges := xtime.NewRanges(xtime.Range{Start: time.Now(), End: time.Now().Add(time.Hour)})
md := testNsMetadata(t)
- target := result.ShardTimeRanges{0: ranges}
+ target := result.NewShardTimeRanges().Set(0, ranges)
tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, target, md)
defer tester.Finish()
- res, err := src.Read(tester.Namespaces)
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ res, err := src.Read(ctx, tester.Namespaces)
require.Error(t, err)
require.Nil(t, res.Results)
tester.EnsureNoLoadedBlocks()
@@ -131,11 +130,7 @@ func testReadOrderedValues(t *testing.T, opts Options, md namespace.Metadata, se
start := now.Truncate(blockSize).Add(-blockSize)
end := now.Truncate(blockSize)
- ranges := xtime.Ranges{}
- ranges = ranges.AddRange(xtime.Range{
- Start: start,
- End: end,
- })
+ ranges := xtime.NewRanges(xtime.Range{Start: start, End: end})
foo := ts.Series{Namespace: nsCtx.ID, Shard: 0, ID: ident.StringID("foo")}
bar := ts.Series{Namespace: nsCtx.ID, Shard: 1, ID: ident.StringID("bar")}
@@ -159,7 +154,7 @@ func testReadOrderedValues(t *testing.T, opts Options, md namespace.Metadata, se
return newTestCommitLogIterator(values, nil), nil, nil
}
- targetRanges := result.ShardTimeRanges{0: ranges, 1: ranges}
+ targetRanges := result.NewShardTimeRanges().Set(0, ranges).Set(1, ranges)
tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, targetRanges, md)
defer tester.Finish()
@@ -187,11 +182,7 @@ func testReadUnorderedValues(t *testing.T, opts Options, md namespace.Metadata,
start := now.Truncate(blockSize).Add(-blockSize)
end := now.Truncate(blockSize)
- ranges := xtime.Ranges{}
- ranges = ranges.AddRange(xtime.Range{
- Start: start,
- End: end,
- })
+ ranges := xtime.NewRanges(xtime.Range{Start: start, End: end})
foo := ts.Series{Namespace: nsCtx.ID, Shard: 0, ID: ident.StringID("foo")}
@@ -212,7 +203,7 @@ func testReadUnorderedValues(t *testing.T, opts Options, md namespace.Metadata,
return newTestCommitLogIterator(values, nil), nil, nil
}
- targetRanges := result.ShardTimeRanges{0: ranges, 1: ranges}
+ targetRanges := result.NewShardTimeRanges().Set(0, ranges).Set(1, ranges)
tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, targetRanges, md)
defer tester.Finish()
@@ -243,11 +234,7 @@ func TestReadHandlesDifferentSeriesWithIdenticalUniqueIndex(t *testing.T) {
start := now.Truncate(blockSize).Add(-blockSize)
end := now.Truncate(blockSize)
- ranges := xtime.Ranges{}
- ranges = ranges.AddRange(xtime.Range{
- Start: start,
- End: end,
- })
+ ranges := xtime.NewRanges(xtime.Range{Start: start, End: end})
// All series need to be in the same shard to exercise the regression.
foo := ts.Series{
@@ -274,7 +261,7 @@ func TestReadHandlesDifferentSeriesWithIdenticalUniqueIndex(t *testing.T) {
return newTestCommitLogIterator(values, nil), nil, nil
}
- targetRanges := result.ShardTimeRanges{0: ranges, 1: ranges}
+ targetRanges := result.NewShardTimeRanges().Set(0, ranges).Set(1, ranges)
tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, targetRanges, md)
defer tester.Finish()
@@ -306,7 +293,7 @@ func testItMergesSnapshotsAndCommitLogs(t *testing.T, opts Options,
now = time.Now()
start = now.Truncate(blockSize).Add(-blockSize)
end = now.Truncate(blockSize)
- ranges = xtime.Ranges{}
+ ranges = xtime.NewRanges()
foo = ts.Series{Namespace: nsCtx.ID, Shard: 0, ID: ident.StringID("foo")}
commitLogValues = testValues{
@@ -319,7 +306,7 @@ func testItMergesSnapshotsAndCommitLogs(t *testing.T, opts Options,
commitLogValues = setAnn(commitLogValues)
}
- ranges = ranges.AddRange(xtime.Range{
+ ranges.AddRange(xtime.Range{
Start: start,
End: end,
})
@@ -344,7 +331,7 @@ func testItMergesSnapshotsAndCommitLogs(t *testing.T, opts Options,
VolumeIndex: 0,
},
// Make sure path passes the "is snapshot" check in SnapshotTimeAndID method.
- AbsoluteFilepaths: []string{"snapshots/checkpoint"},
+ AbsoluteFilePaths: []string{"snapshots/checkpoint"},
CachedHasCompleteCheckpointFile: fs.EvalTrue,
CachedSnapshotTime: start.Add(time.Minute),
},
@@ -411,7 +398,7 @@ func testItMergesSnapshotsAndCommitLogs(t *testing.T, opts Options,
return mockReader, nil
}
- targetRanges := result.ShardTimeRanges{0: ranges}
+ targetRanges := result.NewShardTimeRanges().Set(0, ranges)
tester := bootstrap.BuildNamespacesTesterWithReaderIteratorPool(
t,
testDefaultRunOpts,
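
The hunks above consistently replace value-semantics range construction (xtime.Ranges{} reassigned via AddRange) and result.ShardTimeRanges map literals with constructor-based APIs. A minimal sketch of the new calling pattern, assuming NewRanges is variadic over xtime.Range and ShardTimeRanges.Set returns the receiver for chaining, as the replacements above imply:

package example // hypothetical package, for illustration only

import (
	"time"

	"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
	xtime "github.com/m3db/m3/src/x/time"
)

// buildTargetRanges builds one bootstrap block for shards 0 and 1,
// mirroring the pattern the tests above migrate to.
func buildTargetRanges(start, end time.Time) result.ShardTimeRanges {
	ranges := xtime.NewRanges(xtime.Range{Start: start, End: end})
	return result.NewShardTimeRanges().
		Set(0, ranges).
		Set(1, ranges)
}

One consequence of the new API worth noting: AddRange now mutates the receiver in place (see the testItMergesSnapshotsAndCommitLogs hunk above, where the reassignment is dropped), so a Ranges value behaves as a mutable handle rather than an immutable value.
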
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_index_test.go b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_index_test.go
index b006595faf..eb0bf14de2 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_index_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_index_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -21,7 +21,6 @@
package commitlog
import (
- "fmt"
"testing"
"time"
@@ -112,20 +111,20 @@ func TestBootstrapIndex(t *testing.T) {
ID: ident.StringID("baz"), EncodedTags: bazTags}
// Make sure we can handle series that don't have tags.
untagged := ts.Series{UniqueIndex: 3, Namespace: testNamespaceID,
- Shard: shardn(5), ID: ident.StringID("untagged"), Tags: ident.Tags{}}
+ Shard: shardn(5), ID: ident.StringID("untagged")}
// Make sure we skip series that are not within the bootstrap range.
outOfRange := ts.Series{UniqueIndex: 4, Namespace: testNamespaceID,
- Shard: shardn(3), ID: ident.StringID("outOfRange"), Tags: ident.Tags{}}
+ Shard: shardn(3), ID: ident.StringID("outOfRange")}
// Make sure we skip and don't panic on writes for shards that are higher than the maximum we're trying to bootstrap.
shardTooHigh := ts.Series{UniqueIndex: 5, Namespace: testNamespaceID,
- Shard: shardn(100), ID: ident.StringID("shardTooHigh"), Tags: ident.Tags{}}
+ Shard: shardn(100), ID: ident.StringID("shardTooHigh")}
// Make sure we skip series for shards that have no requested bootstrap ranges. The shard for this write needs
// to be less than the highest shard we actually plan to bootstrap.
noShardBootstrapRange := ts.Series{UniqueIndex: 6, Namespace: testNamespaceID,
- Shard: shardn(4), ID: ident.StringID("noShardBootstrapRange"), Tags: ident.Tags{}}
+ Shard: shardn(4), ID: ident.StringID("noShardBootstrapRange")}
// Make sure it handles multiple namespaces
someOtherNamespace := ts.Series{UniqueIndex: 7, Namespace: testNamespaceID2,
- Shard: shardn(0), ID: ident.StringID("series_OtherNamespace"), Tags: ident.Tags{}}
+ Shard: shardn(0), ID: ident.StringID("series_OtherNamespace")}
valuesNs := testValues{
{foo, start, 1.0, xtime.Second, nil},
@@ -156,23 +155,25 @@ func TestBootstrapIndex(t *testing.T) {
return newTestCommitLogIterator(values, nil), nil, nil
}
- ranges := xtime.Ranges{}
- ranges = ranges.AddRange(xtime.Range{
- Start: start,
- End: start.Add(dataBlockSize),
- })
- ranges = ranges.AddRange(xtime.Range{
- Start: start.Add(dataBlockSize),
- End: start.Add(2 * dataBlockSize),
- })
- ranges = ranges.AddRange(xtime.Range{
- Start: start.Add(2 * dataBlockSize),
- End: start.Add(3 * dataBlockSize),
- })
+ ranges := xtime.NewRanges(
+ xtime.Range{Start: start, End: start.Add(dataBlockSize)},
+ xtime.Range{Start: start.Add(dataBlockSize), End: start.Add(2 * dataBlockSize)},
+ xtime.Range{Start: start.Add(2 * dataBlockSize), End: start.Add(3 * dataBlockSize)})
// Don't include ranges for shard 4 as that's how we're testing the noShardBootstrapRange series.
- targetRanges := result.ShardTimeRanges{
- shardn(0): ranges, shardn(1): ranges, shardn(2): ranges, shardn(5): ranges}
+ targetRanges := result.NewShardTimeRanges().Set(
+ shardn(0),
+ ranges,
+ ).Set(
+ shardn(1),
+ ranges,
+ ).Set(
+ shardn(2),
+ ranges,
+ ).Set(
+ shardn(5),
+ ranges,
+ )
tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, targetRanges, md1, md2, md3)
defer tester.Finish()
@@ -220,7 +221,7 @@ func TestBootstrapIndexEmptyShardTimeRanges(t *testing.T) {
return newTestCommitLogIterator(values, nil), nil, nil
}
- target := result.ShardTimeRanges{}
+ target := result.NewShardTimeRanges()
tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, target, md)
defer tester.Finish()
@@ -229,183 +230,3 @@ func TestBootstrapIndexEmptyShardTimeRanges(t *testing.T) {
tester.EnsureNoLoadedBlocks()
tester.EnsureNoWrites()
}
-
-func verifyIndexResultsAreCorrect(
- values testValues,
- seriesNotToExpect map[string]struct{},
- indexResults result.IndexResults,
- indexBlockSize time.Duration,
-) error {
- expectedIndexBlocks := map[xtime.UnixNano]map[string]map[string]string{}
- for _, value := range values {
- if _, shouldNotExpect := seriesNotToExpect[value.s.ID.String()]; shouldNotExpect {
- continue
- }
-
- indexBlockStart := value.t.Truncate(indexBlockSize)
- expectedSeries, ok := expectedIndexBlocks[xtime.ToUnixNano(indexBlockStart)]
- if !ok {
- expectedSeries = map[string]map[string]string{}
- expectedIndexBlocks[xtime.ToUnixNano(indexBlockStart)] = expectedSeries
- }
-
- seriesID := string(value.s.ID.Bytes())
-
- existingTags, ok := expectedSeries[seriesID]
- if !ok {
- existingTags = map[string]string{}
- expectedSeries[seriesID] = existingTags
- }
- for _, tag := range value.s.Tags.Values() {
- existingTags[tag.Name.String()] = tag.Value.String()
- }
- }
-
- for indexBlockStart, expectedSeries := range expectedIndexBlocks {
- indexBlock, ok := indexResults[indexBlockStart]
- if !ok {
- return fmt.Errorf("missing index block: %v", indexBlockStart.ToTime().String())
- }
-
- if indexBlock.Fulfilled().IsEmpty() {
- return fmt.Errorf("index-block %v fulfilled is empty", indexBlockStart)
- }
-
- for _, seg := range indexBlock.Segments() {
- reader, err := seg.Reader()
- if err != nil {
- return err
- }
-
- docs, err := reader.AllDocs()
- if err != nil {
- return err
- }
-
- seenSeries := map[string]struct{}{}
- for docs.Next() {
- curr := docs.Current()
-
- _, ok := seenSeries[string(curr.ID)]
- if ok {
- return fmt.Errorf(
- "saw duplicate series: %v for block %v",
- string(curr.ID), indexBlockStart.ToTime().String())
- }
- seenSeries[string(curr.ID)] = struct{}{}
-
- expectedTags := expectedSeries[string(curr.ID)]
- matchingTags := map[string]struct{}{}
- for _, tag := range curr.Fields {
- if _, ok := matchingTags[string(tag.Name)]; ok {
- return fmt.Errorf("saw duplicate tag: %v for id: %v", tag.Name, string(curr.ID))
- }
- matchingTags[string(tag.Name)] = struct{}{}
-
- tagValue, ok := expectedTags[string(tag.Name)]
- if !ok {
- return fmt.Errorf("saw unexpected tag: %v for id: %v", tag.Name, string(curr.ID))
- }
-
- if tagValue != string(tag.Value) {
- return fmt.Errorf(
- "tag values for series: %v do not match. Expected: %v but got: %v",
- curr.ID, tagValue, string(tag.Value),
- )
- }
- }
-
- if len(expectedTags) != len(matchingTags) {
- return fmt.Errorf(
- "number of tags for series: %v do not match. Expected: %v, but got: %v",
- string(curr.ID), len(expectedTags), len(matchingTags),
- )
- }
- }
-
- if docs.Err() != nil {
- return docs.Err()
- }
-
- if err := docs.Close(); err != nil {
- return err
- }
-
- if len(expectedSeries) != len(seenSeries) {
- return fmt.Errorf(
- "expected %v series, but got %v series", len(expectedSeries), len(seenSeries))
- }
- }
- }
-
- return nil
-}
-
-func TestBootstrapIndexFailsForDecodedTags(t *testing.T) {
- var (
- opts = testDefaultOpts
- src = newCommitLogSource(opts, fs.Inspection{}).(*commitLogSource)
- dataBlockSize = 2 * time.Hour
- indexBlockSize = 4 * time.Hour
- namespaceOptions = namespaceOptions.
- SetRetentionOptions(
- namespaceOptions.
- RetentionOptions().
- SetBlockSize(dataBlockSize),
- ).
- SetIndexOptions(
- namespaceOptions.
- IndexOptions().
- SetBlockSize(indexBlockSize).
- SetEnabled(true),
- )
- )
- md1, err := namespace.NewMetadata(testNamespaceID, namespaceOptions)
- require.NoError(t, err)
-
- now := time.Now()
- start := now.Truncate(indexBlockSize)
-
- fooTags := ident.NewTags(ident.StringTag("city", "ny"))
-
- shardn := func(n int) uint32 { return uint32(n) }
- foo := ts.Series{UniqueIndex: 0, Namespace: testNamespaceID, Shard: shardn(0),
- ID: ident.StringID("foo"), Tags: fooTags}
-
- values := testValues{
- {foo, start, 1.0, xtime.Second, nil},
- }
-
- src.newIteratorFn = func(
- _ commitlog.IteratorOpts,
- ) (commitlog.Iterator, []commitlog.ErrorWithPath, error) {
- return newTestCommitLogIterator(values, nil), nil, nil
- }
-
- ranges := xtime.Ranges{}
- ranges = ranges.AddRange(xtime.Range{
- Start: start,
- End: start.Add(dataBlockSize),
- })
- ranges = ranges.AddRange(xtime.Range{
- Start: start.Add(dataBlockSize),
- End: start.Add(2 * dataBlockSize),
- })
- ranges = ranges.AddRange(xtime.Range{
- Start: start.Add(2 * dataBlockSize),
- End: start.Add(3 * dataBlockSize),
- })
-
- // Don't include ranges for shard 4 as thats how we're testing the noShardBootstrapRange series.
- targetRanges := result.ShardTimeRanges{
- shardn(0): ranges, shardn(1): ranges, shardn(2): ranges, shardn(5): ranges}
-
- tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, targetRanges, md1)
- defer tester.Finish()
-
- _, err = src.Read(tester.Namespaces)
- require.Error(t, err)
-
- tester.EnsureNoLoadedBlocks()
- tester.EnsureNoWrites()
-}
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_prop_test.go b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_prop_test.go
index 0d38a6e4b5..feccae682b 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_prop_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_prop_test.go
@@ -53,7 +53,7 @@ import (
"github.com/leanovate/gopter"
"github.com/leanovate/gopter/gen"
"github.com/leanovate/gopter/prop"
- "github.com/spaolacci/murmur3"
+ murmur3 "github.com/m3db/stackmurmur3/v2"
"github.com/stretchr/testify/require"
)
@@ -229,8 +229,10 @@ func TestCommitLogSourcePropCorrectlyBootstrapsFromCommitlog(t *testing.T) {
for seriesID, data := range seriesForShard {
checkedBytes := checked.NewBytes(data, nil)
checkedBytes.IncRef()
- tags := orderedWritesBySeries[seriesID][0].series.Tags
- writer.Write(ident.StringID(seriesID), tags, checkedBytes, digest.Checksum(data))
+ tags := orderedWritesBySeries[seriesID][0].tags
+ metadata := persist.NewMetadataFromIDAndTags(ident.StringID(seriesID), tags,
+ persist.MetadataOptions{})
+ writer.Write(metadata, checkedBytes, digest.Checksum(data))
}
err = writer.Close()
@@ -331,11 +333,7 @@ func TestCommitLogSourcePropCorrectlyBootstrapsFromCommitlog(t *testing.T) {
// Determine time range to bootstrap
end := input.currentTime.Add(blockSize)
- ranges := xtime.Ranges{}
- ranges = ranges.AddRange(xtime.Range{
- Start: start,
- End: end,
- })
+ ranges := xtime.NewRanges(xtime.Range{Start: start, End: end})
// Determine which shards we need to bootstrap (based on the randomly
// generated data)
@@ -352,9 +350,9 @@ func TestCommitLogSourcePropCorrectlyBootstrapsFromCommitlog(t *testing.T) {
}
// Assign the previously-determined bootstrap range to each known shard
- shardTimeRanges := result.ShardTimeRanges{}
+ shardTimeRanges := result.NewShardTimeRanges()
for shard := range allShardsMap {
- shardTimeRanges[shard] = ranges
+ shardTimeRanges.Set(shard, ranges)
}
// Perform the bootstrap
@@ -374,7 +372,10 @@ func TestCommitLogSourcePropCorrectlyBootstrapsFromCommitlog(t *testing.T) {
runOpts := testDefaultRunOpts.SetInitialTopologyState(initialTopoState)
tester := bootstrap.BuildNamespacesTester(t, runOpts, shardTimeRanges, nsMeta)
- bootstrapResults, err := source.Bootstrap(tester.Namespaces)
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ bootstrapResults, err := source.Bootstrap(ctx, tester.Namespaces)
if err != nil {
return false, err
}
@@ -452,7 +453,7 @@ func TestCommitLogSourcePropCorrectlyBootstrapsFromCommitlog(t *testing.T) {
return true, nil
},
- genPropTestInputs(nsMeta, startTime),
+ genPropTestInputs(t, nsMeta, startTime),
))
if !props.Run(reporter) {
@@ -477,6 +478,7 @@ type generatedWrite struct {
// between time.Now().Add(-bufferFuture) and time.Now().Add(bufferPast).
arrivedAt time.Time
series ts.Series
+ tags ident.Tags
datapoint ts.Datapoint
unit xtime.Unit
annotation ts.Annotation
@@ -486,7 +488,7 @@ func (w generatedWrite) String() string {
return fmt.Sprintf("ID = %v, Datapoint = %+v", w.series.ID.String(), w.datapoint)
}
-func genPropTestInputs(nsMeta namespace.Metadata, blockStart time.Time) gopter.Gen {
+func genPropTestInputs(t *testing.T, nsMeta namespace.Metadata, blockStart time.Time) gopter.Gen {
curriedGenPropTestInput := func(input interface{}) gopter.Gen {
var (
inputs = input.([]interface{})
@@ -501,6 +503,7 @@ func genPropTestInputs(nsMeta namespace.Metadata, blockStart time.Time) gopter.G
)
return genPropTestInput(
+ t,
blockStart, bufferPast, bufferFuture,
snapshotTime, snapshotExists, commitLogExists,
numDatapoints, nsMeta.ID().String(), includeCorruptedCommitlogFile, multiNodeCluster)
@@ -530,6 +533,7 @@ func genPropTestInputs(nsMeta namespace.Metadata, blockStart time.Time) gopter.G
}
func genPropTestInput(
+ t *testing.T,
start time.Time,
bufferPast,
bufferFuture time.Duration,
@@ -541,7 +545,7 @@ func genPropTestInput(
includeCorruptedCommitlogFile bool,
multiNodeCluster bool,
) gopter.Gen {
- return gen.SliceOfN(numDatapoints, genWrite(start, bufferPast, bufferFuture, ns)).
+ return gen.SliceOfN(numDatapoints, genWrite(t, start, bufferPast, bufferFuture, ns)).
Map(func(val []generatedWrite) propTestInput {
return propTestInput{
currentTime: start,
@@ -557,7 +561,7 @@ func genPropTestInput(
})
}
-func genWrite(start time.Time, bufferPast, bufferFuture time.Duration, ns string) gopter.Gen {
+func genWrite(t *testing.T, start time.Time, bufferPast, bufferFuture time.Duration, ns string) gopter.Gen {
latestDatapointTime := start.Truncate(blockSize).Add(blockSize).Sub(start)
return gopter.CombineGens(
@@ -580,13 +584,16 @@ func genWrite(start time.Time, bufferPast, bufferFuture time.Duration, ns string
).Map(func(val []interface{}) generatedWrite {
var (
id = val[0].(string)
- t = val[1].(time.Time)
- a = t
+ tm = val[1].(time.Time)
+ a = tm
bufferPastOrFuture = val[2].(bool)
tagKey = val[3].(string)
tagVal = val[4].(string)
includeTags = val[5].(bool)
v = val[6].(float64)
+
+ tagEncoderPool = testCommitlogOpts.FilesystemOptions().TagEncoderPool()
+ tagSliceIter = ident.NewTagsIterator(ident.Tags{})
)
if bufferPastOrFuture {
@@ -595,17 +602,28 @@ func genWrite(start time.Time, bufferPast, bufferFuture time.Duration, ns string
a = a.Add(bufferPast)
}
+ tags := seriesUniqueTags(id, tagKey, tagVal, includeTags)
+ tagSliceIter.Reset(tags)
+
+ tagEncoder := tagEncoderPool.Get()
+ err := tagEncoder.Encode(tagSliceIter)
+ require.NoError(t, err)
+
+ encodedTagsChecked, ok := tagEncoder.Data()
+ require.True(t, ok)
+
return generatedWrite{
arrivedAt: a,
series: ts.Series{
ID: ident.StringID(id),
- Tags: seriesUniqueTags(id, tagKey, tagVal, includeTags),
Namespace: ident.StringID(ns),
Shard: hashIDToShard(ident.StringID(id)),
UniqueIndex: seriesUniqueIndex(id),
+ EncodedTags: ts.EncodedTags(encodedTagsChecked.Bytes()),
},
+ tags: tags,
datapoint: ts.Datapoint{
- Timestamp: t,
+ Timestamp: tm,
Value: v,
},
unit: xtime.Nanosecond,
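
With the Tags field dropped from ts.Series in favor of EncodedTags, the prop test now encodes tags up front using the tag encoder pool from the commit log filesystem options. A condensed sketch of that encoding step, assuming the in-package testCommitlogOpts helper and the encoder API used in the hunk above (Get, Encode over a tags iterator, and Data returning checked bytes plus an ok flag):

package commitlog

import (
	"testing"

	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/x/ident"

	"github.com/stretchr/testify/require"
)

// encodeTags distills the inline encoding performed in genWrite above:
// iterate the tags, encode them, and return the raw encoded bytes to
// store on ts.Series.EncodedTags.
func encodeTags(t *testing.T, tags ident.Tags) ts.EncodedTags {
	enc := testCommitlogOpts.FilesystemOptions().TagEncoderPool().Get()
	require.NoError(t, enc.Encode(ident.NewTagsIterator(tags)))
	data, ok := enc.Data()
	require.True(t, ok)
	return ts.EncodedTags(data.Bytes())
}
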
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_test.go b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_test.go
index b4ad4fd081..03c92301b9 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_test.go
@@ -48,15 +48,15 @@ func TestAvailableData(t *testing.T) {
blockSize = 2 * time.Hour
numShards = uint32(4)
blockStart = time.Now().Truncate(blockSize)
- shardTimeRangesToBootstrap = result.ShardTimeRanges{}
- bootstrapRanges = xtime.Ranges{}.AddRange(xtime.Range{
+ shardTimeRangesToBootstrap = result.NewShardTimeRanges()
+ bootstrapRanges = xtime.NewRanges(xtime.Range{
Start: blockStart,
End: blockStart.Add(blockSize),
})
)
for i := 0; i < int(numShards); i++ {
- shardTimeRangesToBootstrap[uint32(i)] = bootstrapRanges
+ shardTimeRangesToBootstrap.Set(uint32(i), bootstrapRanges)
}
testCases := []struct {
@@ -72,7 +72,7 @@ func TestAvailableData(t *testing.T) {
tu.SelfID: tu.ShardsRange(0, numShards, shard.Initializing),
}),
shardsTimeRangesToBootstrap: shardTimeRangesToBootstrap,
- expectedAvailableShardsTimeRanges: result.ShardTimeRanges{},
+ expectedAvailableShardsTimeRanges: result.NewShardTimeRanges(),
},
{
title: "Single node - Shard unknown",
@@ -80,7 +80,7 @@ func TestAvailableData(t *testing.T) {
tu.SelfID: tu.ShardsRange(0, numShards, shard.Unknown),
}),
shardsTimeRangesToBootstrap: shardTimeRangesToBootstrap,
- expectedAvailableShardsTimeRanges: result.ShardTimeRanges{},
+ expectedAvailableShardsTimeRanges: result.NewShardTimeRanges(),
expectedErr: errors.New("unknown shard state: Unknown"),
},
{
@@ -115,7 +115,7 @@ func TestAvailableData(t *testing.T) {
notSelfID: tu.ShardsRange(0, numShards, shard.Available),
}),
shardsTimeRangesToBootstrap: shardTimeRangesToBootstrap,
- expectedAvailableShardsTimeRanges: result.ShardTimeRanges{},
+ expectedAvailableShardsTimeRanges: result.NewShardTimeRanges(),
},
}
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/migrator.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/migrator.go
new file mode 100644
index 0000000000..c9e7489031
--- /dev/null
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/migrator.go
@@ -0,0 +1,258 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migrator
+
+import (
+ "sync"
+
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/persist"
+ "github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/fs/migration"
+ "github.com/m3db/m3/src/dbnode/storage"
+ "github.com/m3db/m3/src/dbnode/tracepoint"
+ "github.com/m3db/m3/src/x/context"
+ "github.com/m3db/m3/src/x/instrument"
+
+ "go.uber.org/zap"
+)
+
+type worker struct {
+ persistManager persist.Manager
+ taskOptions migration.TaskOptions
+}
+
+// Migrator is responsible for migrating data filesets based on version information in
+// the info files.
+type Migrator struct {
+ migrationTaskFn MigrationTaskFn
+ infoFilesByNamespace fs.InfoFilesByNamespace
+ migrationOpts migration.Options
+ fsOpts fs.Options
+ instrumentOpts instrument.Options
+ storageOpts storage.Options
+ log *zap.Logger
+}
+
+// NewMigrator creates a new Migrator.
+func NewMigrator(opts Options) (Migrator, error) {
+ if err := opts.Validate(); err != nil {
+ return Migrator{}, err
+ }
+ return Migrator{
+ migrationTaskFn: opts.MigrationTaskFn(),
+ infoFilesByNamespace: opts.InfoFilesByNamespace(),
+ migrationOpts: opts.MigrationOptions(),
+ fsOpts: opts.FilesystemOptions(),
+ instrumentOpts: opts.InstrumentOptions(),
+ storageOpts: opts.StorageOptions(),
+ log: opts.InstrumentOptions().Logger(),
+ }, nil
+}
+
+// migrationCandidate is the struct we generate when we find a fileset in need of
+// migration. It's provided to the workers to perform the actual migration.
+type migrationCandidate struct {
+ newTaskFn migration.NewTaskFn
+ infoFileResult fs.ReadInfoFileResult
+ metadata namespace.Metadata
+ shard uint32
+}
+
+// mergeKey is the unique set of data that identifies a ReadInfoFileResult.
+type mergeKey struct {
+ metadata namespace.Metadata
+ shard uint32
+ blockStart int64
+}
+
+// completedMigration is the updated ReadInfoFileResult after a migration has been performed,
+// plus the merge key, so that we can properly merge the updated result back into the
+// infoFilesByNamespace map.
+type completedMigration struct {
+ key mergeKey
+ updatedInfoFileResult fs.ReadInfoFileResult
+}
+
+// Run runs the migrator.
+func (m *Migrator) Run(ctx context.Context) error {
+ ctx, span, _ := ctx.StartSampledTraceSpan(tracepoint.BootstrapperFilesystemSourceMigrator)
+ defer span.Finish()
+
+ // Find candidates
+ candidates := m.findMigrationCandidates()
+ if len(candidates) == 0 {
+ m.log.Debug("no filesets to migrate. exiting.")
+ return nil
+ }
+
+ m.log.Info("starting fileset migration", zap.Int("migrations", len(candidates)))
+
+ nowFn := m.fsOpts.ClockOptions().NowFn()
+ begin := nowFn()
+
+	// Set up workers to perform migrations
+ var (
+ numWorkers = m.migrationOpts.Concurrency()
+ workers = make([]*worker, 0, numWorkers)
+ )
+
+ baseOpts := migration.NewTaskOptions().
+ SetFilesystemOptions(m.fsOpts).
+ SetStorageOptions(m.storageOpts)
+ for i := 0; i < numWorkers; i++ {
+ // Give each worker their own persist manager so that we can write files concurrently.
+ pm, err := fs.NewPersistManager(m.fsOpts)
+ if err != nil {
+ return err
+ }
+ worker := &worker{
+ persistManager: pm,
+ taskOptions: baseOpts,
+ }
+ workers = append(workers, worker)
+ }
+
+ // Start up workers.
+ var (
+ wg sync.WaitGroup
+ candidatesPerWorker = len(candidates) / numWorkers
+ candidateIdx = 0
+
+ completedMigrationsLock sync.Mutex
+ completedMigrations = make([]completedMigration, 0, len(candidates))
+ )
+ for i, worker := range workers {
+ endIdx := candidateIdx + candidatesPerWorker
+ if i == len(workers)-1 {
+ endIdx = len(candidates)
+ }
+
+ worker := worker
+ startIdx := candidateIdx // Capture current candidateIdx value for goroutine
+ wg.Add(1)
+ go func() {
+ output := m.startWorker(worker, candidates[startIdx:endIdx])
+
+ completedMigrationsLock.Lock()
+ completedMigrations = append(completedMigrations, output...)
+ completedMigrationsLock.Unlock()
+
+ wg.Done()
+ }()
+
+ candidateIdx = endIdx
+ }
+
+ // Wait until all workers have finished and completedMigrations has been updated
+ wg.Wait()
+
+ migrationResults := make(map[mergeKey]fs.ReadInfoFileResult, len(candidates))
+ for _, result := range completedMigrations {
+ migrationResults[result.key] = result.updatedInfoFileResult
+ }
+
+ m.mergeUpdatedInfoFiles(migrationResults)
+
+ m.log.Info("fileset migration finished", zap.Duration("took", nowFn().Sub(begin)))
+
+ return nil
+}
+
+func (m *Migrator) findMigrationCandidates() []migrationCandidate {
+ maxCapacity := 0
+ for _, resultsByShard := range m.infoFilesByNamespace {
+ for _, results := range resultsByShard {
+ maxCapacity += len(results)
+ }
+ }
+
+ candidates := make([]migrationCandidate, 0, maxCapacity)
+ for md, resultsByShard := range m.infoFilesByNamespace {
+ for shard, results := range resultsByShard {
+ for _, info := range results {
+ newTaskFn, shouldMigrate := m.migrationTaskFn(info)
+ if shouldMigrate {
+ candidates = append(candidates, migrationCandidate{
+ newTaskFn: newTaskFn,
+ metadata: md,
+ shard: shard,
+ infoFileResult: info,
+ })
+ }
+ }
+ }
+ }
+
+ return candidates
+}
+
+func (m *Migrator) startWorker(worker *worker, candidates []migrationCandidate) []completedMigration {
+ output := make([]completedMigration, 0, len(candidates))
+ for _, candidate := range candidates {
+ task, err := candidate.newTaskFn(worker.taskOptions.
+ SetInfoFileResult(candidate.infoFileResult).
+ SetShard(candidate.shard).
+ SetNamespaceMetadata(candidate.metadata).
+ SetPersistManager(worker.persistManager))
+		if err != nil {
+			m.log.Error("error creating migration task", zap.Error(err))
+			continue
+		}
+ // NB(nate): Handling of errors should be re-evaluated as migrations are added. Current migrations
+ // do not mutate state in such a way that data can be left in an invalid state in the case of failures. Additionally,
+		// we want to ensure that the bootstrap process is always able to continue. If either of these conditions changes,
+ // error handling at this level AND the migrator level should be reconsidered.
+ infoFileResult, err := task.Run()
+ if err != nil {
+ m.log.Error("error running migration task", zap.Error(err))
+ } else {
+ output = append(output, completedMigration{
+ key: mergeKey{
+ metadata: candidate.metadata,
+ shard: candidate.shard,
+ blockStart: candidate.infoFileResult.Info.BlockStart,
+ },
+ updatedInfoFileResult: infoFileResult,
+ })
+ }
+ }
+
+ return output
+}
+
+// mergeUpdatedInfoFiles takes all ReadInfoFileResults updated by a migration and merges them back
+// into the infoFilesByNamespace map. This prevents callers from having to re-read info files to get
+// updated in-memory structures.
+func (m *Migrator) mergeUpdatedInfoFiles(migrationResults map[mergeKey]fs.ReadInfoFileResult) {
+ for md, resultsByShard := range m.infoFilesByNamespace {
+ for shard, results := range resultsByShard {
+ for i, info := range results {
+ if val, ok := migrationResults[mergeKey{
+ metadata: md,
+ shard: shard,
+ blockStart: info.Info.BlockStart,
+ }]; ok {
+ results[i] = val
+ }
+ }
+ }
+ }
+}
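
Run above distributes candidates across workers with plain integer division: each worker takes len(candidates)/numWorkers candidates and the final worker absorbs the remainder, including the degenerate case where there are fewer candidates than workers. A standalone sketch of just that partitioning logic, restated outside the patch to make the boundary handling explicit:

package main

import "fmt"

// partition returns [start, end) index bounds per worker; the last
// worker takes whatever integer division leaves over, matching the
// loop in Migrator.Run.
func partition(numCandidates, numWorkers int) [][2]int {
	per := numCandidates / numWorkers
	bounds := make([][2]int, 0, numWorkers)
	idx := 0
	for i := 0; i < numWorkers; i++ {
		end := idx + per
		if i == numWorkers-1 {
			end = numCandidates // remainder goes to the last worker
		}
		bounds = append(bounds, [2]int{idx, end})
		idx = end
	}
	return bounds
}

func main() {
	fmt.Println(partition(10, 4)) // [[0 2] [2 4] [4 6] [6 10]]
	fmt.Println(partition(2, 4))  // [[0 0] [0 0] [0 0] [0 2]]
}
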
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/migrator_test.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/migrator_test.go
new file mode 100644
index 0000000000..67e4cea200
--- /dev/null
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/migrator_test.go
@@ -0,0 +1,114 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migrator
+
+import (
+ "testing"
+
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/fs/migration"
+ "github.com/m3db/m3/src/dbnode/persist/schema"
+ "github.com/m3db/m3/src/dbnode/storage"
+ "github.com/m3db/m3/src/x/context"
+ "github.com/m3db/m3/src/x/ident"
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMigratorRun(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ opts := testMigratorOptions(ctrl)
+
+ md1, err := namespace.NewMetadata(ident.StringID("foo"), namespace.NewOptions())
+ require.NoError(t, err)
+
+ md2, err := namespace.NewMetadata(ident.StringID("bar"), namespace.NewOptions())
+ require.NoError(t, err)
+
+ // Create some dummy ReadInfoFileResults as these are used to determine if we need to run a migration or not.
+	// Put some in a state requiring migrations and others not, to exercise both paths.
+ infoFilesByNamespace := fs.InfoFilesByNamespace{
+ md1: {
+ 1: {testInfoFileWithVolumeIndex(0), testInfoFileWithVolumeIndex(1)},
+ 2: {testInfoFileWithVolumeIndex(0), testInfoFileWithVolumeIndex(1)},
+ },
+ md2: {
+ 1: {testInfoFileWithVolumeIndex(0), testInfoFileWithVolumeIndex(0)},
+ 2: {testInfoFileWithVolumeIndex(0), testInfoFileWithVolumeIndex(1)},
+ },
+ }
+
+ opts = opts.
+ SetMigrationTaskFn(func(result fs.ReadInfoFileResult) (migration.NewTaskFn, bool) {
+ return newTestTask, result.Info.VolumeIndex == 0
+ }).
+ SetInfoFilesByNamespace(infoFilesByNamespace).
+ SetMigrationOptions(migration.NewOptions())
+
+ migrator, err := NewMigrator(opts)
+ require.NoError(t, err)
+
+ err = migrator.Run(context.NewContext())
+ require.NoError(t, err)
+
+ // Ensure every info file has a volume index of one.
+ for _, resultsByShard := range infoFilesByNamespace {
+ for _, results := range resultsByShard {
+ for _, info := range results {
+ require.Equal(t, 1, info.Info.VolumeIndex)
+ }
+ }
+ }
+}
+
+func testMigratorOptions(ctrl *gomock.Controller) Options {
+ mockOpts := storage.NewMockOptions(ctrl)
+ mockOpts.EXPECT().Validate().AnyTimes()
+
+ return NewOptions().
+ SetInstrumentOptions(instrument.NewOptions()).
+ SetMigrationOptions(migration.NewOptions()).
+ SetFilesystemOptions(fs.NewOptions()).
+ SetStorageOptions(mockOpts)
+}
+
+type testTask struct {
+ opts migration.TaskOptions
+}
+
+func newTestTask(opts migration.TaskOptions) (migration.Task, error) {
+ return &testTask{opts: opts}, nil
+}
+
+func (t *testTask) Run() (fs.ReadInfoFileResult, error) {
+ result := t.opts.InfoFileResult()
+	result.Info.VolumeIndex++
+ return result, nil
+}
+
+func testInfoFileWithVolumeIndex(volumeIndex int) fs.ReadInfoFileResult {
+ return fs.ReadInfoFileResult{Info: schema.IndexInfo{VolumeIndex: volumeIndex}}
+}
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/options.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/options.go
new file mode 100644
index 0000000000..be077a602d
--- /dev/null
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/options.go
@@ -0,0 +1,144 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migrator
+
+import (
+ "errors"
+
+ "github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/fs/migration"
+ "github.com/m3db/m3/src/dbnode/storage"
+ "github.com/m3db/m3/src/x/instrument"
+)
+
+var (
+ errMigrationTaskFnNotSet = errors.New("migrationTaskFn not set")
+	errInfoFilesByNamespaceNotSet = errors.New("infoFilesByNamespace not set")
+ errMigrationOptsNotSet = errors.New("migrationOpts not set")
+ errInstrumentOptsNotSet = errors.New("instrumentOpts not set")
+ errStorageOptsNotSet = errors.New("storageOpts not set")
+ errFilesystemOptsNotSet = errors.New("filesystemOpts not set")
+)
+
+type options struct {
+ migrationTaskFn MigrationTaskFn
+ infoFilesByNamespace fs.InfoFilesByNamespace
+ migrationOpts migration.Options
+ fsOpts fs.Options
+ instrumentOpts instrument.Options
+ storageOpts storage.Options
+}
+
+// NewOptions returns new migrator options.
+func NewOptions() Options {
+ return &options{
+ migrationOpts: migration.NewOptions(),
+ instrumentOpts: instrument.NewOptions(),
+ }
+}
+
+func (o *options) Validate() error {
+ if o.storageOpts == nil {
+ return errStorageOptsNotSet
+ }
+ if err := o.storageOpts.Validate(); err != nil {
+ return err
+ }
+ if o.migrationTaskFn == nil {
+ return errMigrationTaskFnNotSet
+ }
+ if o.infoFilesByNamespace == nil {
+ return errInfoFilesByNamespaceNotSet
+ }
+ if o.migrationOpts == nil {
+ return errMigrationOptsNotSet
+ }
+ if o.instrumentOpts == nil {
+ return errInstrumentOptsNotSet
+ }
+ if o.fsOpts == nil {
+ return errFilesystemOptsNotSet
+ }
+ if err := o.fsOpts.Validate(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (o *options) SetMigrationTaskFn(value MigrationTaskFn) Options {
+ opts := *o
+ opts.migrationTaskFn = value
+ return &opts
+}
+
+func (o *options) MigrationTaskFn() MigrationTaskFn {
+ return o.migrationTaskFn
+}
+
+func (o *options) SetInfoFilesByNamespace(value fs.InfoFilesByNamespace) Options {
+ opts := *o
+ opts.infoFilesByNamespace = value
+ return &opts
+}
+
+func (o *options) InfoFilesByNamespace() fs.InfoFilesByNamespace {
+ return o.infoFilesByNamespace
+}
+
+func (o *options) SetMigrationOptions(value migration.Options) Options {
+ opts := *o
+ opts.migrationOpts = value
+ return &opts
+}
+
+func (o *options) MigrationOptions() migration.Options {
+ return o.migrationOpts
+}
+
+func (o *options) SetFilesystemOptions(value fs.Options) Options {
+ opts := *o
+ opts.fsOpts = value
+ return &opts
+}
+
+func (o *options) FilesystemOptions() fs.Options {
+ return o.fsOpts
+}
+
+func (o *options) SetInstrumentOptions(value instrument.Options) Options {
+ opts := *o
+ opts.instrumentOpts = value
+ return &opts
+}
+
+func (o *options) InstrumentOptions() instrument.Options {
+ return o.instrumentOpts
+}
+
+func (o *options) SetStorageOptions(value storage.Options) Options {
+ opts := *o
+ opts.storageOpts = value
+ return &opts
+}
+
+func (o *options) StorageOptions() storage.Options {
+ return o.storageOpts
+}
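
Every setter above follows the copy-on-set options convention used throughout this codebase: Set* shallow-copies the receiver, mutates the copy, and returns a pointer to it, so Options values already handed out are never mutated behind the caller's back. A minimal self-contained illustration with a hypothetical one-field options type:

package main

import "fmt"

// opts is a hypothetical single-field options struct, used only to
// demonstrate the copy-on-set pattern.
type opts struct {
	concurrency int
}

// SetConcurrency copies the receiver, mutates the copy, and returns
// it, exactly as the migrator setters above do.
func (o *opts) SetConcurrency(value int) *opts {
	cp := *o
	cp.concurrency = value
	return &cp
}

func main() {
	a := &opts{concurrency: 1}
	b := a.SetConcurrency(4)
	fmt.Println(a.concurrency, b.concurrency) // 1 4 -- a is unchanged
}
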
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/options_test.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/options_test.go
new file mode 100644
index 0000000000..ef3c9589ca
--- /dev/null
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/options_test.go
@@ -0,0 +1,115 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migrator
+
+import (
+ "testing"
+
+ "github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/fs/migration"
+ "github.com/m3db/m3/src/dbnode/storage"
+ xtest "github.com/m3db/m3/src/x/test"
+
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/require"
+)
+
+func TestOptionsValidateStorageOptions(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ opts := newTestOptions(ctrl)
+ require.NoError(t, opts.Validate())
+
+ opts = opts.SetStorageOptions(nil)
+ require.Error(t, opts.Validate())
+
+ opts = opts.SetStorageOptions(storage.NewOptions())
+ require.Error(t, opts.Validate())
+}
+
+func TestOptionsValidateMigrationTaskFn(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ opts := newTestOptions(ctrl)
+ require.NoError(t, opts.Validate())
+
+ opts = opts.SetMigrationTaskFn(nil)
+ require.Error(t, opts.Validate())
+}
+
+func TestOptionsValidateInfoFilesByNamespace(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ opts := newTestOptions(ctrl)
+ require.NoError(t, opts.Validate())
+
+ opts = opts.SetInfoFilesByNamespace(nil)
+ require.Error(t, opts.Validate())
+}
+
+func TestOptionsValidateMigrationOpts(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ opts := newTestOptions(ctrl)
+ require.NoError(t, opts.Validate())
+
+ opts = opts.SetMigrationOptions(nil)
+ require.Error(t, opts.Validate())
+}
+
+func TestOptionsValidateInstrumentOpts(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ opts := newTestOptions(ctrl)
+ require.NoError(t, opts.Validate())
+
+ opts = opts.SetInstrumentOptions(nil)
+ require.Error(t, opts.Validate())
+}
+
+func TestOptionsValidateFilesystemOpts(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ opts := newTestOptions(ctrl)
+ require.NoError(t, opts.Validate())
+
+ opts = opts.SetFilesystemOptions(nil)
+ require.Error(t, opts.Validate())
+}
+
+func newTestOptions(ctrl *gomock.Controller) Options {
+ mockOpts := storage.NewMockOptions(ctrl)
+ mockOpts.EXPECT().Validate().AnyTimes()
+
+ return NewOptions().
+ SetMigrationTaskFn(func(result fs.ReadInfoFileResult) (migration.NewTaskFn, bool) {
+ return nil, false
+ }).
+ SetInfoFilesByNamespace(make(fs.InfoFilesByNamespace)).
+ SetStorageOptions(mockOpts).
+ SetFilesystemOptions(fs.NewOptions())
+}
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/types.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/types.go
new file mode 100644
index 0000000000..7e5ff20465
--- /dev/null
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator/types.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package migrator
+
+import (
+ "github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/fs/migration"
+ "github.com/m3db/m3/src/dbnode/storage"
+ "github.com/m3db/m3/src/x/instrument"
+)
+
+// MigrationTaskFn returns a fileset migration function and a boolean indicating if migration is necessary.
+type MigrationTaskFn func(result fs.ReadInfoFileResult) (migration.NewTaskFn, bool)
+
+// Options represents the options for the migrator.
+type Options interface {
+ // Validate checks that options are valid.
+ Validate() error
+
+ // SetMigrationTaskFn sets the function for determining if the migrator should migrate a fileset.
+ SetMigrationTaskFn(value MigrationTaskFn) Options
+
+ // MigrationTaskFn gets the function for determining if the migrator should migrate a fileset.
+ MigrationTaskFn() MigrationTaskFn
+
+	// SetInfoFilesByNamespace sets the info file results to operate on, keyed by namespace.
+ SetInfoFilesByNamespace(value fs.InfoFilesByNamespace) Options
+
+	// InfoFilesByNamespace returns the info file results to operate on, keyed by namespace.
+ InfoFilesByNamespace() fs.InfoFilesByNamespace
+
+ // SetMigrationOptions sets the migration options.
+ SetMigrationOptions(value migration.Options) Options
+
+ // MigrationOptions returns the migration options.
+ MigrationOptions() migration.Options
+
+ // SetFilesystemOptions sets the filesystem options.
+ SetFilesystemOptions(value fs.Options) Options
+
+	// FilesystemOptions returns the filesystem options.
+ FilesystemOptions() fs.Options
+
+ // SetInstrumentOptions sets the instrument options.
+ SetInstrumentOptions(value instrument.Options) Options
+
+ // InstrumentOptions returns the instrument options.
+ InstrumentOptions() instrument.Options
+
+ // SetStorageOptions sets the storage options.
+ SetStorageOptions(value storage.Options) Options
+
+ // StorageOptions returns the storage options.
+ StorageOptions() storage.Options
+}
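
A MigrationTaskFn is in effect a pure decision function: given one info file result, it reports whether that fileset needs migrating and, if so, which task constructor to run. A hedged sketch in the spirit of the migrator test above; the function name is illustrative and newTestTask stands in for a real task constructor:

// migrateIfAtVolumeZero flags a fileset for migration when it is still
// at volume index zero, the same predicate TestMigratorRun uses.
func migrateIfAtVolumeZero(result fs.ReadInfoFileResult) (migration.NewTaskFn, bool) {
	return newTestTask, result.Info.VolumeIndex == 0
}

It would be installed via Options.SetMigrationTaskFn before constructing the Migrator, as in the test above and in the fs source's runMigrations below.
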
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/options.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/options.go
index 74eace4051..f5b67f8473 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/options.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/options.go
@@ -27,8 +27,9 @@ import (
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/fs/migration"
"github.com/m3db/m3/src/dbnode/runtime"
- "github.com/m3db/m3/src/dbnode/storage/block"
+ "github.com/m3db/m3/src/dbnode/storage"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
@@ -42,6 +43,7 @@ var (
errCompactorNotSet = errors.New("compactor not set")
errIndexOptionsNotSet = errors.New("index options not set")
errFilesystemOptionsNotSet = errors.New("filesystem options not set")
+ errMigrationOptionsNotSet = errors.New("migration options not set")
// NB(r): Bootstrapping data doesn't use large amounts of memory
// that won't be released, so it's fine to do this as fast as possible.
@@ -65,9 +67,10 @@ type options struct {
compactor *compaction.Compactor
bootstrapDataNumProcessors int
bootstrapIndexNumProcessors int
- blockRetrieverManager block.DatabaseBlockRetrieverManager
runtimeOptsMgr runtime.OptionsManager
identifierPool ident.Pool
+ migrationOpts migration.Options
+ storageOpts storage.Options
}
// NewOptions creates new bootstrap options
@@ -85,6 +88,8 @@ func NewOptions() Options {
bootstrapIndexNumProcessors: defaultBootstrapIndexNumProcessors,
runtimeOptsMgr: runtime.NewOptionsManager(),
identifierPool: idPool,
+ migrationOpts: migration.NewOptions(),
+ storageOpts: storage.NewOptions(),
}
}
@@ -101,6 +106,12 @@ func (o *options) Validate() error {
if o.fsOpts == nil {
return errFilesystemOptionsNotSet
}
+ if o.migrationOpts == nil {
+ return errMigrationOptionsNotSet
+ }
+ if err := o.migrationOpts.Validate(); err != nil {
+ return err
+ }
return nil
}
@@ -184,18 +195,6 @@ func (o *options) BoostrapIndexNumProcessors() int {
return o.bootstrapIndexNumProcessors
}
-func (o *options) SetDatabaseBlockRetrieverManager(
- value block.DatabaseBlockRetrieverManager,
-) Options {
- opts := *o
- opts.blockRetrieverManager = value
- return &opts
-}
-
-func (o *options) DatabaseBlockRetrieverManager() block.DatabaseBlockRetrieverManager {
- return o.blockRetrieverManager
-}
-
func (o *options) SetRuntimeOptionsManager(value runtime.OptionsManager) Options {
opts := *o
opts.runtimeOptsMgr = value
@@ -215,3 +214,23 @@ func (o *options) SetIdentifierPool(value ident.Pool) Options {
func (o *options) IdentifierPool() ident.Pool {
return o.identifierPool
}
+
+func (o *options) SetMigrationOptions(value migration.Options) Options {
+ opts := *o
+ opts.migrationOpts = value
+ return &opts
+}
+
+func (o *options) MigrationOptions() migration.Options {
+ return o.migrationOpts
+}
+
+func (o *options) SetStorageOptions(value storage.Options) Options {
+ opts := *o
+ opts.storageOpts = value
+ return &opts
+}
+
+func (o *options) StorageOptions() storage.Options {
+ return o.storageOpts
+}
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go
index d066de6735..924bdaac00 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go
@@ -25,26 +25,33 @@ import (
"sync"
"time"
+ "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/fs/migration"
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
+ "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/fs/migrator"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/index/convert"
"github.com/m3db/m3/src/dbnode/storage/series"
+ "github.com/m3db/m3/src/dbnode/tracepoint"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/m3ninx/doc"
- "github.com/m3db/m3/src/m3ninx/index/segment"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/checked"
+ "github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
xtime "github.com/m3db/m3/src/x/time"
+ "github.com/opentracing/opentracing-go"
+ opentracinglog "github.com/opentracing/opentracing-go/log"
"github.com/uber-go/tally"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
@@ -66,13 +73,13 @@ type fileSystemSource struct {
opts Options
fsopts fs.Options
log *zap.Logger
+ nowFn clock.NowFn
idPool ident.Pool
newReaderFn newDataFileSetReaderFn
newReaderPoolOpts bootstrapper.NewReaderPoolOptions
persistManager *bootstrapper.SharedPersistManager
compactor *bootstrapper.SharedCompactor
metrics fileSystemSourceMetrics
- builder *result.IndexBuilder
}
type fileSystemSourceMetrics struct {
@@ -90,16 +97,11 @@ func newFileSystemSource(opts Options) (bootstrap.Source, error) {
iopts = iopts.SetMetricsScope(scope)
opts = opts.SetInstrumentOptions(iopts)
- alloc := opts.ResultOptions().IndexDocumentsBuilderAllocator()
- segBuilder, err := alloc()
- if err != nil {
- return nil, err
- }
-
s := &fileSystemSource{
opts: opts,
fsopts: opts.FilesystemOptions(),
log: iopts.Logger().With(zap.String("bootstrapper", "filesystem")),
+ nowFn: opts.ResultOptions().ClockOptions().NowFn(),
idPool: opts.IdentifierPool(),
newReaderFn: fs.NewReader,
persistManager: &bootstrapper.SharedPersistManager{
@@ -108,7 +110,6 @@ func newFileSystemSource(opts Options) (bootstrap.Source, error) {
compactor: &bootstrapper.SharedCompactor{
Compactor: opts.Compactor(),
},
- builder: result.NewIndexBuilder(segBuilder),
metrics: fileSystemSourceMetrics{
persistedIndexBlocksRead: scope.Counter("persist-index-blocks-read"),
persistedIndexBlocksWrite: scope.Counter("persist-index-blocks-write"),
@@ -121,43 +122,62 @@ func newFileSystemSource(opts Options) (bootstrap.Source, error) {
func (s *fileSystemSource) AvailableData(
md namespace.Metadata,
- shardsTimeRanges result.ShardTimeRanges,
+ shardTimeRanges result.ShardTimeRanges,
runOpts bootstrap.RunOptions,
) (result.ShardTimeRanges, error) {
- return s.availability(md, shardsTimeRanges)
+ return s.availability(md, shardTimeRanges)
}
func (s *fileSystemSource) AvailableIndex(
md namespace.Metadata,
- shardsTimeRanges result.ShardTimeRanges,
+ shardTimeRanges result.ShardTimeRanges,
runOpts bootstrap.RunOptions,
) (result.ShardTimeRanges, error) {
- return s.availability(md, shardsTimeRanges)
+ return s.availability(md, shardTimeRanges)
}
func (s *fileSystemSource) Read(
+ ctx context.Context,
namespaces bootstrap.Namespaces,
) (bootstrap.NamespaceResults, error) {
+ ctx, span, _ := ctx.StartSampledTraceSpan(tracepoint.BootstrapperFilesystemSourceRead)
+ defer span.Finish()
+
results := bootstrap.NamespaceResults{
Results: bootstrap.NewNamespaceResultsMap(bootstrap.NamespaceResultsMapOptions{}),
}
+ alloc := s.opts.ResultOptions().IndexDocumentsBuilderAllocator()
+ segBuilder, err := alloc()
+ if err != nil {
+ return bootstrap.NamespaceResults{}, err
+ }
+ builder := result.NewIndexBuilder(segBuilder)
+
+	// Preload info file results so they can be used to bootstrap data filesets and to run data migrations.
+ infoFilesByNamespace := s.loadInfoFiles(namespaces)
+
+	// Perform any necessary migrations, but don't block the bootstrap process on failure. This updates the
+	// in-memory info file structures in place if migrations have written new files to disk, which saves us
+	// from having to re-read migrated info files.
+ s.runMigrations(ctx, infoFilesByNamespace)
+
// NB(r): Perform all data bootstrapping first, then index bootstrapping
// to more clearly delineate which process is slower than the other.
- nowFn := s.opts.ResultOptions().ClockOptions().NowFn()
- start := nowFn()
+ start := s.nowFn()
dataLogFields := []zapcore.Field{
zap.Stringer("cachePolicy", s.opts.ResultOptions().SeriesCachePolicy()),
}
s.log.Info("bootstrapping time series data start",
dataLogFields...)
+ span.LogEvent("bootstrap_data_start")
for _, elem := range namespaces.Namespaces.Iter() {
namespace := elem.Value()
md := namespace.Metadata
r, err := s.read(bootstrapDataRunType, md, namespace.DataAccumulator,
namespace.DataRunOptions.ShardTimeRanges,
- namespace.DataRunOptions.RunOptions)
+ namespace.DataRunOptions.RunOptions, builder, span, infoFilesByNamespace)
if err != nil {
return bootstrap.NamespaceResults{}, err
}
@@ -169,10 +189,12 @@ func (s *fileSystemSource) Read(
})
}
s.log.Info("bootstrapping time series data success",
- append(dataLogFields, zap.Duration("took", nowFn().Sub(start)))...)
+ append(dataLogFields, zap.Duration("took", s.nowFn().Sub(start)))...)
+ span.LogEvent("bootstrap_data_done")
- start = nowFn()
+ start = s.nowFn()
s.log.Info("bootstrapping index metadata start")
+ span.LogEvent("bootstrap_index_start")
for _, elem := range namespaces.Namespaces.Iter() {
namespace := elem.Value()
md := namespace.Metadata
@@ -185,7 +207,7 @@ func (s *fileSystemSource) Read(
r, err := s.read(bootstrapIndexRunType, md, namespace.DataAccumulator,
namespace.IndexRunOptions.ShardTimeRanges,
- namespace.IndexRunOptions.RunOptions)
+ namespace.IndexRunOptions.RunOptions, builder, span, infoFilesByNamespace)
if err != nil {
return bootstrap.NamespaceResults{}, err
}
@@ -201,18 +223,50 @@ func (s *fileSystemSource) Read(
results.Results.Set(md.ID(), result)
}
s.log.Info("bootstrapping index metadata success",
- zap.Stringer("took", nowFn().Sub(start)))
+ zap.Duration("took", s.nowFn().Sub(start)))
+ span.LogEvent("bootstrap_index_done")
return results, nil
}
+func (s *fileSystemSource) runMigrations(ctx context.Context, infoFilesByNamespace fs.InfoFilesByNamespace) {
+	// Only one migration for now, so just short-circuit entirely if not enabled.
+ if s.opts.MigrationOptions().TargetMigrationVersion() != migration.MigrationVersion_1_1 {
+ return
+ }
+
+ migrator, err := migrator.NewMigrator(migrator.NewOptions().
+ SetMigrationTaskFn(migration.MigrationTask).
+ SetInfoFilesByNamespace(infoFilesByNamespace).
+ SetMigrationOptions(s.opts.MigrationOptions()).
+ SetFilesystemOptions(s.fsopts).
+ SetInstrumentOptions(s.opts.InstrumentOptions()).
+ SetStorageOptions(s.opts.StorageOptions()))
+	if err != nil {
+		s.log.Error("error creating migrator. continuing bootstrap", zap.Error(err))
+		return
+	}
+
+ // NB(nate): Handling of errors should be re-evaluated as migrations are added. Current migrations
+ // do not mutate state in such a way that data can be left in an invalid state in the case of failures. Additionally,
+	// we want to ensure that the bootstrap process is always able to continue. If either of these conditions changes,
+ // error handling at this level AND the individual migration task level should be reconsidered.
+ //
+	// One final note: as more migrations are introduced and complexity increases, we may want to consider adding
+ // 1) a recovery mechanism to ensure that repeatable panics don't create a crash loop and
+ // 2) state tracking to abort migration attempts after a certain number of consecutive failures.
+ // For now, simply setting the target migration to "None" in config is enough to mitigate both of these cases.
+ if err = migrator.Run(ctx); err != nil {
+ s.log.Error("error performing migrations. continuing bootstrap", zap.Error(err))
+ }
+}
+
func (s *fileSystemSource) availability(
md namespace.Metadata,
- shardsTimeRanges result.ShardTimeRanges,
+ shardTimeRanges result.ShardTimeRanges,
) (result.ShardTimeRanges, error) {
- result := make(map[uint32]xtime.Ranges, len(shardsTimeRanges))
- for shard, ranges := range shardsTimeRanges {
- result[shard] = s.shardAvailability(md.ID(), shard, ranges)
+ result := result.NewShardTimeRangesFromSize(shardTimeRanges.Len())
+ for shard, ranges := range shardTimeRanges.Iter() {
+ result.Set(shard, s.shardAvailability(md.ID(), shard, ranges))
}
return result, nil
}
@@ -223,13 +277,23 @@ func (s *fileSystemSource) shardAvailability(
targetRangesForShard xtime.Ranges,
) xtime.Ranges {
if targetRangesForShard.IsEmpty() {
- return xtime.Ranges{}
+ return xtime.NewRanges()
}
readInfoFilesResults := fs.ReadInfoFiles(s.fsopts.FilePathPrefix(),
- namespace, shard, s.fsopts.InfoReaderBufferSize(), s.fsopts.DecodingOptions())
+ namespace, shard, s.fsopts.InfoReaderBufferSize(), s.fsopts.DecodingOptions(),
+ persist.FileSetFlushType)
+
+ return s.shardAvailabilityWithInfoFiles(namespace, shard, targetRangesForShard, readInfoFilesResults)
+}
- var tr xtime.Ranges
+func (s *fileSystemSource) shardAvailabilityWithInfoFiles(
+ namespace ident.ID,
+ shard uint32,
+ targetRangesForShard xtime.Ranges,
+ readInfoFilesResults []fs.ReadInfoFileResult,
+) xtime.Ranges {
+ tr := xtime.NewRanges()
for i := 0; i < len(readInfoFilesResults); i++ {
result := readInfoFilesResults[i]
if err := result.Err.Error(); err != nil {
@@ -247,7 +311,7 @@ func (s *fileSystemSource) shardAvailability(
w := time.Duration(info.BlockSize)
currRange := xtime.Range{Start: t, End: t.Add(w)}
if targetRangesForShard.Overlaps(currRange) {
- tr = tr.AddRange(currRange)
+ tr.AddRange(currRange)
}
}
return tr
@@ -260,6 +324,7 @@ func (s *fileSystemSource) bootstrapFromReaders(
runOpts bootstrap.RunOptions,
readerPool *bootstrapper.ReaderPool,
readersCh <-chan bootstrapper.TimeWindowReaders,
+ builder *result.IndexBuilder,
) *runResult {
var (
runResult = newRunResult()
@@ -269,10 +334,10 @@ func (s *fileSystemSource) bootstrapFromReaders(
for timeWindowReaders := range readersCh {
// NB(bodu): Since we are re-using the same builder for all bootstrapped index blocks,
// it is not thread safe and requires reset after every processed index block.
- s.builder.Builder().Reset(0)
+ builder.Builder().Reset()
s.loadShardReadersDataIntoShardResult(run, ns, accumulator,
- runOpts, runResult, resultOpts, timeWindowReaders, readerPool)
+ runOpts, runResult, resultOpts, timeWindowReaders, readerPool, builder)
}
return runResult
@@ -325,6 +390,7 @@ func (s *fileSystemSource) loadShardReadersDataIntoShardResult(
ropts result.Options,
timeWindowReaders bootstrapper.TimeWindowReaders,
readerPool *bootstrapper.ReaderPool,
+ builder *result.IndexBuilder,
) {
var (
blockPool = ropts.DatabaseBlockOptions().DatabaseBlockPool()
@@ -334,7 +400,7 @@ func (s *fileSystemSource) loadShardReadersDataIntoShardResult(
docsPool = s.opts.IndexOptions().DocumentArrayPool()
batch = docsPool.Get()
totalEntries int
- totalFulfilledRanges = result.ShardTimeRanges{}
+ totalFulfilledRanges = result.NewShardTimeRanges()
)
defer docsPool.Put(batch)
@@ -370,11 +436,10 @@ func (s *fileSystemSource) loadShardReadersDataIntoShardResult(
runResult, start, blockSize, blockPool, seriesCachePolicy)
case bootstrapIndexRunType:
// We can just read the entry and index if performing an index run.
- batch, err = s.readNextEntryAndMaybeIndex(r, batch)
+ batch, err = s.readNextEntryAndMaybeIndex(r, batch, builder)
if err != nil {
- s.log.Error("readNextEntryAndMaybeIndex failed",
- zap.String("error", err.Error()),
- zap.String("timeRangeStart", fmt.Sprintf("%v", timeRange.Start)))
+ s.log.Error("readNextEntryAndMaybeIndex failed", zap.Error(err),
+ zap.Time("timeRangeStart", timeRange.Start))
}
totalEntries++
default:
@@ -384,11 +449,10 @@ func (s *fileSystemSource) loadShardReadersDataIntoShardResult(
}
// NB(bodu): Only flush if we've experienced no errors up to this point.
if err == nil && len(batch) > 0 {
- batch, err = s.builder.FlushBatch(batch)
+ batch, err = builder.FlushBatch(batch)
if err != nil {
- s.log.Error("FlushBatch failed",
- zap.String("error", err.Error()),
- zap.String("timeRangeStart", fmt.Sprintf("%v", timeRange.Start)))
+ s.log.Error("builder FlushBatch failed", zap.Error(err),
+ zap.Time("timeRangeStart", timeRange.Start))
}
}
@@ -415,27 +479,23 @@ func (s *fileSystemSource) loadShardReadersDataIntoShardResult(
if err == nil && run == bootstrapIndexRunType {
// Mark index block as fulfilled.
- fulfilled := result.ShardTimeRanges{
- shard: xtime.Ranges{}.AddRange(timeRange),
- }
+ fulfilled := result.NewShardTimeRanges().Set(shard, xtime.NewRanges(timeRange))
err = runResult.index.IndexResults().MarkFulfilled(start, fulfilled,
- ns.Options().IndexOptions())
+ // NB(bodu): By default, we always load bootstrapped data into the default index volume.
+ idxpersist.DefaultIndexVolumeType, ns.Options().IndexOptions())
if err != nil {
- s.log.Error("MarkFulfilled failed",
- zap.String("error", err.Error()),
- zap.String("timeRangeStart", fmt.Sprintf("%v", timeRange.Start)))
+ s.log.Error("indexResults MarkFulfilled failed", zap.Error(err),
+ zap.Time("timeRangeStart", timeRange.Start))
}
}
if err == nil {
- fulfilled := result.ShardTimeRanges{
- shard: xtime.Ranges{}.AddRange(timeRange),
- }
+ fulfilled := result.NewShardTimeRanges().Set(shard, xtime.NewRanges(timeRange))
totalFulfilledRanges.AddRanges(fulfilled)
remainingRanges.Subtract(fulfilled)
} else {
- s.log.Error(err.Error(),
- zap.String("timeRangeStart", fmt.Sprintf("%v", timeRange.Start)))
+ s.log.Error("unknown error", zap.Error(err),
+ zap.Time("timeRangeStart", timeRange.Start))
timesWithErrors = append(timesWithErrors, timeRange.Start)
}
}
@@ -452,18 +512,21 @@ func (s *fileSystemSource) loadShardReadersDataIntoShardResult(
var (
indexBlockSize = ns.Options().IndexOptions().BlockSize()
retentionPeriod = ns.Options().RetentionOptions().RetentionPeriod()
- nowFn = s.opts.ResultOptions().ClockOptions().NowFn()
beginningOfIndexRetention = retention.FlushTimeStartForRetentionPeriod(
- retentionPeriod, indexBlockSize, nowFn())
+ retentionPeriod, indexBlockSize, s.nowFn())
initialIndexRange = xtime.Range{
Start: beginningOfIndexRetention,
End: beginningOfIndexRetention.Add(indexBlockSize),
}
overlapsWithInitalIndexRange = false
min, max = requestedRanges.MinMax()
+ blockStart = min.Truncate(indexBlockSize)
+ blockEnd = blockStart.Add(indexBlockSize)
iopts = s.opts.ResultOptions().InstrumentOptions()
+ indexBlock result.IndexBlock
+ err error
)
- for _, remainingRange := range remainingRanges {
+ for _, remainingRange := range remainingRanges.Iter() {
if remainingRange.Overlaps(initialIndexRange) {
overlapsWithInitalIndexRange = true
}
@@ -471,6 +534,28 @@ func (s *fileSystemSource) loadShardReadersDataIntoShardResult(
remainingMin, remainingMax := remainingRanges.MinMax()
fulfilledMin, fulfilledMax := totalFulfilledRanges.MinMax()
+
+ // NB(bodu): Assume that any data we bootstrap from disk belongs to the "default" index volume type.
+ existingIndexBlock, ok := bootstrapper.GetDefaultIndexBlockForBlockStart(runResult.index.IndexResults(), blockStart)
+ if !ok {
+ err := fmt.Errorf("could not find index block in results: time=%s, ts=%d",
+ blockStart.String(), blockStart.UnixNano())
+ instrument.EmitAndLogInvariantViolation(iopts, func(l *zap.Logger) {
+ l.Error("index bootstrap failed",
+ zap.Error(err),
+ zap.Stringer("namespace", ns.ID()),
+ zap.Stringer("requestedRanges", requestedRanges))
+ })
+ }
+
+ // Determine whether we should flush data for this range.
+ persistCfg := runOpts.PersistConfig()
+ shouldFlush := persistCfg.Enabled &&
+ persistCfg.FileSetType == persist.FileSetFlushType
+
+ // Determine whether all requested ranges were fulfilled or we are at the edge of retention.
+ satisifiedFlushRanges := noneRemaining || overlapsWithInitalIndexRange
+
buildIndexLogFields := []zapcore.Field{
zap.Stringer("namespace", ns.ID()),
zap.Bool("shouldBuildSegment", shouldBuildSegment),
@@ -483,24 +568,23 @@ func (s *fileSystemSource) loadShardReadersDataIntoShardResult(
zap.String("totalFulfilledRangesMinMax", fmt.Sprintf("%v - %v", fulfilledMin, fulfilledMax)),
zap.String("totalFulfilledRanges", totalFulfilledRanges.SummaryString()),
zap.String("initialIndexRange", fmt.Sprintf("%v - %v", initialIndexRange.Start, initialIndexRange.End)),
+ zap.Bool("shouldFlush", shouldFlush),
+ zap.Bool("satisifiedFlushRanges", satisifiedFlushRanges),
}
- // Determine if should flush data for range.
- persistCfg := runOpts.PersistConfig()
- shouldFlush := persistCfg.Enabled &&
- persistCfg.FileSetType == persist.FileSetFlushType
- // Determine all requested ranges were fulfilled or at edge of retention
- satisifiedFlushRanges := noneRemaining || overlapsWithInitalIndexRange
if shouldFlush && satisifiedFlushRanges {
- s.log.Info("building file set index segment", buildIndexLogFields...)
- if err := bootstrapper.PersistBootstrapIndexSegment(
+ s.log.Debug("building file set index segment", buildIndexLogFields...)
+ indexBlock, err = bootstrapper.PersistBootstrapIndexSegment(
ns,
requestedRanges,
- runResult.index.IndexResults(),
- s.builder.Builder(),
+ builder.Builder(),
s.persistManager,
s.opts.ResultOptions(),
- ); err != nil {
+ existingIndexBlock.Fulfilled(),
+ blockStart,
+ blockEnd,
+ )
+ if err != nil {
instrument.EmitAndLogInvariantViolation(iopts, func(l *zap.Logger) {
l.Error("persist fs index bootstrap failed",
zap.Error(err),
@@ -512,15 +596,17 @@ func (s *fileSystemSource) loadShardReadersDataIntoShardResult(
s.metrics.persistedIndexBlocksWrite.Inc(1)
} else {
s.log.Info("building in-memory index segment", buildIndexLogFields...)
- if err := bootstrapper.BuildBootstrapIndexSegment(
+ indexBlock, err = bootstrapper.BuildBootstrapIndexSegment(
ns,
requestedRanges,
- runResult.index.IndexResults(),
- s.builder.Builder(),
+ builder.Builder(),
s.compactor,
s.opts.ResultOptions(),
s.opts.FilesystemOptions().MmapReporter(),
- ); err != nil {
+ blockStart,
+ blockEnd,
+ )
+ if err != nil {
iopts := s.opts.ResultOptions().InstrumentOptions()
instrument.EmitAndLogInvariantViolation(iopts, func(l *zap.Logger) {
l.Error("build fs index bootstrap failed",
@@ -530,6 +616,17 @@ func (s *fileSystemSource) loadShardReadersDataIntoShardResult(
})
}
}
+
+ // Merge segments and fulfilled time ranges.
+ segments := indexBlock.Segments()
+ for _, seg := range existingIndexBlock.Segments() {
+ segments = append(segments, seg)
+ }
+ newFulfilled := existingIndexBlock.Fulfilled().Copy()
+ newFulfilled.AddRanges(indexBlock.Fulfilled())
+
+ // Replace index block for default index volume type.
+ runResult.index.IndexResults()[xtime.ToUnixNano(blockStart)].SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock(segments, newFulfilled))
}
// Return readers to pool.
@@ -593,7 +690,7 @@ func (s *fileSystemSource) readNextEntryAndRecordBlock(
return fmt.Errorf("unable to checkout series: %v", err)
}
- seg := ts.NewSegment(data, nil, ts.FinalizeHead)
+ seg := ts.NewSegment(data, nil, 0, ts.FinalizeHead)
seriesBlock.Reset(blockStart, blockSize, seg, nsCtx)
if err := ref.Series.LoadBlock(seriesBlock, series.WarmWrite); err != nil {
return fmt.Errorf("unable to load block: %v", err)
@@ -605,6 +702,7 @@ func (s *fileSystemSource) readNextEntryAndRecordBlock(
func (s *fileSystemSource) readNextEntryAndMaybeIndex(
r fs.DataFileSetReader,
batch []doc.Document,
+ builder *result.IndexBuilder,
) ([]doc.Document, error) {
// If performing index run, then simply read the metadata and add to segment.
id, tagsIter, _, _, err := r.ReadMetadata()
@@ -612,7 +710,7 @@ func (s *fileSystemSource) readNextEntryAndMaybeIndex(
return batch, err
}
- d, err := convert.FromMetricIter(id, tagsIter)
+ d, err := convert.FromSeriesIDAndTagIter(id, tagsIter)
// Finalize the ID and tags.
id.Finalize()
tagsIter.Close()
@@ -623,7 +721,7 @@ func (s *fileSystemSource) readNextEntryAndMaybeIndex(
batch = append(batch, d)
if len(batch) >= index.DocumentArrayPoolCapacity {
- return s.builder.FlushBatch(batch)
+ return builder.FlushBatch(batch)
}
return batch, nil
@@ -633,14 +731,17 @@ func (s *fileSystemSource) read(
run runType,
md namespace.Metadata,
accumulator bootstrap.NamespaceDataAccumulator,
- shardsTimeRanges result.ShardTimeRanges,
+ shardTimeRanges result.ShardTimeRanges,
runOpts bootstrap.RunOptions,
+ builder *result.IndexBuilder,
+ span opentracing.Span,
+ infoFilesByNamespace fs.InfoFilesByNamespace,
) (*runResult, error) {
var (
seriesCachePolicy = s.opts.ResultOptions().SeriesCachePolicy()
res *runResult
)
- if shardsTimeRanges.IsEmpty() {
+ if shardTimeRanges.IsEmpty() {
return newRunResult(), nil
}
@@ -659,25 +760,34 @@ func (s *fileSystemSource) read(
if seriesCachePolicy != series.CacheAll {
// Unless we're caching all series (or all series metadata) in memory, we
// return just the availability of the files we have.
- return s.bootstrapDataRunResultFromAvailability(md, shardsTimeRanges), nil
+ return s.bootstrapDataRunResultFromAvailability(md, shardTimeRanges, infoFilesByNamespace), nil
}
}
+ logSpan := func(event string) {
+ span.LogFields(
+ opentracinglog.String("event", event),
+ opentracinglog.String("nsID", md.ID().String()),
+ opentracinglog.String("shardTimeRanges", shardTimeRanges.SummaryString()),
+ )
+ }
if run == bootstrapIndexRunType {
+ logSpan("bootstrap_from_index_persisted_blocks_start")
// NB(r): First read all the FSTs and add to runResult index results,
// subtract the shard + time ranges from what we intend to bootstrap
// for those we found.
r, err := s.bootstrapFromIndexPersistedBlocks(md,
- shardsTimeRanges)
+ shardTimeRanges)
if err != nil {
s.log.Warn("filesystem bootstrapped failed to read persisted index blocks")
} else {
// We may have less we need to read
- shardsTimeRanges = shardsTimeRanges.Copy()
- shardsTimeRanges.Subtract(r.fulfilled)
+ shardTimeRanges = shardTimeRanges.Copy()
+ shardTimeRanges.Subtract(r.fulfilled)
// Set or merge result.
setOrMergeResult(r.result)
}
+ logSpan("bootstrap_from_index_persisted_blocks_done")
}
// Create a reader pool once per bootstrap as we don't really want to
@@ -695,10 +805,24 @@ func (s *fileSystemSource) read(
panic(fmt.Errorf("unrecognized run type: %d", run))
}
runtimeOpts := s.opts.RuntimeOptionsManager().Get()
- go bootstrapper.EnqueueReaders(md, runOpts, runtimeOpts, s.fsopts, shardsTimeRanges,
- readerPool, readersCh, blockSize, s.log)
+ go bootstrapper.EnqueueReaders(bootstrapper.EnqueueReadersOptions{
+ NsMD: md,
+ RunOpts: runOpts,
+ RuntimeOpts: runtimeOpts,
+ FsOpts: s.fsopts,
+ ShardTimeRanges: shardTimeRanges,
+ ReaderPool: readerPool,
+ ReadersCh: readersCh,
+ BlockSize: blockSize,
+ // NB(bodu): We only read metadata when bootstrapping the index,
+ // so we do not need to sort the data fileset reader.
+ OptimizedReadMetadataOnly: run == bootstrapIndexRunType,
+ Logger: s.log,
+ Span: span,
+ NowFn: s.nowFn,
+ })
bootstrapFromDataReadersResult := s.bootstrapFromReaders(run, md,
- accumulator, runOpts, readerPool, readersCh)
+ accumulator, runOpts, readerPool, readersCh, builder)
// Merge any existing results if necessary.
setOrMergeResult(bootstrapFromDataReadersResult)
@@ -713,20 +837,23 @@ func (s *fileSystemSource) newReader() (fs.DataFileSetReader, error) {
func (s *fileSystemSource) bootstrapDataRunResultFromAvailability(
md namespace.Metadata,
- shardsTimeRanges result.ShardTimeRanges,
+ shardTimeRanges result.ShardTimeRanges,
+ infoFilesByNamespace fs.InfoFilesByNamespace,
) *runResult {
runResult := newRunResult()
unfulfilled := runResult.data.Unfulfilled()
- for shard, ranges := range shardsTimeRanges {
+ for shard, ranges := range shardTimeRanges.Iter() {
if ranges.IsEmpty() {
continue
}
- availability := s.shardAvailability(md.ID(), shard, ranges)
- remaining := ranges.RemoveRanges(availability)
+ availability := s.shardAvailabilityWithInfoFiles(md.ID(), shard, ranges, infoFilesByNamespace[md][shard])
+ remaining := ranges.Clone()
+ remaining.RemoveRanges(availability)
if !remaining.IsEmpty() {
- unfulfilled.AddRanges(result.ShardTimeRanges{
- shard: remaining,
- })
+ unfulfilled.AddRanges(result.NewShardTimeRanges().Set(
+ shard,
+ remaining,
+ ))
}
}
runResult.data.SetUnfulfilled(unfulfilled)
@@ -740,10 +867,10 @@ type bootstrapFromIndexPersistedBlocksResult struct {
func (s *fileSystemSource) bootstrapFromIndexPersistedBlocks(
ns namespace.Metadata,
- shardsTimeRanges result.ShardTimeRanges,
+ shardTimeRanges result.ShardTimeRanges,
) (bootstrapFromIndexPersistedBlocksResult, error) {
res := bootstrapFromIndexPersistedBlocksResult{
- fulfilled: result.ShardTimeRanges{},
+ fulfilled: result.NewShardTimeRanges(),
}
indexBlockSize := ns.Options().IndexOptions().BlockSize()
@@ -755,7 +882,7 @@ func (s *fileSystemSource) bootstrapFromIndexPersistedBlocks(
s.log.Error("unable to read index info file",
zap.Stringer("namespace", ns.ID()),
zap.Error(err),
- zap.Stringer("shardsTimeRanges", shardsTimeRanges),
+ zap.Stringer("shardTimeRanges", shardTimeRanges),
zap.String("filepath", infoFile.Err.Filepath()),
)
continue
@@ -767,13 +894,16 @@ func (s *fileSystemSource) bootstrapFromIndexPersistedBlocks(
Start: indexBlockStart,
End: indexBlockStart.Add(indexBlockSize),
}
- willFulfill := result.ShardTimeRanges{}
+ willFulfill := result.NewShardTimeRanges()
for _, shard := range info.Shards {
- tr, ok := shardsTimeRanges[shard]
+ tr, ok := shardTimeRanges.Get(shard)
if !ok {
// No ranges match for this shard.
continue
}
+ if _, ok := willFulfill.Get(shard); !ok {
+ willFulfill.Set(shard, xtime.NewRanges())
+ }
iter := tr.Iter()
for iter.Next() {
@@ -782,7 +912,7 @@ func (s *fileSystemSource) bootstrapFromIndexPersistedBlocks(
if !intersects {
continue
}
- willFulfill[shard] = willFulfill[shard].AddRange(intersection)
+ willFulfill.GetOrAdd(shard).AddRange(intersection)
}
}
@@ -817,22 +947,47 @@ func (s *fileSystemSource) bootstrapFromIndexPersistedBlocks(
}
segmentsFulfilled := willFulfill
// NB(bodu): All segments read from disk are already persisted.
- persistedSegments := make([]segment.Segment, 0, len(segments))
+ persistedSegments := make([]result.Segment, 0, len(segments))
for _, segment := range segments {
- persistedSegments = append(persistedSegments, bootstrapper.NewSegment(segment, true))
+ persistedSegments = append(persistedSegments, result.NewSegment(segment, true))
+ }
+ volumeType := idxpersist.DefaultIndexVolumeType
+ if info.IndexVolumeType != nil {
+ volumeType = idxpersist.IndexVolumeType(info.IndexVolumeType.Value)
}
- indexBlock := result.NewIndexBlock(indexBlockStart, persistedSegments,
- segmentsFulfilled)
+ indexBlockByVolumeType := result.NewIndexBlockByVolumeType(indexBlockStart)
+ indexBlockByVolumeType.SetBlock(volumeType, result.NewIndexBlock(persistedSegments, segmentsFulfilled))
// NB(r): Don't need to call MarkFulfilled on the IndexResults here
// as we've already passed the ranges fulfilled to the block that
// we place in the IndexResuts with the call to Add(...).
- res.result.index.Add(indexBlock, nil)
+ res.result.index.Add(indexBlockByVolumeType, nil)
res.fulfilled.AddRanges(segmentsFulfilled)
}
return res, nil
}
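+// NB: index results are now keyed by volume type within each block start, so
+// readers resolve a concrete block via GetBlock. A minimal sketch (error
+// handling elided; both calls appear in the tests updated below):
+//
+//   blockByVolumeType := indexResults[xtime.ToUnixNano(blockStart)]
+//   block, ok := blockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
+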
+func (s *fileSystemSource) loadInfoFiles(
+ namespaces bootstrap.Namespaces,
+) fs.InfoFilesByNamespace {
+ infoFilesByNamespace := make(fs.InfoFilesByNamespace)
+
+ for _, elem := range namespaces.Namespaces.Iter() {
+ namespace := elem.Value()
+ shardTimeRanges := namespace.DataRunOptions.ShardTimeRanges
+ result := make(fs.InfoFileResultsPerShard, shardTimeRanges.Len())
+ for shard := range shardTimeRanges.Iter() {
+ result[shard] = fs.ReadInfoFiles(s.fsopts.FilePathPrefix(),
+ namespace.Metadata.ID(), shard, s.fsopts.InfoReaderBufferSize(), s.fsopts.DecodingOptions(),
+ persist.FileSetFlushType)
+ }
+
+ infoFilesByNamespace[namespace.Metadata] = result
+ }
+
+ return infoFilesByNamespace
+}
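+
+// NB: loading info files once up front lets bootstrapDataRunResultFromAvailability
+// consult infoFilesByNamespace[md][shard] rather than re-reading info files from
+// disk on every availability check.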
+
type runResult struct {
sync.RWMutex
data result.DataBootstrapResult
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go
index 2a68ed358a..8d1f757ce1 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go
@@ -32,8 +32,13 @@ import (
"github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/fs/migration"
+ "github.com/m3db/m3/src/dbnode/persist/fs/msgpack"
"github.com/m3db/m3/src/dbnode/retention"
+ "github.com/m3db/m3/src/dbnode/storage"
+ "github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
@@ -43,6 +48,7 @@ import (
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
+ "github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
xtest "github.com/m3db/m3/src/x/test"
xtime "github.com/m3db/m3/src/x/time"
@@ -172,7 +178,7 @@ func testTimeRanges() xtime.Ranges {
}
func testShardTimeRanges() result.ShardTimeRanges {
- return map[uint32]xtime.Ranges{testShard: testTimeRanges()}
+ return result.NewShardTimeRanges().Set(testShard, testTimeRanges())
}
func testBootstrappingIndexShardTimeRanges() result.ShardTimeRanges {
@@ -180,15 +186,20 @@ func testBootstrappingIndexShardTimeRanges() result.ShardTimeRanges {
// `testBlockSize` values should be fulfilled in the index block. This is
// `testBlockSize` rather than `testIndexSize` since the files generated
// by this test use 2 hour (which is `testBlockSize`) reader blocks.
- return map[uint32]xtime.Ranges{
- testShard: xtime.Ranges{}.AddRange(xtime.Range{
+ return result.NewShardTimeRanges().Set(
+ testShard,
+ xtime.NewRanges(xtime.Range{
Start: testStart.Add(testBlockSize),
End: testStart.Add(11 * time.Hour),
}),
- }
+ )
}
func writeGoodFiles(t *testing.T, dir string, namespace ident.ID, shard uint32) {
+ writeGoodFilesWithFsOpts(t, namespace, shard, newTestFsOptions(dir))
+}
+
+func writeGoodFilesWithFsOpts(t *testing.T, namespace ident.ID, shard uint32, fsOpts fs.Options) {
inputs := []struct {
start time.Time
id string
@@ -201,8 +212,8 @@ func writeGoodFiles(t *testing.T, dir string, namespace ident.ID, shard uint32)
}
for _, input := range inputs {
- writeTSDBFiles(t, dir, namespace, shard, input.start,
- []testSeries{{input.id, input.tags, input.data}})
+ writeTSDBFilesWithFsOpts(t, namespace, shard, input.start,
+ []testSeries{{input.id, input.tags, input.data}}, fsOpts)
}
}
@@ -244,7 +255,18 @@ func writeTSDBFiles(
start time.Time,
series []testSeries,
) {
- w, err := fs.NewWriter(newTestFsOptions(dir))
+ writeTSDBFilesWithFsOpts(t, namespace, shard, start, series, newTestFsOptions(dir))
+}
+
+func writeTSDBFilesWithFsOpts(
+ t require.TestingT,
+ namespace ident.ID,
+ shard uint32,
+ start time.Time,
+ series []testSeries,
+ opts fs.Options,
+) {
+ w, err := fs.NewWriter(opts)
require.NoError(t, err)
writerOpts := fs.DataWriterOpenOptions{
Identifier: fs.FileSetFileIdentifier{
@@ -259,8 +281,9 @@ func writeTSDBFiles(
for _, v := range series {
bytes := checked.NewBytes(v.data, nil)
bytes.IncRef()
- require.NoError(t, w.Write(ident.StringID(v.id),
- sortedTagsFromTagsMap(v.tags), bytes, digest.Checksum(bytes.Bytes())))
+ metadata := persist.NewMetadataFromIDAndTags(ident.StringID(v.id), sortedTagsFromTagsMap(v.tags),
+ persist.MetadataOptions{})
+ require.NoError(t, w.Write(metadata, bytes, digest.Checksum(bytes.Bytes())))
bytes.DecRef()
}
@@ -284,10 +307,13 @@ func sortedTagsFromTagsMap(tags map[string]string) ident.Tags {
func validateTimeRanges(t *testing.T, tr xtime.Ranges, expected xtime.Ranges) {
// Make range eclipses expected
- require.True(t, expected.RemoveRanges(tr).IsEmpty())
+ expectedWithRemovedRanges := expected.Clone()
+ expectedWithRemovedRanges.RemoveRanges(tr)
+ require.True(t, expectedWithRemovedRanges.IsEmpty())
// Now make sure no ranges outside of expected
- expectedWithAddedRanges := expected.AddRanges(tr)
+ expectedWithAddedRanges := expected.Clone()
+ expectedWithAddedRanges.AddRanges(tr)
require.Equal(t, expected.Len(), expectedWithAddedRanges.Len())
iter := expected.Iter()
@@ -302,7 +328,7 @@ func TestAvailableEmptyRangeError(t *testing.T) {
require.NoError(t, err)
res, err := src.AvailableData(
testNsMetadata(t),
- map[uint32]xtime.Ranges{0: xtime.Ranges{}},
+ result.NewShardTimeRanges().Set(0, xtime.NewRanges()),
testDefaultRunOpts,
)
require.NoError(t, err)
@@ -385,13 +411,16 @@ func TestAvailableTimeRangeFilter(t *testing.T) {
)
require.NoError(t, err)
require.NotNil(t, res)
- require.Equal(t, 1, len(res))
- require.NotNil(t, res[testShard])
+ require.Equal(t, 1, res.Len())
+ _, ok := res.Get(testShard)
+ require.True(t, ok)
- expected := xtime.Ranges{}.
- AddRange(xtime.Range{Start: testStart, End: testStart.Add(2 * time.Hour)}).
- AddRange(xtime.Range{Start: testStart.Add(10 * time.Hour), End: testStart.Add(12 * time.Hour)})
- validateTimeRanges(t, res[testShard], expected)
+ expected := xtime.NewRanges(
+ xtime.Range{Start: testStart, End: testStart.Add(2 * time.Hour)},
+ xtime.Range{Start: testStart.Add(10 * time.Hour), End: testStart.Add(12 * time.Hour)})
+ tr, ok := res.Get(testShard)
+ require.True(t, ok)
+ validateTimeRanges(t, tr, expected)
}
func TestAvailableTimeRangePartialError(t *testing.T) {
@@ -412,13 +441,16 @@ func TestAvailableTimeRangePartialError(t *testing.T) {
)
require.NoError(t, err)
require.NotNil(t, res)
- require.Equal(t, 1, len(res))
- require.NotNil(t, res[testShard])
+ require.Equal(t, 1, res.Len())
+ _, ok := res.Get(testShard)
+ require.True(t, ok)
- expected := xtime.Ranges{}.
- AddRange(xtime.Range{Start: testStart, End: testStart.Add(2 * time.Hour)}).
- AddRange(xtime.Range{Start: testStart.Add(10 * time.Hour), End: testStart.Add(12 * time.Hour)})
- validateTimeRanges(t, res[testShard], expected)
+ expected := xtime.NewRanges(
+ xtime.Range{Start: testStart, End: testStart.Add(2 * time.Hour)},
+ xtime.Range{Start: testStart.Add(10 * time.Hour), End: testStart.Add(12 * time.Hour)})
+ tr, ok := res.Get(testShard)
+ require.True(t, ok)
+ validateTimeRanges(t, tr, expected)
}
// NB: too real :'(
@@ -435,7 +467,7 @@ func TestReadEmptyRangeErr(t *testing.T) {
src, err := newFileSystemSource(newTestOptions(t, "foo"))
require.NoError(t, err)
nsMD := testNsMetadata(t)
- tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, nil, nsMD)
+ tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, result.NewShardTimeRanges(), nsMD)
defer tester.Finish()
unfulfilledAndEmpty(t, src, nsMD, tester)
}
@@ -443,7 +475,7 @@ func TestReadEmptyRangeErr(t *testing.T) {
func TestReadPatternError(t *testing.T) {
src, err := newFileSystemSource(newTestOptions(t, "[["))
require.NoError(t, err)
- timeRanges := result.ShardTimeRanges{testShard: xtime.Ranges{}}
+ timeRanges := result.NewShardTimeRanges().Set(testShard, xtime.NewRanges())
nsMD := testNsMetadata(t)
tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts,
timeRanges, nsMD)
@@ -497,10 +529,13 @@ func TestReadNilTimeRanges(t *testing.T) {
src, err := newFileSystemSource(newTestOptions(t, dir))
require.NoError(t, err)
- timeRanges := result.ShardTimeRanges{
- testShard: testTimeRanges(),
- 555: xtime.Ranges{},
- }
+ timeRanges := result.NewShardTimeRanges().Set(
+ testShard,
+ testTimeRanges(),
+ ).Set(
+ 555,
+ xtime.NewRanges(),
+ )
validateReadResults(t, src, dir, timeRanges)
}
@@ -844,3 +879,46 @@ func TestReadTags(t *testing.T) {
require.Equal(t, tags, reader.Tags)
tester.EnsureNoWrites()
}
+
+func TestReadRunMigrations(t *testing.T) {
+ dir := createTempDir(t)
+ defer os.RemoveAll(dir)
+
+ // Write existing data filesets with legacy encoding.
+ eOpts := msgpack.LegacyEncodingOptions{
+ EncodeLegacyIndexInfoVersion: msgpack.LegacyEncodingIndexVersionV4, // MinorVersion 0
+ EncodeLegacyIndexEntryVersion: msgpack.LegacyEncodingIndexEntryVersionV2, // No checksum data
+ }
+ writeGoodFilesWithFsOpts(t, testNs1ID, testShard, newTestFsOptions(dir).SetEncodingOptions(eOpts))
+
+ opts := newTestOptions(t, dir)
+ sOpts, closer := newTestStorageOptions(t, opts.PersistManager())
+ defer closer()
+
+ src, err := newFileSystemSource(opts.
+ SetMigrationOptions(migration.NewOptions().
+ SetTargetMigrationVersion(migration.MigrationVersion_1_1).
+ SetConcurrency(2)). // Lower concurrency to ensure workers process more than 1 migration.
+ SetStorageOptions(sOpts))
+ require.NoError(t, err)
+
+ validateReadResults(t, src, dir, testShardTimeRanges())
+}
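+
+// NB: the test above exercises the migrator.Run path added to the source read:
+// legacy V4 info files and V2 index entries (written without checksums) should
+// be migrated toward MigrationVersion_1_1 before validateReadResults reads them.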
+
+func newTestStorageOptions(t *testing.T, pm persist.Manager) (storage.Options, index.Closer) {
+ plCache, closer, err := index.NewPostingsListCache(1, index.PostingsListCacheOptions{
+ InstrumentOptions: instrument.NewOptions(),
+ })
+ require.NoError(t, err)
+
+ md, err := namespace.NewMetadata(testNs1ID, testNamespaceOptions)
+ require.NoError(t, err)
+
+ return storage.NewOptions().
+ SetPersistManager(pm).
+ SetNamespaceInitializer(namespace.NewStaticInitializer([]namespace.Metadata{md})).
+ SetRepairEnabled(false).
+ SetIndexOptions(index.NewOptions().
+ SetPostingsListCache(plCache)).
+ SetBlockLeaseManager(block.NewLeaseManager(nil)), closer
+}
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_index_bench_test.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_index_bench_test.go
index 5f9b763bcc..0f2886912a 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_index_bench_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_index_bench_test.go
@@ -105,7 +105,7 @@ func BenchmarkBootstrapIndex(b *testing.B) {
namespaceDataDirPath, shards)
// Clear the shard time ranges and add new ones.
- times.shardTimeRanges = make(result.ShardTimeRanges)
+ times.shardTimeRanges = result.NewShardTimeRanges()
times.start = time.Unix(0, math.MaxInt64)
times.end = time.Unix(0, 0)
for _, shard := range shards {
@@ -114,7 +114,7 @@ func BenchmarkBootstrapIndex(b *testing.B) {
max = time.Unix(0, 0)
ranges = xtime.NewRanges()
entries = fs.ReadInfoFiles(dir, testNamespace, shard,
- 0, msgpack.NewDecodingOptions())
+ 0, msgpack.NewDecodingOptions(), persist.FileSetFlushType)
)
for _, entry := range entries {
if entry.Err != nil {
@@ -132,7 +132,7 @@ func BenchmarkBootstrapIndex(b *testing.B) {
max = end
}
- ranges = ranges.AddRange(xtime.Range{Start: start, End: end})
+ ranges.AddRange(xtime.Range{Start: start, End: end})
// Override the block size if different.
namespaceOpts := testNamespaceMetadata.Options()
@@ -157,7 +157,7 @@ func BenchmarkBootstrapIndex(b *testing.B) {
continue // Nothing to bootstrap for shard.
}
- times.shardTimeRanges[shard] = ranges
+ times.shardTimeRanges.Set(shard, ranges)
if min.Before(times.start) {
times.start = min
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_index_test.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_index_test.go
index a28f2d6674..c4b59095fd 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_index_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_index_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -31,10 +31,10 @@ import (
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
- "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index/convert"
"github.com/m3db/m3/src/m3ninx/index/segment/mem"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
"github.com/stretchr/testify/require"
@@ -76,12 +76,13 @@ func newTestBootstrapIndexTimes(
panic("unexpected")
}
- shardTimeRanges := map[uint32]xtime.Ranges{
- testShard: xtime.Ranges{}.AddRange(xtime.Range{
+ shardTimeRanges := result.NewShardTimeRanges().Set(
+ testShard,
+ xtime.NewRanges(xtime.Range{
Start: start,
End: end,
}),
- }
+ )
return testBootstrapIndexTimes{
start: start,
@@ -144,11 +145,11 @@ func writeTSDBPersistedIndexBlock(
shards map[uint32]struct{},
block []testSeries,
) {
- seg, err := mem.NewSegment(0, mem.NewOptions())
+ seg, err := mem.NewSegment(mem.NewOptions())
require.NoError(t, err)
for _, series := range block {
- d, err := convert.FromMetric(series.ID(), series.Tags())
+ d, err := convert.FromSeriesIDAndTags(series.ID(), series.Tags())
require.NoError(t, err)
exists, err := seg.ContainsID(series.ID().Bytes())
require.NoError(t, err)
@@ -259,44 +260,46 @@ func validateGoodTaggedSeries(
expectedSeriesByBlock := expectedTaggedSeriesWithOptions(t, start, opts)
for _, expected := range expectedSeriesByBlock {
expectedAt := xtime.ToUnixNano(expected.indexBlockStart)
- indexBlock, ok := indexResults[expectedAt]
+ indexBlockByVolumeType, ok := indexResults[expectedAt]
require.True(t, ok)
- require.Equal(t, 1, len(indexBlock.Segments()))
- for _, seg := range indexBlock.Segments() {
- reader, err := seg.Reader()
- require.NoError(t, err)
-
- docs, err := reader.AllDocs()
- require.NoError(t, err)
+ for _, indexBlock := range indexBlockByVolumeType.Iter() {
+ require.Equal(t, 1, len(indexBlock.Segments()))
+ for _, seg := range indexBlock.Segments() {
+ reader, err := seg.Segment().Reader()
+ require.NoError(t, err)
- matches := map[string]struct{}{}
- for docs.Next() {
- curr := docs.Current()
+ docs, err := reader.AllDocs()
+ require.NoError(t, err)
- _, ok := matches[string(curr.ID)]
- require.False(t, ok)
- matches[string(curr.ID)] = struct{}{}
+ matches := map[string]struct{}{}
+ for docs.Next() {
+ curr := docs.Current()
- series, ok := expected.series[string(curr.ID)]
- require.True(t, ok)
-
- matchingTags := map[string]struct{}{}
- for _, tag := range curr.Fields {
- _, ok := matchingTags[string(tag.Name)]
+ _, ok := matches[string(curr.ID)]
require.False(t, ok)
- matchingTags[string(tag.Name)] = struct{}{}
+ matches[string(curr.ID)] = struct{}{}
- tagValue, ok := series.tags[string(tag.Name)]
+ series, ok := expected.series[string(curr.ID)]
require.True(t, ok)
- require.Equal(t, tagValue, string(tag.Value))
+ matchingTags := map[string]struct{}{}
+ for _, tag := range curr.Fields {
+ _, ok := matchingTags[string(tag.Name)]
+ require.False(t, ok)
+ matchingTags[string(tag.Name)] = struct{}{}
+
+ tagValue, ok := series.tags[string(tag.Name)]
+ require.True(t, ok)
+
+ require.Equal(t, tagValue, string(tag.Value))
+ }
+ require.Equal(t, len(series.tags), len(matchingTags))
}
- require.Equal(t, len(series.tags), len(matchingTags))
- }
- require.NoError(t, docs.Err())
- require.NoError(t, docs.Close())
+ require.NoError(t, docs.Err())
+ require.NoError(t, docs.Close())
- require.Equal(t, len(expected.series), len(matches))
+ require.Equal(t, len(expected.series), len(matches))
+ }
}
}
}
@@ -350,18 +353,22 @@ func TestBootstrapIndex(t *testing.T) {
}
// Check that the segment is not a mutable segment for this block
- block, ok := indexResults[xtime.ToUnixNano(times.start)]
+ blockByVolumeType, ok := indexResults[xtime.ToUnixNano(times.start)]
+ require.True(t, ok)
+ block, ok := blockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
require.True(t, ok)
require.Equal(t, 1, len(block.Segments()))
- segment, ok := block.Segments()[0].(*bootstrapper.Segment)
+ segment := block.Segments()[0]
require.True(t, ok)
require.True(t, segment.IsPersisted())
// Check that the second segment is mutable and was not written out
- block, ok = indexResults[xtime.ToUnixNano(times.start.Add(testIndexBlockSize))]
+ blockByVolumeType, ok = indexResults[xtime.ToUnixNano(times.start.Add(testIndexBlockSize))]
+ require.True(t, ok)
+ block, ok = blockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
require.True(t, ok)
require.Equal(t, 1, len(block.Segments()))
- segment, ok = block.Segments()[0].(*bootstrapper.Segment)
+ segment = block.Segments()[0]
require.True(t, ok)
require.False(t, segment.IsPersisted())
@@ -416,17 +423,21 @@ func TestBootstrapIndexIgnoresPersistConfigIfSnapshotType(t *testing.T) {
require.Equal(t, 0, len(infoFiles))
// Check that both segments are mutable
- block, ok := indexResults[xtime.ToUnixNano(times.start)]
+ blockByVolumeType, ok := indexResults[xtime.ToUnixNano(times.start)]
+ require.True(t, ok)
+ block, ok := blockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
require.True(t, ok)
require.Equal(t, 1, len(block.Segments()))
- segment, ok := block.Segments()[0].(*bootstrapper.Segment)
+ segment := block.Segments()[0]
require.True(t, ok)
require.False(t, segment.IsPersisted())
- block, ok = indexResults[xtime.ToUnixNano(times.start.Add(testIndexBlockSize))]
+ blockByVolumeType, ok = indexResults[xtime.ToUnixNano(times.start.Add(testIndexBlockSize))]
+ require.True(t, ok)
+ block, ok = blockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
require.True(t, ok)
require.Equal(t, 1, len(block.Segments()))
- segment, ok = block.Segments()[0].(*bootstrapper.Segment)
+ segment = block.Segments()[0]
require.True(t, ok)
require.False(t, segment.IsPersisted())
@@ -481,18 +492,22 @@ func TestBootstrapIndexWithPersistPrefersPersistedIndexBlocks(t *testing.T) {
// Check that the segment is not a mutable segment for this block
// and came from disk
- block, ok := indexResults[xtime.ToUnixNano(times.start)]
+ blockByVolumeType, ok := indexResults[xtime.ToUnixNano(times.start)]
+ require.True(t, ok)
+ block, ok := blockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
require.True(t, ok)
require.Equal(t, 1, len(block.Segments()))
- segment, ok := block.Segments()[0].(*bootstrapper.Segment)
+ segment := block.Segments()[0]
require.True(t, ok)
require.True(t, segment.IsPersisted())
// Check that the second segment is mutable
- block, ok = indexResults[xtime.ToUnixNano(times.start.Add(testIndexBlockSize))]
+ blockByVolumeType, ok = indexResults[xtime.ToUnixNano(times.start.Add(testIndexBlockSize))]
+ require.True(t, ok)
+ block, ok = blockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
require.True(t, ok)
require.Equal(t, 1, len(block.Segments()))
- segment, ok = block.Segments()[0].(*bootstrapper.Segment)
+ segment = block.Segments()[0]
require.True(t, ok)
require.False(t, segment.IsPersisted())
@@ -566,12 +581,13 @@ func TestBootstrapIndexWithPersistForIndexBlockAtRetentionEdge(t *testing.T) {
require.NoError(t, err)
// NB(bodu): Simulate requesting bootstrapping of two whole index blocks instead of 3 data blocks (1.5 index blocks).
- times.shardTimeRanges = map[uint32]xtime.Ranges{
- testShard: xtime.Ranges{}.AddRange(xtime.Range{
+ times.shardTimeRanges = result.NewShardTimeRanges().Set(
+ testShard,
+ xtime.NewRanges(xtime.Range{
Start: firstIndexBlockStart,
End: times.end,
}),
- }
+ )
tester := bootstrap.BuildNamespacesTester(t, runOpts,
times.shardTimeRanges, ns)
defer tester.Finish()
@@ -609,18 +625,22 @@ func TestBootstrapIndexWithPersistForIndexBlockAtRetentionEdge(t *testing.T) {
}
// Check that the segment is not a mutable segment
- block, ok := indexResults[xtime.ToUnixNano(firstIndexBlockStart)]
+ blockByVolumeType, ok := indexResults[xtime.ToUnixNano(firstIndexBlockStart)]
+ require.True(t, ok)
+ block, ok := blockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
require.True(t, ok)
require.Equal(t, 1, len(block.Segments()))
- segment, ok := block.Segments()[0].(*bootstrapper.Segment)
+ segment := block.Segments()[0]
require.True(t, ok)
require.True(t, segment.IsPersisted())
// Check that the second is not a mutable segment
- block, ok = indexResults[xtime.ToUnixNano(firstIndexBlockStart.Add(testIndexBlockSize))]
+ blockByVolumeType, ok = indexResults[xtime.ToUnixNano(firstIndexBlockStart.Add(testIndexBlockSize))]
+ require.True(t, ok)
+ block, ok = blockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
require.True(t, ok)
require.Equal(t, 1, len(block.Segments()))
- segment, ok = block.Segments()[0].(*bootstrapper.Segment)
+ segment = block.Segments()[0]
require.True(t, ok)
require.True(t, segment.IsPersisted())
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/types.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/types.go
index 28c3a0541a..2720684851 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/types.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/types.go
@@ -23,8 +23,9 @@ package fs
import (
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/persist/fs/migration"
"github.com/m3db/m3/src/dbnode/runtime"
- "github.com/m3db/m3/src/dbnode/storage/block"
+ "github.com/m3db/m3/src/dbnode/storage"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
@@ -85,23 +86,6 @@ type Options interface {
// work for bootstrapping data file sets.
BoostrapIndexNumProcessors() int
- // SetDatabaseBlockRetrieverManager sets the block retriever manager to
- // use when bootstrapping retrievable blocks instead of blocks
- // containing data.
- // If you don't wish to bootstrap retrievable blocks instead of
- // blocks containing data then do not set this manager.
- // You can opt into which namespace you wish to have this enabled for
- // by returning nil instead of a result when creating a new block retriever
- // for a namespace from the manager.
- SetDatabaseBlockRetrieverManager(
- value block.DatabaseBlockRetrieverManager,
- ) Options
-
- // NewBlockRetrieverFn returns the new block retriever constructor to
- // use when bootstrapping retrievable blocks instead of blocks
- // containing data.
- DatabaseBlockRetrieverManager() block.DatabaseBlockRetrieverManager
-
// SetRuntimeOptionsManager sets the runtime options manager.
SetRuntimeOptionsManager(value runtime.OptionsManager) Options
@@ -119,4 +103,16 @@ type Options interface {
// IndexOptions returns the indexing options.
IndexOptions() index.Options
+
+ // SetMigrationOptions sets the migration options.
+ SetMigrationOptions(value migration.Options) Options
+
+ // MigrationOptions gets the migration options.
+ MigrationOptions() migration.Options
+
+ // SetStorageOptions sets storage options.
+ SetStorageOptions(value storage.Options) Options
+
+ // StorageOptions gets the storage options.
+ StorageOptions() storage.Options
}
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/noop.go b/src/dbnode/storage/bootstrap/bootstrapper/noop.go
index 24aad52fd5..e8e2aaf8f1 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/noop.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/noop.go
@@ -24,6 +24,7 @@ import (
"fmt"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
+ "github.com/m3db/m3/src/x/context"
)
const (
@@ -61,6 +62,7 @@ func (noop noOpNoneBootstrapper) String() string {
}
func (noop noOpNoneBootstrapper) Bootstrap(
+ ctx context.Context,
namespaces bootstrap.Namespaces,
) (bootstrap.NamespaceResults, error) {
results := bootstrap.NewNamespaceResults(namespaces)
@@ -117,6 +119,7 @@ func (noop noOpAllBootstrapper) String() string {
}
func (noop noOpAllBootstrapper) Bootstrap(
+ ctx context.Context,
namespaces bootstrap.Namespaces,
) (bootstrap.NamespaceResults, error) {
return bootstrap.NewNamespaceResults(namespaces), nil
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/options.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/options.go
index 0643a3facb..1d592dd03e 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/options.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/options.go
@@ -29,7 +29,6 @@ import (
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
- "github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
@@ -38,8 +37,16 @@ import (
)
var (
- defaultDefaultShardConcurrency = runtime.NumCPU()
- defaultShardPersistenceConcurrency = int(math.Max(1, float64(runtime.NumCPU())/2))
+ // DefaultShardConcurrency controls how many shards in parallel to stream
+ // for in memory data being streamed between peers (most recent block).
+ // Update BootstrapPeersConfiguration comment in
+ // src/cmd/services/m3dbnode/config package if this is changed.
+ DefaultShardConcurrency = runtime.NumCPU()
+ // DefaultShardPersistenceConcurrency controls how many shards in parallel to stream
+ // for historical data being streamed between peers (historical blocks).
+ // Update BootstrapPeersConfiguration comment in
+ // src/cmd/services/m3dbnode/config package if this is changed.
+ DefaultShardPersistenceConcurrency = int(math.Max(1, float64(runtime.NumCPU())/2))
defaultPersistenceMaxQueueSize = 0
)
@@ -59,7 +66,6 @@ type options struct {
shardPersistenceConcurrency int
persistenceMaxQueueSize int
persistManager persist.Manager
- blockRetrieverManager block.DatabaseBlockRetrieverManager
runtimeOptionsManager m3dbruntime.OptionsManager
contextPool context.Pool
fsOpts fs.Options
@@ -71,8 +77,8 @@ type options struct {
func NewOptions() Options {
return &options{
resultOpts: result.NewOptions(),
- defaultShardConcurrency: defaultDefaultShardConcurrency,
- shardPersistenceConcurrency: defaultShardPersistenceConcurrency,
+ defaultShardConcurrency: DefaultShardConcurrency,
+ shardPersistenceConcurrency: DefaultShardPersistenceConcurrency,
persistenceMaxQueueSize: defaultPersistenceMaxQueueSize,
// Use a zero pool, this should be overriden at config time.
contextPool: context.NewPool(context.NewOptions().
@@ -173,18 +179,6 @@ func (o *options) Compactor() *compaction.Compactor {
return o.compactor
}
-func (o *options) SetDatabaseBlockRetrieverManager(
- value block.DatabaseBlockRetrieverManager,
-) Options {
- opts := *o
- opts.blockRetrieverManager = value
- return &opts
-}
-
-func (o *options) DatabaseBlockRetrieverManager() block.DatabaseBlockRetrieverManager {
- return o.blockRetrieverManager
-}
-
func (o *options) SetRuntimeOptionsManager(value m3dbruntime.OptionsManager) Options {
opts := *o
opts.runtimeOptionsManager = value
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go
index e78f323595..d76efed0df 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go
@@ -39,13 +39,16 @@ import (
"github.com/m3db/m3/src/dbnode/storage/index/convert"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/topology"
+ "github.com/m3db/m3/src/dbnode/tracepoint"
"github.com/m3db/m3/src/m3ninx/doc"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
+ "github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
-
"github.com/m3db/m3/src/x/instrument"
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
+ "github.com/opentracing/opentracing-go"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
@@ -56,15 +59,13 @@ type peersSource struct {
nowFn clock.NowFn
persistManager *bootstrapper.SharedPersistManager
compactor *bootstrapper.SharedCompactor
- builder *result.IndexBuilder
}
type persistenceFlush struct {
- nsMetadata namespace.Metadata
- shard uint32
- shardRetrieverMgr block.DatabaseShardBlockRetrieverManager
- shardResult result.ShardResult
- timeRange xtime.Range
+ nsMetadata namespace.Metadata
+ shard uint32
+ shardResult result.ShardResult
+ timeRange xtime.Range
}
func newPeersSource(opts Options) (bootstrap.Source, error) {
@@ -73,11 +74,6 @@ func newPeersSource(opts Options) (bootstrap.Source, error) {
}
iopts := opts.ResultOptions().InstrumentOptions()
- alloc := opts.ResultOptions().IndexDocumentsBuilderAllocator()
- segBuilder, err := alloc()
- if err != nil {
- return nil, err
- }
return &peersSource{
opts: opts,
log: iopts.Logger().With(zap.String("bootstrapper", "peers")),
@@ -88,7 +84,6 @@ func newPeersSource(opts Options) (bootstrap.Source, error) {
compactor: &bootstrapper.SharedCompactor{
Compactor: opts.Compactor(),
},
- builder: result.NewIndexBuilder(segBuilder),
}, nil
}
@@ -99,38 +94,59 @@ type shardPeerAvailability struct {
func (s *peersSource) AvailableData(
nsMetadata namespace.Metadata,
- shardsTimeRanges result.ShardTimeRanges,
+ shardTimeRanges result.ShardTimeRanges,
runOpts bootstrap.RunOptions,
) (result.ShardTimeRanges, error) {
if err := s.validateRunOpts(runOpts); err != nil {
return nil, err
}
- return s.peerAvailability(nsMetadata, shardsTimeRanges, runOpts)
+ return s.peerAvailability(nsMetadata, shardTimeRanges, runOpts)
}
func (s *peersSource) AvailableIndex(
nsMetadata namespace.Metadata,
- shardsTimeRanges result.ShardTimeRanges,
+ shardTimeRanges result.ShardTimeRanges,
runOpts bootstrap.RunOptions,
) (result.ShardTimeRanges, error) {
if err := s.validateRunOpts(runOpts); err != nil {
return nil, err
}
- return s.peerAvailability(nsMetadata, shardsTimeRanges, runOpts)
+ return s.peerAvailability(nsMetadata, shardTimeRanges, runOpts)
}
func (s *peersSource) Read(
+ ctx context.Context,
namespaces bootstrap.Namespaces,
) (bootstrap.NamespaceResults, error) {
+ ctx, span, _ := ctx.StartSampledTraceSpan(tracepoint.BootstrapperPeersSourceRead)
+ defer span.Finish()
+
+ timeRangesEmpty := true
+ for _, elem := range namespaces.Namespaces.Iter() {
+ namespace := elem.Value()
+ dataRangesNotEmpty := !namespace.DataRunOptions.ShardTimeRanges.IsEmpty()
+
+ indexEnabled := namespace.Metadata.Options().IndexOptions().Enabled()
+ indexRangesNotEmpty := indexEnabled && !namespace.IndexRunOptions.ShardTimeRanges.IsEmpty()
+ if dataRangesNotEmpty || indexRangesNotEmpty {
+ timeRangesEmpty = false
+ break
+ }
+ }
+ if timeRangesEmpty {
+ // Return empty result with no unfulfilled ranges.
+ return bootstrap.NewNamespaceResults(namespaces), nil
+ }
+
results := bootstrap.NamespaceResults{
Results: bootstrap.NewNamespaceResultsMap(bootstrap.NamespaceResultsMapOptions{}),
}
// NB(r): Perform all data bootstrapping first then index bootstrapping
// to more clearly deliniate which process is slower than the other.
- nowFn := s.opts.ResultOptions().ClockOptions().NowFn()
- start := nowFn()
+ start := s.nowFn()
s.log.Info("bootstrapping time series data start")
+ span.LogEvent("bootstrap_data_start")
for _, elem := range namespaces.Namespaces.Iter() {
namespace := elem.Value()
md := namespace.Metadata
@@ -149,10 +165,19 @@ func (s *peersSource) Read(
})
}
s.log.Info("bootstrapping time series data success",
- zap.Duration("took", nowFn().Sub(start)))
+ zap.Duration("took", s.nowFn().Sub(start)))
+ span.LogEvent("bootstrap_data_done")
+
+ alloc := s.opts.ResultOptions().IndexDocumentsBuilderAllocator()
+ segBuilder, err := alloc()
+ if err != nil {
+ return bootstrap.NamespaceResults{}, err
+ }
+ builder := result.NewIndexBuilder(segBuilder)
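+ // NB: the index builder is created per Read call rather than stored on the
+ // source, since it is mutable state that must be Reset between index blocks
+ // (see the NB in readIndex below) and must not be shared across bootstraps.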
- start = nowFn()
+ start = s.nowFn()
s.log.Info("bootstrapping index metadata start")
+ span.LogEvent("bootstrap_index_start")
for _, elem := range namespaces.Namespaces.Iter() {
namespace := elem.Value()
md := namespace.Metadata
@@ -166,7 +191,9 @@ func (s *peersSource) Read(
r, err := s.readIndex(md,
namespace.IndexRunOptions.ShardTimeRanges,
- namespace.IndexRunOptions.RunOptions)
+ builder,
+ namespace.IndexRunOptions.RunOptions,
+ span)
if err != nil {
return bootstrap.NamespaceResults{}, err
}
@@ -183,7 +210,8 @@ func (s *peersSource) Read(
results.Results.Set(md.ID(), result)
}
s.log.Info("bootstrapping index metadata success",
- zap.Duration("took", nowFn().Sub(start)))
+ zap.Duration("took", s.nowFn().Sub(start)))
+ span.LogEvent("bootstrap_index_done")
return results, nil
}
@@ -191,22 +219,21 @@ func (s *peersSource) Read(
func (s *peersSource) readData(
nsMetadata namespace.Metadata,
accumulator bootstrap.NamespaceDataAccumulator,
- shardsTimeRanges result.ShardTimeRanges,
+ shardTimeRanges result.ShardTimeRanges,
opts bootstrap.RunOptions,
) (result.DataBootstrapResult, error) {
if err := s.validateRunOpts(opts); err != nil {
return nil, err
}
- if shardsTimeRanges.IsEmpty() {
+ if shardTimeRanges.IsEmpty() {
return result.NewDataBootstrapResult(), nil
}
var (
- namespace = nsMetadata.ID()
- shardRetrieverMgr block.DatabaseShardBlockRetrieverManager
- persistFlush persist.FlushPreparer
- shouldPersist = false
+ namespace = nsMetadata.ID()
+ persistFlush persist.FlushPreparer
+ shouldPersist = false
// TODO(bodu): We should migrate to series.CacheLRU only.
seriesCachePolicy = s.opts.ResultOptions().SeriesCachePolicy()
persistConfig = opts.PersistConfig()
@@ -215,24 +242,15 @@ func (s *peersSource) readData(
if persistConfig.Enabled &&
(seriesCachePolicy == series.CacheRecentlyRead || seriesCachePolicy == series.CacheLRU) &&
persistConfig.FileSetType == persist.FileSetFlushType {
- retrieverMgr := s.opts.DatabaseBlockRetrieverManager()
persistManager := s.opts.PersistManager()
// Neither of these should ever happen
- if seriesCachePolicy != series.CacheAll && retrieverMgr == nil {
- s.log.Fatal("tried to perform a bootstrap with persistence without retriever manager")
- }
if seriesCachePolicy != series.CacheAll && persistManager == nil {
s.log.Fatal("tried to perform a bootstrap with persistence without persist manager")
}
s.log.Info("peers bootstrapper resolving block retriever", zap.Stringer("namespace", namespace))
- r, err := retrieverMgr.Retriever(nsMetadata)
- if err != nil {
- return nil, err
- }
-
persist, err := persistManager.StartFlushPersist()
if err != nil {
return nil, err
@@ -241,7 +259,6 @@ func (s *peersSource) readData(
defer persist.DoneFlush()
shouldPersist = true
- shardRetrieverMgr = block.NewDatabaseShardBlockRetrieverManager(r)
persistFlush = persist
}
@@ -249,7 +266,7 @@ func (s *peersSource) readData(
session, err := s.opts.AdminClient().DefaultAdminSession()
if err != nil {
s.log.Error("peers bootstrapper cannot get default admin session", zap.Error(err))
- result.SetUnfulfilled(shardsTimeRanges)
+ result.SetUnfulfilled(shardTimeRanges)
return nil, err
}
@@ -260,7 +277,7 @@ func (s *peersSource) readData(
persistenceMaxQueueSize = s.opts.PersistenceMaxQueueSize()
persistenceQueue = make(chan persistenceFlush, persistenceMaxQueueSize)
resultOpts = s.opts.ResultOptions()
- count = len(shardsTimeRanges)
+ count = shardTimeRanges.Len()
concurrency = s.opts.DefaultShardConcurrency()
blockSize = nsMetadata.Options().RetentionOptions().BlockSize()
)
@@ -279,14 +296,14 @@ func (s *peersSource) readData(
workers := xsync.NewWorkerPool(concurrency)
workers.Init()
- for shard, ranges := range shardsTimeRanges {
+ for shard, ranges := range shardTimeRanges.Iter() {
shard, ranges := shard, ranges
wg.Add(1)
workers.Go(func() {
defer wg.Done()
s.fetchBootstrapBlocksFromPeers(shard, ranges, nsMetadata, session,
accumulator, resultOpts, result, &resultLock, shouldPersist,
- persistenceQueue, shardRetrieverMgr, blockSize)
+ persistenceQueue, blockSize)
})
}
@@ -317,7 +334,7 @@ func (s *peersSource) startPersistenceQueueWorkerLoop(
// at a time as shard results are gathered.
for flush := range persistenceQueue {
err := s.flush(opts, persistFlush, flush.nsMetadata, flush.shard,
- flush.shardRetrieverMgr, flush.shardResult, flush.timeRange)
+ flush.shardResult, flush.timeRange)
if err == nil {
continue
}
@@ -329,9 +346,10 @@ func (s *peersSource) startPersistenceQueueWorkerLoop(
// Make unfulfilled.
lock.Lock()
unfulfilled := bootstrapResult.Unfulfilled().Copy()
- unfulfilled.AddRanges(result.ShardTimeRanges{
- flush.shard: xtime.NewRanges(flush.timeRange),
- })
+ unfulfilled.AddRanges(result.NewShardTimeRanges().Set(
+ flush.shard,
+ xtime.NewRanges(flush.timeRange),
+ ))
bootstrapResult.SetUnfulfilled(unfulfilled)
lock.Unlock()
}
@@ -355,7 +373,6 @@ func (s *peersSource) fetchBootstrapBlocksFromPeers(
lock *sync.Mutex,
shouldPersist bool,
persistenceQueue chan persistenceFlush,
- shardRetrieverMgr block.DatabaseShardBlockRetrieverManager,
blockSize time.Duration,
) {
it := ranges.Iter()
@@ -363,7 +380,7 @@ func (s *peersSource) fetchBootstrapBlocksFromPeers(
unfulfill := func(r xtime.Range) {
lock.Lock()
unfulfilled := bootstrapResult.Unfulfilled()
- unfulfilled.AddRanges(result.ShardTimeRanges{shard: xtime.NewRanges(r)})
+ unfulfilled.AddRanges(result.NewShardTimeRanges().Set(shard, xtime.NewRanges(r)))
lock.Unlock()
}
for it.Next() {
@@ -383,11 +400,10 @@ func (s *peersSource) fetchBootstrapBlocksFromPeers(
if shouldPersist {
persistenceQueue <- persistenceFlush{
- nsMetadata: nsMetadata,
- shard: shard,
- shardRetrieverMgr: shardRetrieverMgr,
- shardResult: shardResult,
- timeRange: xtime.Range{Start: blockStart, End: blockEnd},
+ nsMetadata: nsMetadata,
+ shard: shard,
+ shardResult: shardResult,
+ timeRange: xtime.Range{Start: blockStart, End: blockEnd},
}
continue
}
@@ -469,7 +485,6 @@ func (s *peersSource) flush(
flush persist.FlushPreparer,
nsMetadata namespace.Metadata,
shard uint32,
- shardRetrieverMgr block.DatabaseShardBlockRetrieverManager,
shardResult result.ShardResult,
tr xtime.Range,
) error {
@@ -533,6 +548,10 @@ func (s *peersSource) flush(
// we just peer-bootstrapped because the operator has already made it
// clear that they only want data to be returned if it came from peers
// (they made this decision by turning off the Filesystem bootstrapper).
+ // 3) We have received a shard/block that we previously owned, e.g. when a
+ // node was added to this replication group and later removed.
+ // Although we take writes while bootstrapping, we do not allow flushes,
+ // so it is safe to delete the on-disk data.
DeleteIfExists: true,
}
prepared, err := flush.PrepareData(prepareOpts)
@@ -570,7 +589,9 @@ func (s *peersSource) flush(
break
}
- err = prepared.Persist(s.ID, s.Tags, segment, checksum)
+ metadata := persist.NewMetadataFromIDAndTags(s.ID, s.Tags,
+ persist.MetadataOptions{})
+ err = prepared.Persist(metadata, segment, checksum)
flushCtx.BlockingCloseReset()
if err != nil {
blockErr = err // Need to call prepared.Close, avoid return
@@ -636,8 +657,10 @@ func (s *peersSource) flush(
func (s *peersSource) readIndex(
ns namespace.Metadata,
- shardsTimeRanges result.ShardTimeRanges,
+ shardTimeRanges result.ShardTimeRanges,
+ builder *result.IndexBuilder,
opts bootstrap.RunOptions,
+ span opentracing.Span,
) (result.IndexBootstrapResult, error) {
if err := s.validateRunOpts(opts); err != nil {
return nil, err
@@ -646,12 +669,12 @@ func (s *peersSource) readIndex(
// FOLLOWUP(r): Try to reuse any metadata fetched during the ReadData(...)
// call rather than going to the network again
r := result.NewIndexBootstrapResult()
- if shardsTimeRanges.IsEmpty() {
+ if shardTimeRanges.IsEmpty() {
return r, nil
}
var (
- count = len(shardsTimeRanges)
+ count = shardTimeRanges.Len()
indexBlockSize = ns.Options().IndexOptions().BlockSize()
runtimeOpts = s.opts.RuntimeOptionsManager().Get()
fsOpts = s.opts.FilesystemOptions()
@@ -669,18 +692,33 @@ func (s *peersSource) readIndex(
zap.Int("shards", count),
)
- go bootstrapper.EnqueueReaders(ns, opts, runtimeOpts, fsOpts, shardsTimeRanges, readerPool,
- readersCh, indexBlockSize, s.log)
+ go bootstrapper.EnqueueReaders(bootstrapper.EnqueueReadersOptions{
+ NsMD: ns,
+ RunOpts: opts,
+ RuntimeOpts: runtimeOpts,
+ FsOpts: fsOpts,
+ ShardTimeRanges: shardTimeRanges,
+ ReaderPool: readerPool,
+ ReadersCh: readersCh,
+ BlockSize: indexBlockSize,
+ // NB(bodu): We only read metadata when performing a peers bootstrap
+ // so we do not need to sort the data fileset reader.
+ OptimizedReadMetadataOnly: true,
+ Logger: s.log,
+ Span: span,
+ NowFn: s.nowFn,
+ })
for timeWindowReaders := range readersCh {
// NB(bodu): Since we are re-using the same builder for all bootstrapped index blocks,
// it is not thread safe and requires reset after every processed index block.
- s.builder.Builder().Reset(0)
+ builder.Builder().Reset()
// NB(bodu): This is fetching the data for all shards for a block of time.
remainingRanges, timesWithErrors := s.processReaders(
ns,
r,
+ builder,
timeWindowReaders,
readerPool,
idxOpts,
@@ -695,6 +733,7 @@ func (s *peersSource) readIndex(
func (s *peersSource) readNextEntryAndMaybeIndex(
r fs.DataFileSetReader,
batch []doc.Document,
+ builder *result.IndexBuilder,
) ([]doc.Document, error) {
// If performing index run, then simply read the metadata and add to segment.
id, tagsIter, _, _, err := r.ReadMetadata()
@@ -702,7 +741,7 @@ func (s *peersSource) readNextEntryAndMaybeIndex(
return batch, err
}
- d, err := convert.FromMetricIter(id, tagsIter)
+ d, err := convert.FromSeriesIDAndTagIter(id, tagsIter)
// Finalize the ID and tags.
id.Finalize()
tagsIter.Close()
@@ -713,7 +752,7 @@ func (s *peersSource) readNextEntryAndMaybeIndex(
batch = append(batch, d)
if len(batch) >= index.DocumentArrayPoolCapacity {
- return s.builder.FlushBatch(batch)
+ return builder.FlushBatch(batch)
}
return batch, nil
@@ -722,6 +761,7 @@ func (s *peersSource) readNextEntryAndMaybeIndex(
func (s *peersSource) processReaders(
ns namespace.Metadata,
r result.IndexBootstrapResult,
+ builder *result.IndexBuilder,
timeWindowReaders bootstrapper.TimeWindowReaders,
readerPool *bootstrapper.ReaderPool,
idxOpts namespace.IndexOptions,
@@ -750,13 +790,13 @@ func (s *peersSource) processReaders(
r.IndexResults().AddBlockIfNotExists(start, idxOpts)
numEntries := reader.Entries()
for i := 0; err == nil && i < numEntries; i++ {
- batch, err = s.readNextEntryAndMaybeIndex(reader, batch)
+ batch, err = s.readNextEntryAndMaybeIndex(reader, batch, builder)
totalEntries++
}
// NB(bodu): Only flush if we've experienced no errors up until this point.
if err == nil && len(batch) > 0 {
- batch, err = s.builder.FlushBatch(batch)
+ batch, err = builder.FlushBatch(batch)
}
// Validate the read results
@@ -766,20 +806,23 @@ func (s *peersSource) processReaders(
if err == nil {
// Mark index block as fulfilled.
- fulfilled := result.ShardTimeRanges{
- shard: xtime.Ranges{}.AddRange(timeRange),
- }
+ fulfilled := result.NewShardTimeRanges().Set(
+ shard,
+ xtime.NewRanges(timeRange),
+ )
err = r.IndexResults().MarkFulfilled(start, fulfilled,
- idxOpts)
+ // NB(bodu): By default, we always load bootstrapped data into the default index volume.
+ idxpersist.DefaultIndexVolumeType, idxOpts)
}
if err == nil {
- remainingRanges.Subtract(result.ShardTimeRanges{
- shard: xtime.Ranges{}.AddRange(timeRange),
- })
+ remainingRanges.Subtract(result.NewShardTimeRanges().Set(
+ shard,
+ xtime.NewRanges(timeRange),
+ ))
} else {
- s.log.Error(err.Error(),
- zap.String("timeRange.start", fmt.Sprintf("%v", start)))
+ s.log.Error("error processing readers", zap.Error(err),
+ zap.Time("timeRange.start", start))
timesWithErrors = append(timesWithErrors, timeRange.Start)
}
}
@@ -791,8 +834,30 @@ func (s *peersSource) processReaders(
// Only persist to disk if the requested ranges were completely fulfilled.
// Otherwise, this is the latest index segment and should only exist in mem.
- shouldPersist := remainingRanges.IsEmpty()
- min, max := requestedRanges.MinMax()
+ var (
+ iopts = s.opts.ResultOptions().InstrumentOptions()
+ shouldPersist = remainingRanges.IsEmpty()
+ min, max = requestedRanges.MinMax()
+ indexBlockSize = ns.Options().IndexOptions().BlockSize()
+ blockStart = min.Truncate(indexBlockSize)
+ blockEnd = blockStart.Add(indexBlockSize)
+ indexBlock result.IndexBlock
+ err error
+ )
+
+ // NB(bodu): Assume data bootstrapped from disk uses the "default" index volume type.
+ existingIndexBlock, ok := bootstrapper.GetDefaultIndexBlockForBlockStart(r.IndexResults(), blockStart)
+ if !ok {
+ err := fmt.Errorf("could not find index block in results: time=%s, ts=%d",
+ blockStart.String(), blockStart.UnixNano())
+ instrument.EmitAndLogInvariantViolation(iopts, func(l *zap.Logger) {
+ l.Error("peers bootstrap failed",
+ zap.Error(err),
+ zap.Stringer("namespace", ns.ID()),
+ zap.Stringer("requestedRanges", requestedRanges))
+ })
+ }
+
buildIndexLogFields := []zapcore.Field{
zap.Bool("shouldPersist", shouldPersist),
zap.Int("totalEntries", totalEntries),
@@ -801,16 +866,18 @@ func (s *peersSource) processReaders(
zap.String("remainingRanges", remainingRanges.SummaryString()),
}
if shouldPersist {
- s.log.Info("building file set index segment", buildIndexLogFields...)
- if err := bootstrapper.PersistBootstrapIndexSegment(
+ s.log.Debug("building file set index segment", buildIndexLogFields...)
+ indexBlock, err = bootstrapper.PersistBootstrapIndexSegment(
ns,
requestedRanges,
- r.IndexResults(),
- s.builder.Builder(),
+ builder.Builder(),
s.persistManager,
s.opts.ResultOptions(),
- ); err != nil {
- iopts := s.opts.ResultOptions().InstrumentOptions()
+ existingIndexBlock.Fulfilled(),
+ blockStart,
+ blockEnd,
+ )
+ if err != nil {
instrument.EmitAndLogInvariantViolation(iopts, func(l *zap.Logger) {
l.Error("persist fs index bootstrap failed",
zap.Stringer("namespace", ns.ID()),
@@ -820,15 +887,17 @@ func (s *peersSource) processReaders(
}
} else {
s.log.Info("building in-memory index segment", buildIndexLogFields...)
- if err := bootstrapper.BuildBootstrapIndexSegment(
+ indexBlock, err = bootstrapper.BuildBootstrapIndexSegment(
ns,
requestedRanges,
- r.IndexResults(),
- s.builder.Builder(),
+ builder.Builder(),
s.compactor,
s.opts.ResultOptions(),
s.opts.IndexOptions().MmapReporter(),
- ); err != nil {
+ blockStart,
+ blockEnd,
+ )
+ if err != nil {
iopts := s.opts.ResultOptions().InstrumentOptions()
instrument.EmitAndLogInvariantViolation(iopts, func(l *zap.Logger) {
l.Error("build fs index bootstrap failed",
@@ -839,6 +908,17 @@ func (s *peersSource) processReaders(
}
}
+ // Merge segments and fulfilled time ranges.
+ segments := indexBlock.Segments()
+ for _, seg := range existingIndexBlock.Segments() {
+ segments = append(segments, seg)
+ }
+ newFulfilled := existingIndexBlock.Fulfilled().Copy()
+ newFulfilled.AddRanges(indexBlock.Fulfilled())
+
+ // Replace index block for default index volume type.
+ r.IndexResults()[xtime.ToUnixNano(blockStart)].SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock(segments, newFulfilled))
+
// Return readers to pool.
for _, shardReaders := range timeWindowReaders.Readers {
for _, r := range shardReaders.Readers {
@@ -890,7 +970,7 @@ func (s *peersSource) readBlockMetadataAndIndex(
batch []doc.Document,
flushBatch func() error,
) (bool, error) {
- d, err := convert.FromMetric(dataBlock.ID, dataBlock.Tags)
+ d, err := convert.FromSeriesIDAndTags(dataBlock.ID, dataBlock.Tags)
if err != nil {
return false, err
}
@@ -905,7 +985,7 @@ func (s *peersSource) readBlockMetadataAndIndex(
func (s *peersSource) peerAvailability(
nsMetadata namespace.Metadata,
- shardsTimeRanges result.ShardTimeRanges,
+ shardTimeRanges result.ShardTimeRanges,
runOpts bootstrap.RunOptions,
) (result.ShardTimeRanges, error) {
var (
@@ -913,7 +993,7 @@ func (s *peersSource) peerAvailability(
initialTopologyState = runOpts.InitialTopologyState()
)
- for shardIDUint := range shardsTimeRanges {
+ for shardIDUint := range shardTimeRanges.Iter() {
shardID := topology.ShardID(shardIDUint)
shardPeers, ok := peerAvailabilityByShard[shardID]
if !ok {
@@ -958,9 +1038,9 @@ func (s *peersSource) peerAvailability(
runtimeOpts = s.opts.RuntimeOptionsManager().Get()
bootstrapConsistencyLevel = runtimeOpts.ClientBootstrapConsistencyLevel()
majorityReplicas = initialTopologyState.MajorityReplicas
- availableShardTimeRanges = result.ShardTimeRanges{}
+ availableShardTimeRanges = result.NewShardTimeRanges()
)
- for shardIDUint := range shardsTimeRanges {
+ for shardIDUint := range shardTimeRanges.Iter() {
var (
shardID = topology.ShardID(shardIDUint)
shardPeers = peerAvailabilityByShard[shardID]
@@ -992,7 +1072,9 @@ func (s *peersSource) peerAvailability(
// all the data. This assumption is safe, as the shard/block ranges
// will simply be marked unfulfilled if the peers are not able to
// satisfy the requests.
- availableShardTimeRanges[shardIDUint] = shardsTimeRanges[shardIDUint]
+ if tr, ok := shardTimeRanges.Get(shardIDUint); ok {
+ availableShardTimeRanges.Set(shardIDUint, tr)
+ }
}
return availableShardTimeRanges, nil
@@ -1007,7 +1089,7 @@ func (s *peersSource) markIndexResultErrorAsUnfulfilled(
) {
// NB(r): We explicitly do not remove entries from the index results
// as they are additive and get merged together with results from other
// bootstrappers by just appending the result (unlike data bootstrap
// results that when merged replace the block with the current block).
// It would also be difficult to remove only series that were added to the
// index block as results from a specific data block can be subsets of the
@@ -1016,10 +1098,11 @@ func (s *peersSource) markIndexResultErrorAsUnfulfilled(
resultLock.Lock()
defer resultLock.Unlock()
- unfulfilled := result.ShardTimeRanges{
- shard: xtime.NewRanges(timeRange),
- }
- r.Add(result.IndexBlock{}, unfulfilled)
+ unfulfilled := result.NewShardTimeRanges().Set(
+ shard,
+ xtime.NewRanges(timeRange),
+ )
+ r.Add(result.NewIndexBlockByVolumeType(time.Time{}), unfulfilled)
}
func (s *peersSource) validateRunOpts(runOpts bootstrap.RunOptions) error {
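
Most of the churn above is the same mechanical migration: result.ShardTimeRanges is no longer built as a bare map literal but through result.NewShardTimeRanges() and accessed via Set/Get/Iter/Len. A self-contained sketch of what that constructor-plus-accessor shape looks like; the names below (timeRange, shardTimeRanges) are illustrative stand-ins, not the real m3 types:

// Illustrative sketch only: a minimal map-backed shardTimeRanges with the
// Set/Get/Iter shape this diff migrates callers to.
package main

import (
	"fmt"
	"time"
)

type timeRange struct{ start, end time.Time }

type shardTimeRanges struct{ m map[uint32][]timeRange }

func newShardTimeRanges() *shardTimeRanges {
	return &shardTimeRanges{m: make(map[uint32][]timeRange)}
}

// Set returns the receiver so call sites can chain .Set(...).Set(...),
// mirroring the fluent style used by the updated tests in this diff.
func (s *shardTimeRanges) Set(shard uint32, ranges ...timeRange) *shardTimeRanges {
	s.m[shard] = ranges
	return s
}

func (s *shardTimeRanges) Get(shard uint32) ([]timeRange, bool) {
	tr, ok := s.m[shard]
	return tr, ok
}

func (s *shardTimeRanges) Iter() map[uint32][]timeRange { return s.m }

func (s *shardTimeRanges) Len() int { return len(s.m) }

func main() {
	start := time.Now().Truncate(2 * time.Hour)
	end := start.Add(2 * time.Hour)
	target := newShardTimeRanges().
		Set(0, timeRange{start, end}).
		Set(1, timeRange{start, end})
	for shard, tr := range target.Iter() {
		fmt.Println(shard, tr)
	}
}
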
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go
index cbb14f6f98..8675d5d49f 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go
@@ -42,6 +42,7 @@ import (
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
"github.com/m3db/m3/src/x/checked"
+ "github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
xtest "github.com/m3db/m3/src/x/test"
xtime "github.com/m3db/m3/src/x/time"
@@ -142,7 +143,7 @@ func TestPeersSourceEmptyShardTimeRanges(t *testing.T) {
var (
nsMetadata = testNamespaceMetadata(t)
- target = result.ShardTimeRanges{}
+ target = result.NewShardTimeRanges()
runOpts = testDefaultRunOpts.SetInitialTopologyState(&topology.StateSnapshot{})
)
available, err := src.AvailableData(nsMetadata, target, runOpts)
@@ -176,14 +177,21 @@ func TestPeersSourceReturnsErrorForAdminSession(t *testing.T) {
start := time.Now().Add(-ropts.RetentionPeriod()).Truncate(ropts.BlockSize())
end := start.Add(ropts.BlockSize())
- target := result.ShardTimeRanges{
- 0: xtime.NewRanges(xtime.Range{Start: start, End: end}),
- 1: xtime.NewRanges(xtime.Range{Start: start, End: end}),
- }
+ target := result.NewShardTimeRanges().Set(
+ 0,
+ xtime.NewRanges(xtime.Range{Start: start, End: end}),
+ ).Set(
+ 1,
+ xtime.NewRanges(xtime.Range{Start: start, End: end}),
+ )
tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, target, nsMetadata)
defer tester.Finish()
- _, err = src.Read(tester.Namespaces)
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ _, err = src.Read(ctx, tester.Namespaces)
require.Error(t, err)
assert.Equal(t, expectedErr, err)
tester.EnsureNoLoadedBlocks()
@@ -228,9 +236,10 @@ func TestPeersSourceReturnsUnfulfilled(t *testing.T) {
src, err := newPeersSource(opts)
require.NoError(t, err)
- target := result.ShardTimeRanges{
- 0: xtime.NewRanges(xtime.Range{Start: start, End: end}),
- }
+ target := result.NewShardTimeRanges().Set(
+ 0,
+ xtime.NewRanges(xtime.Range{Start: start, End: end}),
+ )
tester := bootstrap.BuildNamespacesTester(t, testDefaultRunOpts, target, nsMetadata)
defer tester.Finish()
@@ -269,10 +278,10 @@ func TestPeersSourceRunWithPersist(t *testing.T) {
shard0ResultBlock1 := result.NewShardResult(0, opts.ResultOptions())
shard0ResultBlock2 := result.NewShardResult(0, opts.ResultOptions())
fooBlock := block.NewDatabaseBlock(start, ropts.BlockSize(),
- ts.NewSegment(checked.NewBytes([]byte{1, 2, 3}, nil), nil, ts.FinalizeNone),
+ ts.NewSegment(checked.NewBytes([]byte{1, 2, 3}, nil), nil, 1, ts.FinalizeNone),
testBlockOpts, namespace.Context{})
barBlock := block.NewDatabaseBlock(start.Add(ropts.BlockSize()), ropts.BlockSize(),
- ts.NewSegment(checked.NewBytes([]byte{4, 5, 6}, nil), nil, ts.FinalizeNone),
+ ts.NewSegment(checked.NewBytes([]byte{4, 5, 6}, nil), nil, 2, ts.FinalizeNone),
testBlockOpts, namespace.Context{})
shard0ResultBlock1.AddBlock(ident.StringID("foo"), ident.NewTags(ident.StringTag("foo", "oof")), fooBlock)
shard0ResultBlock2.AddBlock(ident.StringID("bar"), ident.NewTags(ident.StringTag("bar", "rab")), barBlock)
@@ -280,7 +289,7 @@ func TestPeersSourceRunWithPersist(t *testing.T) {
shard1ResultBlock1 := result.NewShardResult(0, opts.ResultOptions())
shard1ResultBlock2 := result.NewShardResult(0, opts.ResultOptions())
bazBlock := block.NewDatabaseBlock(start, ropts.BlockSize(),
- ts.NewSegment(checked.NewBytes([]byte{7, 8, 9}, nil), nil, ts.FinalizeNone),
+ ts.NewSegment(checked.NewBytes([]byte{7, 8, 9}, nil), nil, 3, ts.FinalizeNone),
testBlockOpts, namespace.Context{})
shard1ResultBlock1.AddBlock(ident.StringID("baz"), ident.NewTags(ident.StringTag("baz", "zab")), bazBlock)
@@ -315,15 +324,6 @@ func TestPeersSourceRunWithPersist(t *testing.T) {
opts = opts.SetAdminClient(mockAdminClient)
- mockRetriever := block.NewMockDatabaseBlockRetriever(ctrl)
-
- mockRetrieverMgr := block.NewMockDatabaseBlockRetrieverManager(ctrl)
- mockRetrieverMgr.EXPECT().
- Retriever(namespace.NewMetadataMatcher(testNsMd)).
- Return(mockRetriever, nil)
-
- opts = opts.SetDatabaseBlockRetrieverManager(mockRetrieverMgr)
-
flushPreparer := persist.NewMockFlushPreparer(ctrl)
flushPreparer.EXPECT().DoneFlush()
persists := make(map[string]int)
@@ -337,9 +337,9 @@ func TestPeersSourceRunWithPersist(t *testing.T) {
flushPreparer.EXPECT().
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
- Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
persists["foo"]++
- assert.Equal(t, "foo", id.String())
+ assert.Equal(t, "foo", string(metadata.BytesID()))
assert.Equal(t, []byte{1, 2, 3}, segment.Head.Bytes())
assertBlockChecksum(t, checksum, fooBlock)
return nil
@@ -358,9 +358,9 @@ func TestPeersSourceRunWithPersist(t *testing.T) {
flushPreparer.EXPECT().
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
- Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
persists["bar"]++
- assert.Equal(t, "bar", id.String())
+ assert.Equal(t, "bar", string(metadata.BytesID()))
assert.Equal(t, []byte{4, 5, 6}, segment.Head.Bytes())
assertBlockChecksum(t, checksum, barBlock)
return nil
@@ -379,9 +379,9 @@ func TestPeersSourceRunWithPersist(t *testing.T) {
flushPreparer.EXPECT().
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
- Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
persists["baz"]++
- assert.Equal(t, "baz", id.String())
+ assert.Equal(t, "baz", string(metadata.BytesID()))
assert.Equal(t, []byte{7, 8, 9}, segment.Head.Bytes())
assertBlockChecksum(t, checksum, bazBlock)
return nil
@@ -400,7 +400,7 @@ func TestPeersSourceRunWithPersist(t *testing.T) {
flushPreparer.EXPECT().
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
- Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
assert.Fail(t, "no expected shard 1 second block")
return nil
},
@@ -418,10 +418,13 @@ func TestPeersSourceRunWithPersist(t *testing.T) {
src, err := newPeersSource(opts)
require.NoError(t, err)
- target := result.ShardTimeRanges{
- 0: xtime.NewRanges(xtime.Range{Start: start, End: end}),
- 1: xtime.NewRanges(xtime.Range{Start: start, End: end}),
- }
+ target := result.NewShardTimeRanges().Set(
+ 0,
+ xtime.NewRanges(xtime.Range{Start: start, End: end}),
+ ).Set(
+ 1,
+ xtime.NewRanges(xtime.Range{Start: start, End: end}),
+ )
tester := bootstrap.BuildNamespacesTester(t, testRunOptsWithPersist, target, testNsMd)
defer tester.Finish()
@@ -483,7 +486,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
addResult(0, "foo", fooBlocks[0], true)
fooBlocks[1] = block.NewDatabaseBlock(midway, ropts.BlockSize(),
- ts.NewSegment(checked.NewBytes([]byte{1, 2, 3}, nil), nil, ts.FinalizeNone),
+ ts.NewSegment(checked.NewBytes([]byte{1, 2, 3}, nil), nil, 1, ts.FinalizeNone),
testBlockOpts, namespace.Context{})
addResult(0, "foo", fooBlocks[1], false)
@@ -502,31 +505,31 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
addResult(1, "bar", barBlocks[0], false)
barBlocks[1] = block.NewDatabaseBlock(midway, ropts.BlockSize(),
- ts.NewSegment(checked.NewBytes([]byte{4, 5, 6}, nil), nil, ts.FinalizeNone),
+ ts.NewSegment(checked.NewBytes([]byte{4, 5, 6}, nil), nil, 2, ts.FinalizeNone),
testBlockOpts, namespace.Context{})
addResult(1, "bar", barBlocks[1], false)
// baz results
var bazBlocks [2]block.DatabaseBlock
bazBlocks[0] = block.NewDatabaseBlock(start, ropts.BlockSize(),
- ts.NewSegment(checked.NewBytes([]byte{7, 8, 9}, nil), nil, ts.FinalizeNone),
+ ts.NewSegment(checked.NewBytes([]byte{7, 8, 9}, nil), nil, 3, ts.FinalizeNone),
testBlockOpts, namespace.Context{})
addResult(2, "baz", bazBlocks[0], false)
bazBlocks[1] = block.NewDatabaseBlock(midway, ropts.BlockSize(),
- ts.NewSegment(checked.NewBytes([]byte{10, 11, 12}, nil), nil, ts.FinalizeNone),
+ ts.NewSegment(checked.NewBytes([]byte{10, 11, 12}, nil), nil, 4, ts.FinalizeNone),
testBlockOpts, namespace.Context{})
addResult(2, "baz", bazBlocks[1], false)
// qux results
var quxBlocks [2]block.DatabaseBlock
quxBlocks[0] = block.NewDatabaseBlock(start, ropts.BlockSize(),
- ts.NewSegment(checked.NewBytes([]byte{13, 14, 15}, nil), nil, ts.FinalizeNone),
+ ts.NewSegment(checked.NewBytes([]byte{13, 14, 15}, nil), nil, 5, ts.FinalizeNone),
testBlockOpts, namespace.Context{})
addResult(3, "qux", quxBlocks[0], false)
quxBlocks[1] = block.NewDatabaseBlock(midway, ropts.BlockSize(),
- ts.NewSegment(checked.NewBytes([]byte{16, 17, 18}, nil), nil, ts.FinalizeNone),
+ ts.NewSegment(checked.NewBytes([]byte{16, 17, 18}, nil), nil, 6, ts.FinalizeNone),
testBlockOpts, namespace.Context{})
addResult(3, "qux", quxBlocks[1], false)
@@ -559,14 +562,6 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
opts = opts.SetAdminClient(mockAdminClient)
- mockRetriever := block.NewMockDatabaseBlockRetriever(ctrl)
- mockRetrieverMgr := block.NewMockDatabaseBlockRetrieverManager(ctrl)
- mockRetrieverMgr.EXPECT().
- Retriever(namespace.NewMetadataMatcher(testNsMd)).
- Return(mockRetriever, nil)
-
- opts = opts.SetDatabaseBlockRetrieverManager(mockRetrieverMgr)
-
flushPreprarer := persist.NewMockFlushPreparer(ctrl)
flushPreprarer.EXPECT().DoneFlush()
@@ -583,7 +578,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
flushPreprarer.EXPECT().
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
- Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
assert.Fail(t, "not expecting to flush shard 0 at start")
return nil
},
@@ -601,7 +596,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
flushPreprarer.EXPECT().
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
- Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
persists["foo"]++
return nil
},
@@ -621,7 +616,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
flushPreprarer.EXPECT().
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
- Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
assert.Fail(t, "not expecting to flush shard 0 at start + block size")
return nil
},
@@ -639,7 +634,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
flushPreprarer.EXPECT().
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
- Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
persists["bar"]++
return nil
},
@@ -659,7 +654,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
flushPreprarer.EXPECT().
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
- Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
persists["baz"]++
return fmt.Errorf("a persist error")
},
@@ -677,7 +672,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
flushPreprarer.EXPECT().
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
- Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
persists["baz"]++
return nil
},
@@ -697,7 +692,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
flushPreprarer.EXPECT().
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
- Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
persists["qux"]++
return nil
},
@@ -715,7 +710,7 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
flushPreprarer.EXPECT().
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
- Persist: func(id ident.ID, _ ident.Tags, segment ts.Segment, checksum uint32) error {
+ Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
persists["qux"]++
return nil
},
@@ -733,31 +728,45 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
src, err := newPeersSource(opts)
require.NoError(t, err)
- target := result.ShardTimeRanges{
- 0: xtime.Ranges{}.
- AddRange(xtime.Range{Start: start, End: midway}).
- AddRange(xtime.Range{Start: midway, End: end}),
- 1: xtime.Ranges{}.
- AddRange(xtime.Range{Start: start, End: midway}).
- AddRange(xtime.Range{Start: midway, End: end}),
- 2: xtime.Ranges{}.
- AddRange(xtime.Range{Start: start, End: midway}).
- AddRange(xtime.Range{Start: midway, End: end}),
- 3: xtime.Ranges{}.
- AddRange(xtime.Range{Start: start, End: midway}).
- AddRange(xtime.Range{Start: midway, End: end}),
- }
+ target := result.NewShardTimeRanges().Set(
+ 0,
+ xtime.NewRanges(
+ xtime.Range{Start: start, End: midway},
+ xtime.Range{Start: midway, End: end}),
+ ).Set(
+ 1,
+ xtime.NewRanges(
+ xtime.Range{Start: start, End: midway},
+ xtime.Range{Start: midway, End: end}),
+ ).Set(
+ 2,
+ xtime.NewRanges(
+ xtime.Range{Start: start, End: midway},
+ xtime.Range{Start: midway, End: end}),
+ ).Set(
+ 3,
+ xtime.NewRanges(
+ xtime.Range{Start: start, End: midway},
+ xtime.Range{Start: midway, End: end}),
+ )
tester := bootstrap.BuildNamespacesTester(t, testRunOptsWithPersist, target, testNsMd)
defer tester.Finish()
tester.TestReadWith(src)
- expectedRanges := result.ShardTimeRanges{
- 0: xtime.Ranges{}.AddRange(xtime.Range{Start: start, End: midway}),
- 1: xtime.Ranges{}.AddRange(xtime.Range{Start: start, End: midway}),
- 2: xtime.Ranges{}.AddRange(xtime.Range{Start: start, End: midway}),
- 3: xtime.Ranges{}.AddRange(xtime.Range{Start: start, End: midway}),
- }
+ expectedRanges := result.NewShardTimeRanges().Set(
+ 0,
+ xtime.NewRanges(xtime.Range{Start: start, End: midway}),
+ ).Set(
+ 1,
+ xtime.NewRanges(xtime.Range{Start: start, End: midway}),
+ ).Set(
+ 2,
+ xtime.NewRanges(xtime.Range{Start: start, End: midway}),
+ ).Set(
+ 3,
+ xtime.NewRanges(xtime.Range{Start: start, End: midway}),
+ )
// NB(bodu): There is no time series data written to disk so all ranges fail to be fulfilled.
expectedIndexRanges := target
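
These tests also track the new Persist callback signature, which takes a single persist.Metadata built with NewMetadataFromIDAndTags instead of separate ID and tags arguments, with assertions reading the ID back through BytesID(). A rough sketch of that bundling under toy types (the real persist package differs):

// Illustrative sketch, not the m3 implementation: bundling ID and tags into
// one metadata value, as the Persist signature change above suggests.
package main

import "fmt"

type metadata struct {
	id   []byte
	tags map[string]string
}

func newMetadataFromIDAndTags(id string, tags map[string]string) metadata {
	return metadata{id: []byte(id), tags: tags}
}

// BytesID mirrors the accessor the updated assertions use in place of
// id.String().
func (m metadata) BytesID() []byte { return m.id }

func persistFn(md metadata, segment []byte, checksum uint32) error {
	fmt.Printf("persisting id=%s bytes=%d checksum=%d\n", md.BytesID(), len(segment), checksum)
	return nil
}

func main() {
	md := newMetadataFromIDAndTags("foo", map[string]string{"foo": "oof"})
	_ = persistFn(md, []byte{1, 2, 3}, 42)
}
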
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_index_test.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_index_test.go
index 5fe4a1023c..d1258f1a58 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_index_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_index_test.go
@@ -22,7 +22,6 @@ package peers
import (
"io/ioutil"
- "log"
"os"
"sort"
"testing"
@@ -31,12 +30,14 @@ import (
"github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/ts"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
@@ -121,8 +122,11 @@ func writeTSDBFiles(
for _, v := range series {
bytes := checked.NewBytes(v.data, nil)
bytes.IncRef()
- require.NoError(t, w.Write(ident.StringID(v.id),
- sortedTagsFromTagsMap(v.tags), bytes, digest.Checksum(bytes.Bytes())))
+ metadata := persist.NewMetadataFromIDAndTags(ident.StringID(v.id),
+ sortedTagsFromTagsMap(v.tags),
+ persist.MetadataOptions{})
+ require.NoError(t, w.Write(metadata, bytes,
+ digest.Checksum(bytes.Bytes())))
bytes.DecRef()
}
@@ -233,12 +237,13 @@ func TestBootstrapIndex(t *testing.T) {
end := start.Add(ropts.RetentionPeriod())
- shardTimeRanges := map[uint32]xtime.Ranges{
- 0: xtime.NewRanges(xtime.Range{
+ shardTimeRanges := result.NewShardTimeRanges().Set(
+ 0,
+ xtime.NewRanges(xtime.Range{
Start: start,
End: end,
}),
- }
+ )
mockAdminSession := client.NewMockAdminSession(ctrl)
mockAdminSession.EXPECT().
@@ -275,9 +280,10 @@ func TestBootstrapIndex(t *testing.T) {
results := tester.ResultForNamespace(nsMetadata.ID())
indexResults := results.IndexResult.IndexResults()
numBlocksWithData := 0
- for _, b := range indexResults {
- if len(b.Segments()) != 0 {
- log.Printf("result block start: %s", b.BlockStart())
+ for _, indexBlockByVolumeType := range indexResults {
+ indexBlock, ok := indexBlockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
+ require.True(t, ok)
+ if len(indexBlock.Segments()) != 0 {
numBlocksWithData++
}
}
@@ -307,11 +313,12 @@ func TestBootstrapIndex(t *testing.T) {
},
} {
expectedAt := xtime.ToUnixNano(expected.indexBlockStart)
- indexBlock, ok := indexResults[expectedAt]
+ indexBlockByVolumeType, ok := indexResults[expectedAt]
+ require.True(t, ok)
+ indexBlock, ok := indexBlockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
require.True(t, ok)
- require.Equal(t, 1, len(indexBlock.Segments()))
for _, seg := range indexBlock.Segments() {
- reader, err := seg.Reader()
+ reader, err := seg.Segment().Reader()
require.NoError(t, err)
docs, err := reader.AllDocs()
@@ -352,23 +359,29 @@ func TestBootstrapIndex(t *testing.T) {
t2 := indexStart.Add(indexBlockSize)
t3 := t2.Add(indexBlockSize)
- blk1, ok := indexResults[xtime.ToUnixNano(t1)]
+ indexBlockByVolumeType, ok := indexResults[xtime.ToUnixNano(t1)]
require.True(t, ok)
- assertShardRangesEqual(t, result.NewShardTimeRanges(t1, t2, 0), blk1.Fulfilled())
+ blk1, ok := indexBlockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
+ require.True(t, ok)
+ assertShardRangesEqual(t, result.NewShardTimeRangesFromRange(t1, t2, 0), blk1.Fulfilled())
- blk2, ok := indexResults[xtime.ToUnixNano(t2)]
+ indexBlockByVolumeType, ok = indexResults[xtime.ToUnixNano(t2)]
+ require.True(t, ok)
+ blk2, ok := indexBlockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
require.True(t, ok)
- assertShardRangesEqual(t, result.NewShardTimeRanges(t2, t3, 0), blk2.Fulfilled())
+ assertShardRangesEqual(t, result.NewShardTimeRangesFromRange(t2, t3, 0), blk2.Fulfilled())
- for _, blk := range indexResults {
- if blk.BlockStart().Equal(t1) || blk.BlockStart().Equal(t2) {
+ for _, indexBlockByVolumeType := range indexResults {
+ if indexBlockByVolumeType.BlockStart().Equal(t1) || indexBlockByVolumeType.BlockStart().Equal(t2) {
continue // already checked above
}
// rest should all be marked fulfilled despite no data, because we didn't see
// any errors in the response.
- start := blk.BlockStart()
+ start := indexBlockByVolumeType.BlockStart()
end := start.Add(indexBlockSize)
- assertShardRangesEqual(t, result.NewShardTimeRanges(start, end, 0), blk.Fulfilled())
+ blk, ok := indexBlockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
+ require.True(t, ok)
+ assertShardRangesEqual(t, result.NewShardTimeRangesFromRange(start, end, 0), blk.Fulfilled())
}
tester.EnsureNoWrites()
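
The updated assertions resolve an index block in two steps: block start to an indexBlockByVolumeType wrapper, then GetBlock(idxpersist.DefaultIndexVolumeType) to the concrete block. A toy sketch of that two-level lookup; types here are illustrative, not the m3 API:

// Sketch of the block-start -> volume-type -> index-block lookup the updated
// test performs. The real code uses result.IndexResults and
// idxpersist.IndexVolumeType.
package main

import (
	"fmt"
	"time"
)

type volumeType string

const defaultVolumeType volumeType = "default"

type indexBlock struct{ segments []string }

type indexBlockByVolumeType struct {
	blocks map[volumeType]indexBlock
}

func (b indexBlockByVolumeType) GetBlock(t volumeType) (indexBlock, bool) {
	blk, ok := b.blocks[t]
	return blk, ok
}

func main() {
	blockStart := time.Now().Truncate(2 * time.Hour).UnixNano()
	results := map[int64]indexBlockByVolumeType{
		blockStart: {blocks: map[volumeType]indexBlock{
			defaultVolumeType: {segments: []string{"seg-0"}},
		}},
	}
	byVolume, ok := results[blockStart]
	if !ok {
		panic("missing block start")
	}
	blk, ok := byVolume.GetBlock(defaultVolumeType)
	fmt.Println(ok, blk.segments)
}
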
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_test.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_test.go
index eba8f23150..02348cb804 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_test.go
@@ -32,6 +32,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/topology"
tu "github.com/m3db/m3/src/dbnode/topology/testutil"
+ "github.com/m3db/m3/src/x/context"
xtime "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
@@ -52,19 +53,19 @@ func TestPeersSourceAvailableDataAndIndex(t *testing.T) {
nsMetadata = testNamespaceMetadata(t)
numShards = uint32(4)
blockStart = time.Now().Truncate(blockSize)
- shardTimeRangesToBootstrap = result.ShardTimeRanges{}
- bootstrapRanges = xtime.Ranges{}.AddRange(xtime.Range{
+ shardTimeRangesToBootstrap = result.NewShardTimeRanges()
+ bootstrapRanges = xtime.NewRanges(xtime.Range{
Start: blockStart,
End: blockStart.Add(blockSize),
})
)
for i := 0; i < int(numShards); i++ {
- shardTimeRangesToBootstrap[uint32(i)] = bootstrapRanges
+ shardTimeRangesToBootstrap.Set(uint32(i), bootstrapRanges)
}
shardTimeRangesToBootstrapOneExtra := shardTimeRangesToBootstrap.Copy()
- shardTimeRangesToBootstrapOneExtra[100] = bootstrapRanges
+ shardTimeRangesToBootstrapOneExtra.Set(100, bootstrapRanges)
testCases := []struct {
title string
@@ -81,7 +82,7 @@ func TestPeersSourceAvailableDataAndIndex(t *testing.T) {
}),
bootstrapReadConsistency: topology.ReadConsistencyLevelMajority,
shardsTimeRangesToBootstrap: shardTimeRangesToBootstrap,
- expectedAvailableShardsTimeRanges: result.ShardTimeRanges{},
+ expectedAvailableShardsTimeRanges: result.NewShardTimeRanges(),
},
{
title: "Returns empty if all other peers initializing/unknown",
@@ -92,7 +93,7 @@ func TestPeersSourceAvailableDataAndIndex(t *testing.T) {
}),
bootstrapReadConsistency: topology.ReadConsistencyLevelMajority,
shardsTimeRangesToBootstrap: shardTimeRangesToBootstrap,
- expectedAvailableShardsTimeRanges: result.ShardTimeRanges{},
+ expectedAvailableShardsTimeRanges: result.NewShardTimeRanges(),
expectedErr: errors.New("unknown shard state: Unknown"),
},
{
@@ -126,7 +127,7 @@ func TestPeersSourceAvailableDataAndIndex(t *testing.T) {
}),
bootstrapReadConsistency: topology.ReadConsistencyLevelAll,
shardsTimeRangesToBootstrap: shardTimeRangesToBootstrap,
- expectedAvailableShardsTimeRanges: result.ShardTimeRanges{},
+ expectedAvailableShardsTimeRanges: result.NewShardTimeRanges(),
},
}
@@ -188,15 +189,22 @@ func TestPeersSourceReturnsErrorIfUnknownPersistenceFileSetType(t *testing.T) {
src, err := newPeersSource(opts)
require.NoError(t, err)
- target := result.ShardTimeRanges{
- 0: xtime.NewRanges(xtime.Range{Start: start, End: end}),
- 1: xtime.NewRanges(xtime.Range{Start: start, End: end}),
- }
+ target := result.NewShardTimeRanges().Set(
+ 0,
+ xtime.NewRanges(xtime.Range{Start: start, End: end}),
+ ).Set(
+ 1,
+ xtime.NewRanges(xtime.Range{Start: start, End: end}),
+ )
runOpts := testRunOptsWithPersist.SetPersistConfig(bootstrap.PersistConfig{Enabled: true, FileSetType: 999})
tester := bootstrap.BuildNamespacesTester(t, runOpts, target, testNsMd)
defer tester.Finish()
- _, err = src.Read(tester.Namespaces)
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ _, err = src.Read(ctx, tester.Namespaces)
require.Error(t, err)
require.True(t, strings.Contains(err.Error(), "unknown persist config fileset file type"))
tester.EnsureNoLoadedBlocks()
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/types.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/types.go
index 86770c3681..3e7193f607 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/types.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/types.go
@@ -25,7 +25,6 @@ import (
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
- "github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
@@ -93,18 +92,6 @@ type Options interface {
// Compactor returns the compactor used to compact segment builders into segments.
Compactor() *compaction.Compactor
- // SetDatabaseBlockRetrieverManager sets the block retriever manager to
- // pass to newly flushed blocks when performing a bootstrap run with
- // persistence enabled.
- SetDatabaseBlockRetrieverManager(
- value block.DatabaseBlockRetrieverManager,
- ) Options
-
- // NewBlockRetrieverFn returns the block retriever manager to
- // pass to newly flushed blocks when performing a bootstrap run with
- // persistence enabled.
- DatabaseBlockRetrieverManager() block.DatabaseBlockRetrieverManager
-
// SetRuntimeOptionsManagers sets the RuntimeOptionsManager.
SetRuntimeOptionsManager(value m3dbruntime.OptionsManager) Options
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/persist.go b/src/dbnode/storage/bootstrap/bootstrapper/persist.go
index 1df2856f00..acb908c7f0 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/persist.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/persist.go
@@ -23,6 +23,7 @@ package bootstrapper
import (
"fmt"
"sync"
+ "time"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
@@ -30,6 +31,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
"github.com/m3db/m3/src/m3ninx/index/segment"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/mmap"
xtime "github.com/m3db/m3/src/x/time"
)
@@ -54,18 +56,22 @@ type SharedCompactor struct {
func PersistBootstrapIndexSegment(
ns namespace.Metadata,
requestedRanges result.ShardTimeRanges,
- indexResults result.IndexResults,
builder segment.DocumentsBuilder,
persistManager *SharedPersistManager,
resultOpts result.Options,
-) error {
+ fulfilled result.ShardTimeRanges,
+ blockStart time.Time,
+ blockEnd time.Time,
+) (result.IndexBlock, error) {
+ // No-op if there are no documents that need to be written for this time block (nothing to persist).
+ if len(builder.Docs()) == 0 {
+ return result.IndexBlock{}, nil
+ }
+
// If we're performing an index run with persistence enabled
// determine if we covered a full block exactly (which should
// occur since we always group readers by block size).
- min, max := requestedRanges.MinMax()
- blockSize := ns.Options().IndexOptions().BlockSize()
- blockStart := min.Truncate(blockSize)
- blockEnd := blockStart.Add(blockSize)
+ _, max := requestedRanges.MinMax()
expectedRangeStart, expectedRangeEnd := blockStart, blockEnd
// Index blocks can be arbitrarily larger than data blocks, but the
@@ -91,65 +97,46 @@ func PersistBootstrapIndexSegment(
}
shards := make(map[uint32]struct{})
- expectedRanges := make(result.ShardTimeRanges, len(requestedRanges))
- for shard := range requestedRanges {
+ expectedRanges := result.NewShardTimeRangesFromSize(requestedRanges.Len())
+ for shard := range requestedRanges.Iter() {
shards[shard] = struct{}{}
- expectedRanges[shard] = xtime.Ranges{}.AddRange(xtime.Range{
+ expectedRanges.Set(shard, xtime.NewRanges(xtime.Range{
Start: expectedRangeStart,
End: expectedRangeEnd,
- })
- }
-
- indexBlock, ok := indexResults[xtime.ToUnixNano(blockStart)]
- if !ok {
- // NB(bodu): We currently write empty data files to disk, which means that we can attempt to bootstrap
- // time ranges that have no data and no index block.
- // For example:
- // - peers data bootstrap from peer nodes receives peer blocks w/ no data (empty)
- // - peers data bootstrap writes empty ts data files to disk
- // - peers index bootstrap reads empty ts data files md from disk
- // - attempt to bootstrap time ranges that have no index results block
- return fmt.Errorf("could not find index block in results: time=%s, ts=%d",
- blockStart.String(), blockStart.UnixNano())
- }
- if len(builder.Docs()) == 0 {
- // No-op if there are no documents that ned to be written for this time block (nothing to persist).
- return nil
+ }))
}
- var (
- fulfilled = indexBlock.Fulfilled()
- success = false
- persistedSegments []segment.Segment
+ return persistBootstrapIndexSegment(
+ ns,
+ shards,
+ builder,
+ persistManager,
+ requestedRanges,
+ expectedRanges,
+ fulfilled,
+ blockStart,
+ max,
)
- defer func() {
- if !success {
- return
- }
-
- // Combine persisted and existing segments.
- segments := make([]segment.Segment, 0, len(persistedSegments))
- for _, pSeg := range persistedSegments {
- segments = append(segments, NewSegment(pSeg, true))
- }
- for _, seg := range indexBlock.Segments() {
- segments = append(segments, seg)
- }
-
- // Now replace the active segment with the persisted segment.
- newFulfilled := fulfilled.Copy()
- newFulfilled.AddRanges(expectedRanges)
- replacedBlock := result.NewIndexBlock(blockStart, segments, newFulfilled)
- indexResults[xtime.ToUnixNano(blockStart)] = replacedBlock
- }()
+}
+func persistBootstrapIndexSegment(
+ ns namespace.Metadata,
+ shards map[uint32]struct{},
+ builder segment.DocumentsBuilder,
+ persistManager *SharedPersistManager,
+ requestedRanges result.ShardTimeRanges,
+ expectedRanges result.ShardTimeRanges,
+ fulfilled result.ShardTimeRanges,
+ blockStart time.Time,
+ max time.Time,
+) (result.IndexBlock, error) {
// Check that we completely fulfilled all shards for the block
// and we didn't bootstrap any more/less than expected.
requireFulfilled := expectedRanges.Copy()
requireFulfilled.Subtract(fulfilled)
- exactStartEnd := max.Equal(blockStart.Add(blockSize))
+ exactStartEnd := max.Equal(blockStart.Add(ns.Options().IndexOptions().BlockSize()))
if !exactStartEnd || !requireFulfilled.IsEmpty() {
- return fmt.Errorf("persistent fs index bootstrap invalid ranges to persist: "+
+ return result.IndexBlock{}, fmt.Errorf("persistent fs index bootstrap invalid ranges to persist: "+
"expected=%v, actual=%v, fulfilled=%v, exactStartEnd=%v, requireFulfilledEmpty=%v",
expectedRanges.String(), requestedRanges.String(), fulfilled.String(),
exactStartEnd, requireFulfilled.IsEmpty())
@@ -163,7 +150,7 @@ func PersistBootstrapIndexSegment(
flush, err := persistManager.Mgr.StartIndexPersist()
if err != nil {
- return err
+ return result.IndexBlock{}, err
}
var calledDone bool
@@ -175,12 +162,14 @@ func PersistBootstrapIndexSegment(
preparedPersist, err := flush.PrepareIndex(persist.IndexPrepareOptions{
NamespaceMetadata: ns,
- BlockStart: indexBlock.BlockStart(),
+ BlockStart: blockStart,
FileSetType: persist.FileSetFlushType,
Shards: shards,
+ // NB(bodu): Assume the default volume type when persisting bootstrapped index data.
+ IndexVolumeType: idxpersist.DefaultIndexVolumeType,
})
if err != nil {
- return err
+ return result.IndexBlock{}, err
}
var calledClose bool
@@ -191,42 +180,46 @@ func PersistBootstrapIndexSegment(
}()
if err := preparedPersist.Persist(builder); err != nil {
- return err
+ return result.IndexBlock{}, err
}
calledClose = true
- persistedSegments, err = preparedPersist.Close()
+ persistedSegments, err := preparedPersist.Close()
if err != nil {
- return err
+ return result.IndexBlock{}, err
}
calledDone = true
if err := flush.DoneIndex(); err != nil {
- return err
+ return result.IndexBlock{}, err
+ }
+ segments := make([]result.Segment, 0, len(persistedSegments))
+ for _, pSeg := range persistedSegments {
+ segments = append(segments, result.NewSegment(pSeg, true))
}
- // Indicate the defer above should merge newly built segments w/ existing.
- success = true
- return nil
+ return result.NewIndexBlock(segments, expectedRanges), nil
}
// BuildBootstrapIndexSegment is a helper function that builds (in memory) bootstrapped index segments for a ns -> block of time.
func BuildBootstrapIndexSegment(
ns namespace.Metadata,
requestedRanges result.ShardTimeRanges,
- indexResults result.IndexResults,
builder segment.DocumentsBuilder,
compactor *SharedCompactor,
resultOpts result.Options,
mmapReporter mmap.Reporter,
-) error {
+ blockStart time.Time,
+ blockEnd time.Time,
+) (result.IndexBlock, error) {
+ // No-op if there are no documents that need to be written for this time block (nothing to persist).
+ if len(builder.Docs()) == 0 {
+ return result.IndexBlock{}, nil
+ }
+
// If we're performing an index run with persistence enabled
// determine if we covered a full block exactly (which should
// occur since we always group readers by block size).
- min, _ := requestedRanges.MinMax()
- blockSize := ns.Options().IndexOptions().BlockSize()
- blockStart := min.Truncate(blockSize)
- blockEnd := blockStart.Add(blockSize)
expectedRangeStart, expectedRangeEnd := blockStart, blockEnd
// Index blocks can be arbitrarily larger than data blocks, but the
@@ -251,29 +244,12 @@ func BuildBootstrapIndexSegment(
expectedRangeStart = earliestRetentionTime
}
- expectedRanges := make(result.ShardTimeRanges, len(requestedRanges))
- for shard := range requestedRanges {
- expectedRanges[shard] = xtime.Ranges{}.AddRange(xtime.Range{
+ expectedRanges := result.NewShardTimeRangesFromSize(requestedRanges.Len())
+ for shard := range requestedRanges.Iter() {
+ expectedRanges.Set(shard, xtime.NewRanges(xtime.Range{
Start: expectedRangeStart,
End: expectedRangeEnd,
- })
- }
-
- indexBlock, ok := indexResults[xtime.ToUnixNano(blockStart)]
- if !ok {
- // NB(bodu): We currently write empty data files to disk, which means that we can attempt to bootstrap
- // time ranges that have no data and no index block.
- // For example:
- // - peers data bootstrap from peer nodes receives peer blocks w/ no data (empty)
- // - peers data bootstrap writes empty ts data files to disk
- // - peers index bootstrap reads empty ts data files md from disk
- // - attempt to bootstrap time ranges that have no index results block
- return fmt.Errorf("could not find index block in results: time=%s, ts=%d",
- blockStart.String(), blockStart.UnixNano())
- }
- if len(builder.Docs()) == 0 {
- // No-op if there are no documents that ned to be written for this time block (nothing to persist).
- return nil
+ }))
}
compactor.Lock()
@@ -285,18 +261,33 @@ func BuildBootstrapIndexSegment(
Reporter: mmapReporter,
})
if err != nil {
- return err
+ return result.IndexBlock{}, err
}
- segments := []segment.Segment{NewSegment(seg, false)}
- for _, seg := range indexBlock.Segments() {
- segments = append(segments, seg)
- }
+ segs := []result.Segment{result.NewSegment(seg, false)}
+ indexResult := result.NewIndexBlock(segs, expectedRanges)
+ return indexResult, nil
+}
- // Now replace the active segment with the built segment.
- newFulfilled := indexBlock.Fulfilled().Copy()
- newFulfilled.AddRanges(expectedRanges)
- replacedBlock := result.NewIndexBlock(blockStart, segments, newFulfilled)
- indexResults[xtime.ToUnixNano(blockStart)] = replacedBlock
- return nil
+// GetDefaultIndexBlockForBlockStart gets the index block for the default volume type from the index results.
+func GetDefaultIndexBlockForBlockStart(
+ results result.IndexResults,
+ blockStart time.Time,
+) (result.IndexBlock, bool) {
+ indexBlockByVolumeType, ok := results[xtime.ToUnixNano(blockStart)]
+ if !ok {
+ // NB(bodu): We currently write empty data files to disk, which means that we can attempt to bootstrap
+ // time ranges that have no data and no index block.
+ // For example:
+ // - peers data bootstrap from peer nodes receives peer blocks w/ no data (empty)
+ // - peers data bootstrap writes empty ts data files to disk
+ // - peers index bootstrap reads empty ts data files md from disk
+ // - attempt to bootstrap time ranges that have no index results block
+ return result.IndexBlock{}, false
+ }
+ indexBlock, ok := indexBlockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
+ if !ok {
+ return result.IndexBlock{}, false
+ }
+ return indexBlock, true
}
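
After this refactor the build/persist helpers return a result.IndexBlock instead of mutating indexResults in place, and the caller (see the peers source earlier in this diff) merges the returned segments and fulfilled ranges into the existing block. A minimal sketch of that caller-side merge with toy types:

// Sketch of merging a freshly built index block into an existing one:
// segments are concatenated and fulfilled ranges are unioned. A toy
// map[uint32]bool stands in for ShardTimeRanges.
package main

import "fmt"

type indexBlock struct {
	segments  []string
	fulfilled map[uint32]bool
}

func mergeBlocks(existing, built indexBlock) indexBlock {
	segments := append([]string{}, built.segments...)
	segments = append(segments, existing.segments...)
	fulfilled := make(map[uint32]bool, len(existing.fulfilled))
	for shard := range existing.fulfilled {
		fulfilled[shard] = true
	}
	for shard := range built.fulfilled {
		fulfilled[shard] = true
	}
	return indexBlock{segments: segments, fulfilled: fulfilled}
}

func main() {
	existing := indexBlock{segments: []string{"mem-seg"}, fulfilled: map[uint32]bool{0: true}}
	built := indexBlock{segments: []string{"fs-seg"}, fulfilled: map[uint32]bool{1: true}}
	fmt.Printf("%+v\n", mergeBlocks(existing, built))
}
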
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/ranges.go b/src/dbnode/storage/bootstrap/bootstrapper/ranges.go
index 1c5006c64a..6c394b9e63 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/ranges.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/ranges.go
@@ -48,8 +48,8 @@ func NewShardTimeRangesTimeWindowGroups(
End: minTime(t.Add(windowSize), max),
}
- group := make(result.ShardTimeRanges)
- for shard, tr := range shardTimeRanges {
+ group := result.NewShardTimeRanges()
+ for shard, tr := range shardTimeRanges.Iter() {
iter := tr.Iter()
for iter.Next() {
evaluateRange := iter.Value()
@@ -58,7 +58,7 @@ func NewShardTimeRangesTimeWindowGroups(
continue
}
// Add to this range.
- group[shard] = group[shard].AddRange(intersection)
+ group.GetOrAdd(shard).AddRange(intersection)
}
}
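
For context, NewShardTimeRangesTimeWindowGroups buckets each shard's ranges into fixed windows of windowSize and keeps only the intersection with each window. A simplified single-range sketch of that windowing (toy types; the real code also intersects arbitrary range sets per shard):

// Sketch of splitting one time range into fixed block-size windows, the
// grouping behavior the ranges.go helper provides across shards.
package main

import (
	"fmt"
	"time"
)

type window struct{ start, end time.Time }

func groupByWindow(start, end time.Time, windowSize time.Duration) []window {
	var groups []window
	for t := start.Truncate(windowSize); t.Before(end); t = t.Add(windowSize) {
		wEnd := t.Add(windowSize)
		if wEnd.After(end) {
			wEnd = end // clamp the final window to the requested range
		}
		groups = append(groups, window{start: t, end: wEnd})
	}
	return groups
}

func main() {
	start := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	for _, w := range groupByWindow(start, start.Add(5*time.Hour), 2*time.Hour) {
		fmt.Println(w.start.Format(time.RFC3339), "->", w.end.Format(time.RFC3339))
	}
}
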
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/readers.go b/src/dbnode/storage/bootstrap/bootstrapper/readers.go
index d49e3a9a09..a44c823e81 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/readers.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/readers.go
@@ -24,14 +24,19 @@ import (
"sync"
"time"
+ "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
xtime "github.com/m3db/m3/src/x/time"
+ "github.com/opentracing/opentracing-go"
+ opentracinglog "github.com/opentracing/opentracing-go/log"
"go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
)
// TimeWindowReaders are grouped by data block.
@@ -58,24 +63,41 @@ func newTimeWindowReaders(
}
}
+// EnqueueReadersOptions supplies options to enqueue readers.
+type EnqueueReadersOptions struct {
+ NsMD namespace.Metadata
+ RunOpts bootstrap.RunOptions
+ RuntimeOpts runtime.Options
+ FsOpts fs.Options
+ ShardTimeRanges result.ShardTimeRanges
+ ReaderPool *ReaderPool
+ ReadersCh chan<- TimeWindowReaders
+ BlockSize time.Duration
+ OptimizedReadMetadataOnly bool
+ Logger *zap.Logger
+ Span opentracing.Span
+ NowFn clock.NowFn
+}
+
// EnqueueReaders into a readers channel grouped by data block.
-func EnqueueReaders(
- ns namespace.Metadata,
- runOpts bootstrap.RunOptions,
- runtimeOpts runtime.Options,
- fsOpts fs.Options,
- shardsTimeRanges result.ShardTimeRanges,
- readerPool *ReaderPool,
- readersCh chan<- TimeWindowReaders,
- blockSize time.Duration,
- logger *zap.Logger,
-) {
+func EnqueueReaders(opts EnqueueReadersOptions) {
// Close the readers ch if and only if all readers are enqueued.
- defer close(readersCh)
+ defer close(opts.ReadersCh)
// Normal run, open readers
- enqueueReadersGroupedByBlockSize(ns, runOpts, fsOpts,
- shardsTimeRanges, readerPool, readersCh, blockSize, logger)
+ enqueueReadersGroupedByBlockSize(
+ opts.NsMD,
+ opts.RunOpts,
+ opts.FsOpts,
+ opts.ShardTimeRanges,
+ opts.ReaderPool,
+ opts.ReadersCh,
+ opts.BlockSize,
+ opts.OptimizedReadMetadataOnly,
+ opts.Logger,
+ opts.Span,
+ opts.NowFn,
+ )
}
func enqueueReadersGroupedByBlockSize(
@@ -86,17 +108,37 @@ func enqueueReadersGroupedByBlockSize(
readerPool *ReaderPool,
readersCh chan<- TimeWindowReaders,
blockSize time.Duration,
+ optimizedReadMetadataOnly bool,
logger *zap.Logger,
+ span opentracing.Span,
+ nowFn clock.NowFn,
) {
// Group them by block size.
groupFn := NewShardTimeRangesTimeWindowGroups
groupedByBlockSize := groupFn(shardTimeRanges, blockSize)
+ // Cache info files by shard.
+ readInfoFilesResultsByShard := make(map[uint32][]fs.ReadInfoFileResult)
+
// Now enqueue across all shards by block size.
for _, group := range groupedByBlockSize {
- readers := make(map[ShardID]ShardReaders, len(group.Ranges))
- for shard, tr := range group.Ranges {
- shardReaders := newShardReaders(ns, fsOpts, readerPool, shard, tr, logger)
+ readers := make(map[ShardID]ShardReaders, group.Ranges.Len())
+ for shard, tr := range group.Ranges.Iter() {
+ readInfoFilesResults, ok := readInfoFilesResultsByShard[shard]
+ if !ok {
+ start := nowFn()
+ logger.Debug("enqueue readers read info files start",
+ zap.Uint32("shard", shard))
+ readInfoFilesResults = fs.ReadInfoFiles(fsOpts.FilePathPrefix(),
+ ns.ID(), shard, fsOpts.InfoReaderBufferSize(),
+ fsOpts.DecodingOptions(), persist.FileSetFlushType)
+ logger.Debug("enqueue readers read info files done",
+ zap.Uint32("shard", shard),
+ zap.Duration("took", nowFn().Sub(start)))
+ readInfoFilesResultsByShard[shard] = readInfoFilesResults
+ }
+ shardReaders := newShardReaders(ns, fsOpts, readerPool, shard, tr,
+ optimizedReadMetadataOnly, logger, span, nowFn, readInfoFilesResults)
readers[ShardID(shard)] = shardReaders
}
readersCh <- newTimeWindowReaders(group.Ranges, readers)
@@ -109,15 +151,31 @@ func newShardReaders(
readerPool *ReaderPool,
shard uint32,
tr xtime.Ranges,
+ optimizedReadMetadataOnly bool,
logger *zap.Logger,
+ span opentracing.Span,
+ nowFn clock.NowFn,
+ readInfoFilesResults []fs.ReadInfoFileResult,
) ShardReaders {
- readInfoFilesResults := fs.ReadInfoFiles(fsOpts.FilePathPrefix(),
- ns.ID(), shard, fsOpts.InfoReaderBufferSize(), fsOpts.DecodingOptions())
+ logSpan := func(event string) {
+ span.LogFields(
+ opentracinglog.String("event", event),
+ opentracinglog.Uint32("shard", shard),
+ opentracinglog.String("tr", tr.String()),
+ )
+ }
+ logFields := []zapcore.Field{
+ zap.Uint32("shard", shard),
+ zap.String("tr", tr.String()),
+ }
if len(readInfoFilesResults) == 0 {
// No readers.
return ShardReaders{}
}
+ start := nowFn()
+ logger.Debug("enqueue readers open data readers start", logFields...)
+ logSpan("enqueue_readers_open_data_readers_start")
readers := make([]fs.DataFileSetReader, 0, len(readInfoFilesResults))
for i := 0; i < len(readInfoFilesResults); i++ {
result := readInfoFilesResults[i]
@@ -154,11 +212,8 @@ func newShardReaders(
}
openOpts := fs.DataReaderOpenOptions{
- Identifier: fs.FileSetFileIdentifier{
- Namespace: ns.ID(),
- Shard: shard,
- BlockStart: blockStart,
- },
+ Identifier: fs.NewFileSetFileIdentifier(ns.ID(), blockStart, shard, info.VolumeIndex),
+ OptimizedReadMetadataOnly: optimizedReadMetadataOnly,
}
if err := r.Open(openOpts); err != nil {
logger.Error("unable to open fileset files",
@@ -174,6 +229,9 @@ func newShardReaders(
readers = append(readers, r)
}
+ logger.Debug("enqueue readers open data readers done",
+ append(logFields, zap.Duration("took", nowFn().Sub(start)))...)
+ logSpan("enqueue_readers_open_data_readers_done")
return ShardReaders{Readers: readers}
}
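
The substantive change in this file is readInfoFilesResultsByShard: info files are now read once per shard and reused for every block-size window rather than re-read per window. A sketch of that memoization pattern, where readInfoFiles is a hypothetical stand-in for fs.ReadInfoFiles:

// Sketch of per-shard memoization: the expensive disk read happens once per
// shard regardless of how many block-size windows iterate over it.
package main

import "fmt"

type infoFileResult struct{ volumeIndex int }

func readInfoFiles(shard uint32) []infoFileResult {
	fmt.Println("expensive disk read for shard", shard)
	return []infoFileResult{{volumeIndex: 0}}
}

func main() {
	cache := make(map[uint32][]infoFileResult)
	windows := 3
	for w := 0; w < windows; w++ {
		for _, shard := range []uint32{0, 1} {
			results, ok := cache[shard]
			if !ok {
				results = readInfoFiles(shard) // only hit disk once per shard
				cache[shard] = results
			}
			_ = results // hand to the per-window reader setup
		}
	}
}
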
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/uninitialized/source.go b/src/dbnode/storage/bootstrap/bootstrapper/uninitialized/source.go
index c8dfa698fb..1493f189b1 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/uninitialized/source.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/uninitialized/source.go
@@ -28,6 +28,8 @@ import (
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/topology"
+ "github.com/m3db/m3/src/dbnode/tracepoint"
+ "github.com/m3db/m3/src/x/context"
)
// The purpose of the unitializedSource is to succeed bootstraps for any
@@ -73,10 +75,10 @@ func (s *uninitializedTopologySource) availability(
) (result.ShardTimeRanges, error) {
var (
topoState = runOpts.InitialTopologyState()
- availableShardTimeRanges = result.ShardTimeRanges{}
+ availableShardTimeRanges = result.NewShardTimeRanges()
)
- for shardIDUint := range shardsTimeRanges {
+ for shardIDUint := range shardsTimeRanges.Iter() {
shardID := topology.ShardID(shardIDUint)
hostShardStates, ok := topoState.ShardStates[shardID]
if !ok {
@@ -126,7 +128,9 @@ func (s *uninitializedTopologySource) availability(
// factor to actually increase correctly.
shardHasNeverBeenCompletelyInitialized := numInitializing-numLeaving > 0
if shardHasNeverBeenCompletelyInitialized {
- availableShardTimeRanges[shardIDUint] = shardsTimeRanges[shardIDUint]
+ if tr, ok := shardsTimeRanges.Get(shardIDUint); ok {
+ availableShardTimeRanges.Set(shardIDUint, tr)
+ }
}
}
@@ -134,8 +138,12 @@ func (s *uninitializedTopologySource) availability(
}
func (s *uninitializedTopologySource) Read(
+ ctx context.Context,
namespaces bootstrap.Namespaces,
) (bootstrap.NamespaceResults, error) {
+ ctx, span, _ := ctx.StartSampledTraceSpan(tracepoint.BootstrapperUninitializedSourceRead)
+ defer span.Finish()
+
results := bootstrap.NamespaceResults{
Results: bootstrap.NewNamespaceResultsMap(bootstrap.NamespaceResultsMapOptions{}),
}
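
The Read signature change repeats the pattern applied to every bootstrap source in this diff: accept a context, open a sampled trace span, and finish it on return. A sketch of the same idea using plain opentracing-go; the real code goes through m3's x/context wrapper and tracepoint constants:

// Sketch of threading a trace span through a bootstrap source's Read. Uses
// opentracing-go's default (no-op) tracer so it runs standalone.
package main

import (
	"context"

	"github.com/opentracing/opentracing-go"
)

func read(ctx context.Context) error {
	span, ctx := opentracing.StartSpanFromContext(ctx, "bootstrapper.source.read")
	defer span.Finish()
	_ = ctx // pass down to per-namespace work
	return nil
}

func main() {
	if err := read(context.Background()); err != nil {
		panic(err)
	}
}
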
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/uninitialized/source_test.go b/src/dbnode/storage/bootstrap/bootstrapper/uninitialized/source_test.go
index ec3a8a3553..85125b6a80 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/uninitialized/source_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/uninitialized/source_test.go
@@ -51,8 +51,8 @@ func TestUnitializedTopologySourceAvailableDataAndAvailableIndex(t *testing.T) {
blockSize = 2 * time.Hour
numShards = uint32(4)
blockStart = time.Now().Truncate(blockSize)
- shardTimeRangesToBootstrap = result.ShardTimeRanges{}
- bootstrapRanges = xtime.Ranges{}.AddRange(xtime.Range{
+ shardTimeRangesToBootstrap = result.NewShardTimeRanges()
+ bootstrapRanges = xtime.NewRanges(xtime.Range{
Start: blockStart,
End: blockStart.Add(blockSize),
})
@@ -63,7 +63,7 @@ func TestUnitializedTopologySourceAvailableDataAndAvailableIndex(t *testing.T) {
require.NoError(t, err)
for i := 0; i < int(numShards); i++ {
- shardTimeRangesToBootstrap[uint32(i)] = bootstrapRanges
+ shardTimeRangesToBootstrap.Set(uint32(i), bootstrapRanges)
}
testCases := []struct {
@@ -101,7 +101,7 @@ func TestUnitializedTopologySourceAvailableDataAndAvailableIndex(t *testing.T) {
tu.SelfID: tu.ShardsRange(0, numShards, shard.Leaving),
}),
shardsTimeRangesToBootstrap: shardTimeRangesToBootstrap,
- expectedAvailableShardsTimeRanges: result.ShardTimeRanges{},
+ expectedAvailableShardsTimeRanges: result.NewShardTimeRanges(),
},
// Should return that it can't bootstrap anything because it's not
// a new namespace.
@@ -111,7 +111,7 @@ func TestUnitializedTopologySourceAvailableDataAndAvailableIndex(t *testing.T) {
tu.SelfID: tu.ShardsRange(0, numShards, shard.Available),
}),
shardsTimeRangesToBootstrap: shardTimeRangesToBootstrap,
- expectedAvailableShardsTimeRanges: result.ShardTimeRanges{},
+ expectedAvailableShardsTimeRanges: result.NewShardTimeRanges(),
},
// Should return that it can bootstrap everything because
// it's a new namespace.
@@ -148,7 +148,7 @@ func TestUnitializedTopologySourceAvailableDataAndAvailableIndex(t *testing.T) {
notSelfID2: tu.ShardsRange(0, numShards, shard.Available),
}),
shardsTimeRangesToBootstrap: shardTimeRangesToBootstrap,
- expectedAvailableShardsTimeRanges: result.ShardTimeRanges{},
+ expectedAvailableShardsTimeRanges: result.NewShardTimeRanges(),
},
// Should return that it can't bootstrap anything because it's not
// a new namespace, we're just doing a node replace.
@@ -161,7 +161,7 @@ func TestUnitializedTopologySourceAvailableDataAndAvailableIndex(t *testing.T) {
notSelfID3: tu.ShardsRange(0, numShards, shard.Initializing),
}),
shardsTimeRangesToBootstrap: shardTimeRangesToBootstrap,
- expectedAvailableShardsTimeRanges: result.ShardTimeRanges{},
+ expectedAvailableShardsTimeRanges: result.NewShardTimeRanges(),
},
// Should return that it can't bootstrap anything because we don't
// know how to interpret the unknown host.
diff --git a/src/dbnode/storage/bootstrap/namespace_results_new_map_gen.go b/src/dbnode/storage/bootstrap/namespace_results_new_map_gen.go
index 8b02405418..b37df84484 100644
--- a/src/dbnode/storage/bootstrap/namespace_results_new_map_gen.go
+++ b/src/dbnode/storage/bootstrap/namespace_results_new_map_gen.go
@@ -28,7 +28,7 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/dbnode/storage/bootstrap/namespaces_new_map_gen.go b/src/dbnode/storage/bootstrap/namespaces_new_map_gen.go
index 5fb41e3e47..f3f874467d 100644
--- a/src/dbnode/storage/bootstrap/namespaces_new_map_gen.go
+++ b/src/dbnode/storage/bootstrap/namespaces_new_map_gen.go
@@ -28,7 +28,7 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/dbnode/storage/bootstrap/noop.go b/src/dbnode/storage/bootstrap/noop.go
index 297e7a4615..ab66b62ac5 100644
--- a/src/dbnode/storage/bootstrap/noop.go
+++ b/src/dbnode/storage/bootstrap/noop.go
@@ -22,6 +22,8 @@ package bootstrap
import (
"time"
+
+ "github.com/m3db/m3/src/x/context"
)
type noOpBootstrapProcessProvider struct{}
@@ -45,8 +47,18 @@ func (b noOpBootstrapProcessProvider) Provide() (Process, error) {
type noOpBootstrapProcess struct{}
func (b noOpBootstrapProcess) Run(
+ ctx context.Context,
start time.Time,
namespaces []ProcessNamespace,
) (NamespaceResults, error) {
+ // Run any registered hooks; these exist purely for testing purposes.
+ for _, ns := range namespaces {
+ if err := ns.Hooks.BootstrapSourceBegin(); err != nil {
+ return NamespaceResults{}, err
+ }
+ if err := ns.Hooks.BootstrapSourceEnd(); err != nil {
+ return NamespaceResults{}, err
+ }
+ }
return NewNamespaceResults(NewNamespaces(namespaces)), nil
}
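
The hooks invoked here back onto the new Hook interface defined in types.go later in this diff. A hedged sketch of a stateless adapter plus a stateful counter, handy for testing against this no-op process (hookFn and countingHook are hypothetical helpers, not part of the change):

    // hookFn adapts an ordinary function to the bootstrap Hook interface.
    type hookFn func() error

    func (f hookFn) Run() error { return f() }

    // countingHook records how many times it ran.
    type countingHook struct{ calls int }

    func (h *countingHook) Run() error {
        h.calls++
        return nil
    }

Passing these through NamespaceHooksOptions as BootstrapSourceBegin/BootstrapSourceEnd and running the no-op process should bump each counter once per namespace, per the loop above.
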
diff --git a/src/dbnode/storage/bootstrap/process.go b/src/dbnode/storage/bootstrap/process.go
index 8957482cd2..f875918fae 100644
--- a/src/dbnode/storage/bootstrap/process.go
+++ b/src/dbnode/storage/bootstrap/process.go
@@ -31,8 +31,11 @@ import (
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/topology"
+ "github.com/m3db/m3/src/dbnode/tracepoint"
+ "github.com/m3db/m3/src/x/context"
xtime "github.com/m3db/m3/src/x/time"
+ "github.com/opentracing/opentracing-go/log"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
@@ -151,6 +154,7 @@ type bootstrapProcess struct {
}
func (b bootstrapProcess) Run(
+ ctx context.Context,
at time.Time,
namespaces []ProcessNamespace,
) (NamespaceResults, error) {
@@ -165,7 +169,10 @@ func (b bootstrapProcess) Run(
idxopts := namespace.Metadata.Options().IndexOptions()
dataRanges := b.targetRangesForData(at, ropts)
indexRanges := b.targetRangesForIndex(at, ropts, idxopts)
-
+ firstRanges := b.newShardTimeRanges(
+ dataRanges.firstRangeWithPersistTrue.Range,
+ namespace.Shards,
+ )
namespacesRunFirst.Namespaces.Set(namespace.Metadata.ID(), Namespace{
Metadata: namespace.Metadata,
Shards: namespace.Shards,
@@ -174,16 +181,18 @@ func (b bootstrapProcess) Run(
DataTargetRange: dataRanges.firstRangeWithPersistTrue,
IndexTargetRange: indexRanges.firstRangeWithPersistTrue,
DataRunOptions: NamespaceRunOptions{
- ShardTimeRanges: b.newShardTimeRanges(
- dataRanges.firstRangeWithPersistTrue.Range, namespace.Shards),
- RunOptions: dataRanges.firstRangeWithPersistTrue.RunOptions,
+ ShardTimeRanges: firstRanges.Copy(),
+ TargetShardTimeRanges: firstRanges.Copy(),
+ RunOptions: dataRanges.firstRangeWithPersistTrue.RunOptions,
},
IndexRunOptions: NamespaceRunOptions{
- ShardTimeRanges: b.newShardTimeRanges(
- indexRanges.firstRangeWithPersistTrue.Range, namespace.Shards),
- RunOptions: indexRanges.firstRangeWithPersistTrue.RunOptions,
+ ShardTimeRanges: firstRanges.Copy(),
+ TargetShardTimeRanges: firstRanges.Copy(),
+ RunOptions: indexRanges.firstRangeWithPersistTrue.RunOptions,
},
})
+ secondRanges := b.newShardTimeRanges(
+ dataRanges.secondRangeWithPersistFalse.Range, namespace.Shards)
namespacesRunSecond.Namespaces.Set(namespace.Metadata.ID(), Namespace{
Metadata: namespace.Metadata,
Shards: namespace.Shards,
@@ -192,14 +201,14 @@ func (b bootstrapProcess) Run(
DataTargetRange: dataRanges.secondRangeWithPersistFalse,
IndexTargetRange: indexRanges.secondRangeWithPersistFalse,
DataRunOptions: NamespaceRunOptions{
- ShardTimeRanges: b.newShardTimeRanges(
- dataRanges.secondRangeWithPersistFalse.Range, namespace.Shards),
- RunOptions: dataRanges.secondRangeWithPersistFalse.RunOptions,
+ ShardTimeRanges: secondRanges.Copy(),
+ TargetShardTimeRanges: secondRanges.Copy(),
+ RunOptions: dataRanges.secondRangeWithPersistFalse.RunOptions,
},
IndexRunOptions: NamespaceRunOptions{
- ShardTimeRanges: b.newShardTimeRanges(
- indexRanges.secondRangeWithPersistFalse.Range, namespace.Shards),
- RunOptions: indexRanges.secondRangeWithPersistFalse.RunOptions,
+ ShardTimeRanges: secondRanges.Copy(),
+ TargetShardTimeRanges: secondRanges.Copy(),
+ RunOptions: indexRanges.secondRangeWithPersistFalse.RunOptions,
},
})
}
@@ -209,42 +218,71 @@ func (b bootstrapProcess) Run(
namespacesRunFirst,
namespacesRunSecond,
} {
- for _, entry := range namespaces.Namespaces.Iter() {
- namespace := entry.Value()
- logFields := b.logFields(namespace.Metadata, namespace.Shards,
- namespace.DataTargetRange.Range, namespace.IndexTargetRange.Range)
- b.logBootstrapRun(logFields)
- }
-
- begin := b.nowFn()
- res, err := b.bootstrapper.Bootstrap(namespaces)
- took := b.nowFn().Sub(begin)
+ res, err := b.runPass(ctx, namespaces)
if err != nil {
- b.log.Error("bootstrap process error",
- zap.Duration("took", took),
- zap.Error(err))
return NamespaceResults{}, err
}
- for _, entry := range namespaces.Namespaces.Iter() {
- namespace := entry.Value()
- nsID := namespace.Metadata.ID()
+ bootstrapResult = MergeNamespaceResults(bootstrapResult, res)
+ }
- result, ok := res.Results.Get(nsID)
- if !ok {
- return NamespaceResults{},
- fmt.Errorf("result missing for namespace: %v", nsID.String())
- }
+ return bootstrapResult, nil
+}
+
+func (b bootstrapProcess) runPass(
+ ctx context.Context,
+ namespaces Namespaces,
+) (NamespaceResults, error) {
+ ctx, span, sampled := ctx.StartSampledTraceSpan(tracepoint.BootstrapProcessRun)
+ defer span.Finish()
- logFields := b.logFields(namespace.Metadata, namespace.Shards,
- namespace.DataTargetRange.Range, namespace.IndexTargetRange.Range)
- b.logBootstrapResult(result, logFields, took)
+ i := 0
+ for _, entry := range namespaces.Namespaces.Iter() {
+ ns := entry.Value()
+ idx := i
+ i++
+
+ if sampled {
+ ext := fmt.Sprintf("[%d]", idx)
+ span.LogFields(
+ log.String("namespace"+ext, ns.Metadata.ID().String()),
+ log.Int("shards"+ext, len(ns.Shards)),
+ log.String("dataRange"+ext, ns.DataTargetRange.Range.String()),
+ log.String("indexRange"+ext, ns.IndexTargetRange.Range.String()),
+ )
}
- bootstrapResult = MergeNamespaceResults(bootstrapResult, res)
+ logFields := b.logFields(ns.Metadata, ns.Shards,
+ ns.DataTargetRange.Range, ns.IndexTargetRange.Range)
+ b.logBootstrapRun(logFields)
}
- return bootstrapResult, nil
+ begin := b.nowFn()
+ res, err := b.bootstrapper.Bootstrap(ctx, namespaces)
+ took := b.nowFn().Sub(begin)
+ if err != nil {
+ b.log.Error("bootstrap process error",
+ zap.Duration("took", took),
+ zap.Error(err))
+ return NamespaceResults{}, err
+ }
+
+ for _, entry := range namespaces.Namespaces.Iter() {
+ namespace := entry.Value()
+ nsID := namespace.Metadata.ID()
+
+ result, ok := res.Results.Get(nsID)
+ if !ok {
+ return NamespaceResults{},
+ fmt.Errorf("result missing for namespace: %v", nsID.String())
+ }
+
+ logFields := b.logFields(namespace.Metadata, namespace.Shards,
+ namespace.DataTargetRange.Range, namespace.IndexTargetRange.Range)
+ b.logBootstrapResult(result, logFields, took)
+ }
+
+ return res, nil
}
func (b bootstrapProcess) logFields(
@@ -275,10 +313,10 @@ func (b bootstrapProcess) newShardTimeRanges(
window xtime.Range,
shards []uint32,
) result.ShardTimeRanges {
- shardsTimeRanges := make(result.ShardTimeRanges, len(shards))
+ shardsTimeRanges := result.NewShardTimeRanges()
ranges := xtime.NewRanges(window)
for _, s := range shards {
- shardsTimeRanges[s] = ranges
+ shardsTimeRanges.Set(s, ranges)
}
return shardsTimeRanges
}
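
The Copy() calls above are load-bearing: shardTimeRanges is a mutable map, and each bootstrapper pass subtracts fulfilled ranges from its NamespaceRunOptions.ShardTimeRanges in place, so the data and index run options must not share one instance. A small sketch of the aliasing hazard (copySemanticsSketch is hypothetical; fmt, time, and the result package are assumed imported):

    func copySemanticsSketch() {
        start := time.Now().Truncate(2 * time.Hour)
        end := start.Add(2 * time.Hour)

        shared := result.NewShardTimeRangesFromRange(start, end, 1, 2)
        aliased := shared       // same underlying map
        copied := shared.Copy() // independent snapshot

        // Fulfilling shard 1 mutates the shared instance in place.
        shared.Subtract(result.NewShardTimeRangesFromRange(start, end, 1))

        fmt.Println(aliased.Len()) // 1: the mutation is visible through the alias
        fmt.Println(copied.Len())  // 2: the copy still holds both shards
    }
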
diff --git a/src/dbnode/storage/bootstrap/result/new_map_gen.go b/src/dbnode/storage/bootstrap/result/new_map_gen.go
index 4edad92b6d..1cca90a434 100644
--- a/src/dbnode/storage/bootstrap/result/new_map_gen.go
+++ b/src/dbnode/storage/bootstrap/result/new_map_gen.go
@@ -28,7 +28,7 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/dbnode/storage/bootstrap/result/result_data.go b/src/dbnode/storage/bootstrap/result/result_data.go
index 87838d7ae6..6189905bf8 100644
--- a/src/dbnode/storage/bootstrap/result/result_data.go
+++ b/src/dbnode/storage/bootstrap/result/result_data.go
@@ -34,7 +34,7 @@ type dataBootstrapResult struct {
// NewDataBootstrapResult creates a new result.
func NewDataBootstrapResult() DataBootstrapResult {
return &dataBootstrapResult{
- unfulfilled: make(ShardTimeRanges),
+ unfulfilled: NewShardTimeRanges(),
}
}
diff --git a/src/dbnode/storage/bootstrap/result/result_data_test.go b/src/dbnode/storage/bootstrap/result/result_data_test.go
index 2e5eb242b3..e120783181 100644
--- a/src/dbnode/storage/bootstrap/result/result_data_test.go
+++ b/src/dbnode/storage/bootstrap/result/result_data_test.go
@@ -44,7 +44,7 @@ func testResultOptions() Options {
func TestDataResultSetUnfulfilledMergeShardResults(t *testing.T) {
start := time.Now().Truncate(testBlockSize)
- rangeOne := ShardTimeRanges{
+ rangeOne := shardTimeRanges{
0: xtime.NewRanges(xtime.Range{
Start: start,
End: start.Add(8 * testBlockSize),
@@ -55,7 +55,7 @@ func TestDataResultSetUnfulfilledMergeShardResults(t *testing.T) {
}),
}
- rangeTwo := ShardTimeRanges{
+ rangeTwo := shardTimeRanges{
0: xtime.NewRanges(xtime.Range{
Start: start.Add(6 * testBlockSize),
End: start.Add(10 * testBlockSize),
@@ -81,7 +81,7 @@ func TestDataResultSetUnfulfilledMergeShardResults(t *testing.T) {
assert.True(t, rMerged.Unfulfilled().Equal(rangeOne))
rMerged = MergedDataBootstrapResult(r, rTwo)
- expected := ShardTimeRanges{
+ expected := shardTimeRanges{
0: xtime.NewRanges(xtime.Range{
Start: start,
End: start.Add(10 * testBlockSize),
@@ -101,27 +101,27 @@ func TestDataResultSetUnfulfilledMergeShardResults(t *testing.T) {
func TestDataResultSetUnfulfilledOverwitesUnfulfilled(t *testing.T) {
start := time.Now().Truncate(testBlockSize)
r := NewDataBootstrapResult()
- r.SetUnfulfilled(ShardTimeRanges{
+ r.SetUnfulfilled(shardTimeRanges{
0: xtime.NewRanges(xtime.Range{
Start: start,
End: start.Add(8 * testBlockSize),
}),
})
- expected := ShardTimeRanges{0: xtime.NewRanges(xtime.Range{
+ expected := shardTimeRanges{0: xtime.NewRanges(xtime.Range{
Start: start,
End: start.Add(8 * testBlockSize),
})}
assert.True(t, r.Unfulfilled().Equal(expected))
- r.SetUnfulfilled(ShardTimeRanges{
+ r.SetUnfulfilled(shardTimeRanges{
0: xtime.NewRanges(xtime.Range{
Start: start.Add(6 * testBlockSize),
End: start.Add(10 * testBlockSize),
}),
})
- expected = ShardTimeRanges{0: xtime.NewRanges(xtime.Range{
+ expected = shardTimeRanges{0: xtime.NewRanges(xtime.Range{
Start: start.Add(6 * testBlockSize),
End: start.Add(10 * testBlockSize),
})}
@@ -133,7 +133,7 @@ func TestResultSetUnfulfilled(t *testing.T) {
start := time.Now().Truncate(testBlockSize)
r := NewDataBootstrapResult()
- r.SetUnfulfilled(ShardTimeRanges{
+ r.SetUnfulfilled(shardTimeRanges{
0: xtime.NewRanges(xtime.Range{
Start: start,
End: start.Add(2 * testBlockSize),
@@ -143,14 +143,14 @@ func TestResultSetUnfulfilled(t *testing.T) {
End: start.Add(2 * testBlockSize),
}),
})
- r.SetUnfulfilled(ShardTimeRanges{
+ r.SetUnfulfilled(shardTimeRanges{
1: xtime.NewRanges(xtime.Range{
Start: start,
End: start.Add(2 * testBlockSize),
}),
})
- assert.True(t, r.Unfulfilled().Equal(ShardTimeRanges{
+ assert.True(t, r.Unfulfilled().Equal(shardTimeRanges{
1: xtime.NewRanges(xtime.Range{
Start: start,
End: start.Add(2 * testBlockSize),
@@ -273,17 +273,17 @@ func TestShardResultRemoveSeries(t *testing.T) {
}
func TestShardTimeRangesIsEmpty(t *testing.T) {
- assert.True(t, ShardTimeRanges{}.IsEmpty())
- assert.True(t, ShardTimeRanges{0: xtime.Ranges{}, 1: xtime.Ranges{}}.IsEmpty())
- assert.True(t, ShardTimeRanges{0: xtime.NewRanges(xtime.Range{})}.IsEmpty())
- assert.False(t, ShardTimeRanges{0: xtime.NewRanges(xtime.Range{
+ assert.True(t, shardTimeRanges{}.IsEmpty())
+ assert.True(t, shardTimeRanges{0: xtime.NewRanges(), 1: xtime.NewRanges()}.IsEmpty())
+ assert.True(t, shardTimeRanges{0: xtime.NewRanges(xtime.Range{})}.IsEmpty())
+ assert.False(t, shardTimeRanges{0: xtime.NewRanges(xtime.Range{
Start: time.Now(),
End: time.Now().Add(time.Second),
})}.IsEmpty())
}
func TestShardTimeRangesCopy(t *testing.T) {
- str := ShardTimeRanges{0: xtime.NewRanges(xtime.Range{
+ str := shardTimeRanges{0: xtime.NewRanges(xtime.Range{
Start: time.Now(),
End: time.Now().Add(time.Second),
})}
@@ -294,7 +294,7 @@ func TestShardTimeRangesCopy(t *testing.T) {
}
func TestShardTimeRangesToUnfulfilledDataResult(t *testing.T) {
- str := ShardTimeRanges{
+ str := shardTimeRanges{
0: xtime.NewRanges(xtime.Range{
Start: time.Now(),
End: time.Now().Add(time.Minute),
@@ -311,7 +311,7 @@ func TestShardTimeRangesToUnfulfilledDataResult(t *testing.T) {
func TestShardTimeRangesSubtract(t *testing.T) {
start := time.Now().Truncate(testBlockSize)
- str := ShardTimeRanges{
+ str := shardTimeRanges{
0: xtime.NewRanges(xtime.Range{
Start: start,
End: start.Add(2 * testBlockSize),
@@ -321,7 +321,7 @@ func TestShardTimeRangesSubtract(t *testing.T) {
End: start.Add(2 * testBlockSize),
}),
}
- str.Subtract(ShardTimeRanges{
+ str.Subtract(shardTimeRanges{
0: xtime.NewRanges(xtime.Range{
Start: start,
End: start.Add(testBlockSize),
@@ -332,7 +332,7 @@ func TestShardTimeRangesSubtract(t *testing.T) {
}),
})
- assert.True(t, str.Equal(ShardTimeRanges{
+ assert.True(t, str.Equal(shardTimeRanges{
0: xtime.NewRanges(xtime.Range{
Start: start.Add(testBlockSize),
End: start.Add(2 * testBlockSize),
@@ -348,7 +348,7 @@ func TestShardTimeRangesMinMax(t *testing.T) {
start := time.Now().Truncate(testBlockSize)
- str := ShardTimeRanges{
+ str := shardTimeRanges{
0: xtime.NewRanges(xtime.Range{
Start: start,
End: start.Add(testBlockSize),
@@ -374,14 +374,10 @@ func TestShardTimeRangesString(t *testing.T) {
[]time.Time{start, start.Add(2 * testBlockSize)},
}
- str := ShardTimeRanges{
- 0: xtime.NewRanges(xtime.Range{
- Start: ts[0][0],
- End: ts[0][1],
- }).AddRange(xtime.Range{
- Start: ts[1][0],
- End: ts[1][1],
- }),
+ str := shardTimeRanges{
+ 0: xtime.NewRanges(
+ xtime.Range{Start: ts[0][0], End: ts[0][1]},
+ xtime.Range{Start: ts[1][0], End: ts[1][1]}),
1: xtime.NewRanges(xtime.Range{
Start: ts[2][0],
End: ts[2][1],
@@ -399,14 +395,10 @@ func TestShardTimeRangesString(t *testing.T) {
func TestShardTimeRangesSummaryString(t *testing.T) {
start := time.Unix(1472824800, 0)
- str := ShardTimeRanges{
- 0: xtime.NewRanges(xtime.Range{
- Start: start,
- End: start.Add(testBlockSize),
- }).AddRange(xtime.Range{
- Start: start.Add(2 * testBlockSize),
- End: start.Add(4 * testBlockSize),
- }),
+ str := shardTimeRanges{
+ 0: xtime.NewRanges(
+ xtime.Range{Start: start, End: start.Add(testBlockSize)},
+ xtime.Range{Start: start.Add(2 * testBlockSize), End: start.Add(4 * testBlockSize)}),
1: xtime.NewRanges(xtime.Range{
Start: start,
End: start.Add(2 * testBlockSize),
diff --git a/src/dbnode/storage/bootstrap/result/result_index.go b/src/dbnode/storage/bootstrap/result/result_index.go
index 96270f1c01..1a398139a1 100644
--- a/src/dbnode/storage/bootstrap/result/result_index.go
+++ b/src/dbnode/storage/bootstrap/result/result_index.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -29,6 +29,7 @@ import (
"github.com/m3db/m3/src/m3ninx/index"
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/builder"
+ "github.com/m3db/m3/src/m3ninx/persist"
xtime "github.com/m3db/m3/src/x/time"
)
@@ -49,7 +50,7 @@ type indexBootstrapResult struct {
func NewIndexBootstrapResult() IndexBootstrapResult {
return &indexBootstrapResult{
results: make(IndexResults),
- unfulfilled: make(ShardTimeRanges),
+ unfulfilled: NewShardTimeRanges(),
}
}
@@ -65,16 +66,18 @@ func (r *indexBootstrapResult) SetUnfulfilled(unfulfilled ShardTimeRanges) {
r.unfulfilled = unfulfilled
}
-func (r *indexBootstrapResult) Add(block IndexBlock, unfulfilled ShardTimeRanges) {
- r.results.Add(block)
+func (r *indexBootstrapResult) Add(blocks IndexBlockByVolumeType, unfulfilled ShardTimeRanges) {
+ r.results.Add(blocks)
r.unfulfilled.AddRanges(unfulfilled)
}
func (r *indexBootstrapResult) NumSeries() int {
var size int64
- for _, b := range r.results {
- for _, s := range b.segments {
- size += s.Size()
+ for _, blockByVolumeType := range r.results {
+ for _, b := range blockByVolumeType.data {
+ for _, s := range b.segments {
+ size += s.Segment().Size()
+ }
}
}
return int(size)
@@ -143,32 +146,33 @@ func (r IndexResults) AddBlockIfNotExists(
_, exists := r[blockStartNanos]
if !exists {
- r[blockStartNanos] = NewIndexBlock(blockStart, nil, nil)
+ r[blockStartNanos] = NewIndexBlockByVolumeType(blockStart)
}
}
// Add will add an index block to the collection, merging if one already
// exists.
-func (r IndexResults) Add(block IndexBlock) {
- if block.BlockStart().IsZero() {
+func (r IndexResults) Add(blocks IndexBlockByVolumeType) {
+ if blocks.BlockStart().IsZero() {
return
}
// Merge results
- blockStart := xtime.ToUnixNano(block.BlockStart())
+ blockStart := xtime.ToUnixNano(blocks.BlockStart())
existing, ok := r[blockStart]
if !ok {
- r[blockStart] = block
+ r[blockStart] = blocks
return
}
- r[blockStart] = existing.Merged(block)
+
+ r[blockStart] = existing.Merged(blocks)
}
// AddResults will add another set of index results to the collection, merging
// if index blocks already exists.
func (r IndexResults) AddResults(other IndexResults) {
- for _, block := range other {
- r.Add(block)
+ for _, blocks := range other {
+ r.Add(blocks)
}
}
@@ -177,6 +181,7 @@ func (r IndexResults) AddResults(other IndexResults) {
func (r IndexResults) MarkFulfilled(
t time.Time,
fulfilled ShardTimeRanges,
+ indexVolumeType persist.IndexVolumeType,
idxopts namespace.IndexOptions,
) error {
// NB(r): The reason we can align by the retention block size and guarantee
@@ -198,12 +203,18 @@ func (r IndexResults) MarkFulfilled(
fulfilled.SummaryString(), blockRange.String())
}
- block, exists := r[blockStartNanos]
+ blocks, exists := r[blockStartNanos]
+ if !exists {
+ blocks = NewIndexBlockByVolumeType(blockStart)
+ r[blockStartNanos] = blocks
+ }
+
+ block, exists := blocks.data[indexVolumeType]
if !exists {
- block = NewIndexBlock(blockStart, nil, nil)
- r[blockStartNanos] = block
+ block = NewIndexBlock(nil, nil)
+ blocks.data[indexVolumeType] = block
}
- r[blockStartNanos] = block.Merged(NewIndexBlock(blockStart, nil, fulfilled))
+ blocks.data[indexVolumeType] = block.Merged(NewIndexBlock(nil, fulfilled))
return nil
}
@@ -219,10 +230,14 @@ func MergedIndexBootstrapResult(i, j IndexBootstrapResult) IndexBootstrapResult
}
sizeI, sizeJ := 0, 0
for _, ir := range i.IndexResults() {
- sizeI += len(ir.Segments())
+ for _, b := range ir.data {
+ sizeI += len(b.Segments())
+ }
}
for _, ir := range j.IndexResults() {
- sizeJ += len(ir.Segments())
+ for _, b := range ir.data {
+ sizeJ += len(b.Segments())
+ }
}
if sizeI >= sizeJ {
i.IndexResults().AddResults(j.IndexResults())
@@ -236,27 +251,20 @@ func MergedIndexBootstrapResult(i, j IndexBootstrapResult) IndexBootstrapResult
// NewIndexBlock returns a new bootstrap index block result.
func NewIndexBlock(
- blockStart time.Time,
- segments []segment.Segment,
+ segments []Segment,
fulfilled ShardTimeRanges,
) IndexBlock {
if fulfilled == nil {
- fulfilled = ShardTimeRanges{}
+ fulfilled = NewShardTimeRanges()
}
return IndexBlock{
- blockStart: blockStart,
- segments: segments,
- fulfilled: fulfilled,
+ segments: segments,
+ fulfilled: fulfilled,
}
}
-// BlockStart returns the block start.
-func (b IndexBlock) BlockStart() time.Time {
- return b.blockStart
-}
-
// Segments returns the segments.
-func (b IndexBlock) Segments() []segment.Segment {
+func (b IndexBlock) Segments() []Segment {
return b.segments
}
@@ -279,3 +287,47 @@ func (b IndexBlock) Merged(other IndexBlock) IndexBlock {
}
return r
}
+
+// NewIndexBlockByVolumeType returns a new bootstrap index block by volume type result.
+func NewIndexBlockByVolumeType(blockStart time.Time) IndexBlockByVolumeType {
+ return IndexBlockByVolumeType{
+ blockStart: blockStart,
+ data: make(map[persist.IndexVolumeType]IndexBlock),
+ }
+}
+
+// BlockStart returns the block start.
+func (b IndexBlockByVolumeType) BlockStart() time.Time {
+ return b.blockStart
+}
+
+// GetBlock returns an IndexBlock for volumeType.
+func (b IndexBlockByVolumeType) GetBlock(volumeType persist.IndexVolumeType) (IndexBlock, bool) {
+ block, ok := b.data[volumeType]
+ return block, ok
+}
+
+// SetBlock sets an IndexBlock for volumeType.
+func (b IndexBlockByVolumeType) SetBlock(volumeType persist.IndexVolumeType, block IndexBlock) {
+ b.data[volumeType] = block
+}
+
+// Iter returns the underlying iterable map data.
+func (b IndexBlockByVolumeType) Iter() map[persist.IndexVolumeType]IndexBlock {
+ return b.data
+}
+
+// Merged returns a new merged index block by volume type.
+// It merges the underlying index blocks together by index volume type.
+func (b IndexBlockByVolumeType) Merged(other IndexBlockByVolumeType) IndexBlockByVolumeType {
+ r := b
+ for volumeType, otherBlock := range other.data {
+ existing, ok := r.data[volumeType]
+ if !ok {
+ r.data[volumeType] = otherBlock
+ continue
+ }
+ r.data[volumeType] = existing.Merged(otherBlock)
+ }
+ return r
+}
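
Index results are now keyed twice: by block start in IndexResults, then by volume type inside each IndexBlockByVolumeType. A short usage sketch for the default volume type (indexResultSketch is hypothetical; idxpersist aliases the m3ninx persist package as in the test file below):

    func indexResultSketch(blockStart time.Time, fulfilled result.ShardTimeRanges) {
        blocks := result.NewIndexBlockByVolumeType(blockStart)
        blocks.SetBlock(idxpersist.DefaultIndexVolumeType,
            result.NewIndexBlock(nil /* segments */, fulfilled))

        res := result.NewIndexBootstrapResult()
        res.Add(blocks, nil) // a nil unfulfilled is tolerated by AddRanges

        // Adding another entry for the same block start merges per volume
        // type rather than overwriting the existing block.
        more := result.NewIndexBlockByVolumeType(blockStart)
        more.SetBlock(idxpersist.DefaultIndexVolumeType,
            result.NewIndexBlock(nil, fulfilled.Copy()))
        res.Add(more, nil)
    }
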
diff --git a/src/dbnode/storage/bootstrap/result/result_index_test.go b/src/dbnode/storage/bootstrap/result/result_index_test.go
index 98b23218e8..230e5e428b 100644
--- a/src/dbnode/storage/bootstrap/result/result_index_test.go
+++ b/src/dbnode/storage/bootstrap/result/result_index_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -26,6 +26,7 @@ import (
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/m3ninx/index/segment"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
xtime "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
@@ -39,33 +40,47 @@ func TestIndexResultMergeMergesExistingSegments(t *testing.T) {
start := time.Now().Truncate(testBlockSize)
- segments := []segment.Segment{
- segment.NewMockSegment(ctrl),
- segment.NewMockSegment(ctrl),
- segment.NewMockSegment(ctrl),
- segment.NewMockSegment(ctrl),
- segment.NewMockSegment(ctrl),
- segment.NewMockSegment(ctrl),
+ segments := []Segment{
+ NewSegment(segment.NewMockSegment(ctrl), false),
+ NewSegment(segment.NewMockSegment(ctrl), false),
+ NewSegment(segment.NewMockSegment(ctrl), false),
+ NewSegment(segment.NewMockSegment(ctrl), false),
+ NewSegment(segment.NewMockSegment(ctrl), false),
+ NewSegment(segment.NewMockSegment(ctrl), false),
}
times := []time.Time{start, start.Add(testBlockSize), start.Add(2 * testBlockSize)}
- tr0 := NewShardTimeRanges(times[0], times[1], 1, 2, 3)
- tr1 := NewShardTimeRanges(times[1], times[2], 1, 2, 3)
+ tr0 := NewShardTimeRangesFromRange(times[0], times[1], 1, 2, 3)
+ tr1 := NewShardTimeRangesFromRange(times[1], times[2], 1, 2, 3)
first := NewIndexBootstrapResult()
- first.Add(NewIndexBlock(times[0], []segment.Segment{segments[0]}, tr0), nil)
- first.Add(NewIndexBlock(times[0], []segment.Segment{segments[1]}, tr0), nil)
- first.Add(NewIndexBlock(times[1], []segment.Segment{segments[2], segments[3]}, tr1), nil)
+ blk1 := NewIndexBlockByVolumeType(times[0])
+ blk1.SetBlock(idxpersist.DefaultIndexVolumeType, NewIndexBlock([]Segment{segments[0]}, tr0))
+ first.Add(blk1, nil)
+ blk2 := NewIndexBlockByVolumeType(times[0])
+ blk2.SetBlock(idxpersist.DefaultIndexVolumeType, NewIndexBlock([]Segment{segments[1]}, tr0))
+ first.Add(blk2, nil)
+ blk3 := NewIndexBlockByVolumeType(times[1])
+ blk3.SetBlock(idxpersist.DefaultIndexVolumeType, NewIndexBlock([]Segment{segments[2], segments[3]}, tr1))
+ first.Add(blk3, nil)
second := NewIndexBootstrapResult()
- second.Add(NewIndexBlock(times[0], []segment.Segment{segments[4]}, tr0), nil)
- second.Add(NewIndexBlock(times[1], []segment.Segment{segments[5]}, tr1), nil)
+ blk4 := NewIndexBlockByVolumeType(times[0])
+ blk4.SetBlock(idxpersist.DefaultIndexVolumeType, NewIndexBlock([]Segment{segments[4]}, tr0))
+ second.Add(blk4, nil)
+ blk5 := NewIndexBlockByVolumeType(times[1])
+ blk5.SetBlock(idxpersist.DefaultIndexVolumeType, NewIndexBlock([]Segment{segments[5]}, tr1))
+ second.Add(blk5, nil)
merged := MergedIndexBootstrapResult(first, second)
expected := NewIndexBootstrapResult()
- expected.Add(NewIndexBlock(times[0], []segment.Segment{segments[0], segments[1], segments[4]}, tr0), nil)
- expected.Add(NewIndexBlock(times[1], []segment.Segment{segments[2], segments[3], segments[5]}, tr1), nil)
+ blk6 := NewIndexBlockByVolumeType(times[0])
+ blk6.SetBlock(idxpersist.DefaultIndexVolumeType, NewIndexBlock([]Segment{segments[0], segments[1], segments[4]}, tr0))
+ expected.Add(blk6, nil)
+ blk7 := NewIndexBlockByVolumeType(times[1])
+ blk7.SetBlock(idxpersist.DefaultIndexVolumeType, NewIndexBlock([]Segment{segments[2], segments[3], segments[5]}, tr1))
+ expected.Add(blk7, nil)
assert.True(t, segmentsInResultsSame(expected.IndexResults(), merged.IndexResults()))
}
@@ -79,7 +94,7 @@ func TestIndexResultSetUnfulfilled(t *testing.T) {
return t0.Add(time.Duration(i) * time.Hour)
}
results := NewIndexBootstrapResult()
- testRanges := NewShardTimeRanges(tn(0), tn(1), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
+ testRanges := NewShardTimeRangesFromRange(tn(0), tn(1), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
results.SetUnfulfilled(testRanges)
require.Equal(t, testRanges, results.Unfulfilled())
}
@@ -93,13 +108,13 @@ func TestIndexResultAdd(t *testing.T) {
return t0.Add(time.Duration(i) * time.Hour)
}
results := NewIndexBootstrapResult()
- testRanges := NewShardTimeRanges(tn(0), tn(1), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
- results.Add(IndexBlock{}, testRanges)
+ testRanges := NewShardTimeRangesFromRange(tn(0), tn(1), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
+ results.Add(NewIndexBlockByVolumeType(time.Time{}), testRanges)
require.Equal(t, testRanges, results.Unfulfilled())
}
func TestShardTimeRangesToUnfulfilledIndexResult(t *testing.T) {
- str := ShardTimeRanges{
+ str := shardTimeRanges{
0: xtime.NewRanges(xtime.Range{
Start: time.Now(),
End: time.Now().Add(time.Minute),
@@ -114,7 +129,7 @@ func TestShardTimeRangesToUnfulfilledIndexResult(t *testing.T) {
assert.True(t, r.Unfulfilled().Equal(str))
}
-func TestIndexResulsMarkFulfilled(t *testing.T) {
+func TestIndexResultsMarkFulfilled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -127,36 +142,42 @@ func TestIndexResulsMarkFulfilled(t *testing.T) {
// range checks
require.Error(t, results.MarkFulfilled(tn(0),
- NewShardTimeRanges(tn(4), tn(6), 1), iopts))
+ NewShardTimeRangesFromRange(tn(4), tn(6), 1), idxpersist.DefaultIndexVolumeType, iopts))
require.Error(t, results.MarkFulfilled(tn(0),
- NewShardTimeRanges(tn(-1), tn(1), 1), iopts))
+ NewShardTimeRangesFromRange(tn(-1), tn(1), 1), idxpersist.DefaultIndexVolumeType, iopts))
// valid add
- fulfilledRange := NewShardTimeRanges(tn(0), tn(1), 1)
- require.NoError(t, results.MarkFulfilled(tn(0), fulfilledRange, iopts))
+ fulfilledRange := NewShardTimeRangesFromRange(tn(0), tn(1), 1)
+ require.NoError(t, results.MarkFulfilled(tn(0), fulfilledRange, idxpersist.DefaultIndexVolumeType, iopts))
require.Equal(t, 1, len(results))
- blk, ok := results[xtime.ToUnixNano(tn(0))]
+ blkByVolumeType, ok := results[xtime.ToUnixNano(tn(0))]
+ require.True(t, ok)
+ require.True(t, tn(0).Equal(blkByVolumeType.blockStart))
+ blk, ok := blkByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
require.True(t, ok)
- require.True(t, tn(0).Equal(blk.blockStart))
require.Equal(t, fulfilledRange, blk.fulfilled)
// additional add for same block
- nextFulfilledRange := NewShardTimeRanges(tn(1), tn(2), 2)
- require.NoError(t, results.MarkFulfilled(tn(1), nextFulfilledRange, iopts))
+ nextFulfilledRange := NewShardTimeRangesFromRange(tn(1), tn(2), 2)
+ require.NoError(t, results.MarkFulfilled(tn(1), nextFulfilledRange, idxpersist.DefaultIndexVolumeType, iopts))
require.Equal(t, 1, len(results))
- blk, ok = results[xtime.ToUnixNano(tn(0))]
+ blkByVolumeType, ok = results[xtime.ToUnixNano(tn(0))]
require.True(t, ok)
- require.True(t, tn(0).Equal(blk.blockStart))
+ require.True(t, tn(0).Equal(blkByVolumeType.blockStart))
fulfilledRange.AddRanges(nextFulfilledRange)
+ blk, ok = blkByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
+ require.True(t, ok)
require.Equal(t, fulfilledRange, blk.fulfilled)
// additional add for next block
- nextFulfilledRange = NewShardTimeRanges(tn(2), tn(4), 1, 2, 3)
- require.NoError(t, results.MarkFulfilled(tn(2), nextFulfilledRange, iopts))
+ nextFulfilledRange = NewShardTimeRangesFromRange(tn(2), tn(4), 1, 2, 3)
+ require.NoError(t, results.MarkFulfilled(tn(2), nextFulfilledRange, idxpersist.DefaultIndexVolumeType, iopts))
require.Equal(t, 2, len(results))
- blk, ok = results[xtime.ToUnixNano(tn(2))]
+ blkByVolumeType, ok = results[xtime.ToUnixNano(tn(2))]
+ require.True(t, ok)
+ require.True(t, tn(2).Equal(blkByVolumeType.blockStart))
+ blk, ok = blkByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
require.True(t, ok)
- require.True(t, tn(2).Equal(blk.blockStart))
require.Equal(t, nextFulfilledRange, blk.fulfilled)
}
@@ -164,8 +185,16 @@ func segmentsInResultsSame(a, b IndexResults) bool {
if len(a) != len(b) {
return false
}
- for t, block := range a {
- otherBlock, ok := b[t]
+ for t, blockByVolumeType := range a {
+ otherBlockByVolumeType, ok := b[t]
+ if !ok {
+ return false
+ }
+ block, ok := blockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
+ if !ok {
+ return false
+ }
+ otherBlock, ok := otherBlockByVolumeType.GetBlock(idxpersist.DefaultIndexVolumeType)
if !ok {
return false
}
diff --git a/src/dbnode/storage/bootstrap/result/shard_ranges.go b/src/dbnode/storage/bootstrap/result/shard_ranges.go
index 2d547b913a..c7ae0479aa 100644
--- a/src/dbnode/storage/bootstrap/result/shard_ranges.go
+++ b/src/dbnode/storage/bootstrap/result/shard_ranges.go
@@ -29,18 +29,58 @@ import (
xtime "github.com/m3db/m3/src/x/time"
)
-// NewShardTimeRanges returns a new ShardTimeRanges with provided shards and time range.
-func NewShardTimeRanges(start, end time.Time, shards ...uint32) ShardTimeRanges {
+// NewShardTimeRangesFromRange returns a new ShardTimeRanges with provided shards and time range.
+func NewShardTimeRangesFromRange(start, end time.Time, shards ...uint32) ShardTimeRanges {
timeRange := xtime.NewRanges(xtime.Range{Start: start, End: end})
- ranges := make(map[uint32]xtime.Ranges)
+ ranges := make(shardTimeRanges, len(shards))
for _, s := range shards {
ranges[s] = timeRange
}
return ranges
}
+// NewShardTimeRangesFromSize returns a new ShardTimeRanges with provided shards and time range.
+func NewShardTimeRangesFromSize(size int) ShardTimeRanges {
+ return make(shardTimeRanges, size)
+}
+
+// NewShardTimeRanges returns an empty ShardTimeRanges.
+func NewShardTimeRanges() ShardTimeRanges {
+ return make(shardTimeRanges)
+}
+
+// Get time ranges for a shard.
+func (r shardTimeRanges) Get(shard uint32) (xtime.Ranges, bool) {
+ tr, ok := r[shard]
+ return tr, ok
+}
+
+// Set time ranges for a shard.
+func (r shardTimeRanges) Set(shard uint32, ranges xtime.Ranges) ShardTimeRanges {
+ r[shard] = ranges
+ return r
+}
+
+// GetOrAdd gets or adds time ranges for a shard.
+func (r shardTimeRanges) GetOrAdd(shard uint32) xtime.Ranges {
+ if r[shard] == nil {
+ r[shard] = xtime.NewRanges()
+ }
+ return r[shard]
+}
+
+// Len returns the number of shards.
+func (r shardTimeRanges) Len() int {
+ return len(r)
+}
+
+// Iter returns the underlying map.
+func (r shardTimeRanges) Iter() map[uint32]xtime.Ranges {
+ return r
+}
+
// IsEmpty returns whether the shard time ranges are empty or not.
-func (r ShardTimeRanges) IsEmpty() bool {
+func (r shardTimeRanges) IsEmpty() bool {
for _, ranges := range r {
if !ranges.IsEmpty() {
return false
@@ -50,13 +90,13 @@ func (r ShardTimeRanges) IsEmpty() bool {
}
// Equal returns whether two shard time ranges are equal.
-func (r ShardTimeRanges) Equal(other ShardTimeRanges) bool {
- if len(r) != len(other) {
+func (r shardTimeRanges) Equal(other ShardTimeRanges) bool {
+ if len(r) != other.Len() {
return false
}
for shard, ranges := range r {
- otherRanges, ok := other[shard]
- if !ok {
+ otherRanges := other.GetOrAdd(shard)
+ if otherRanges == nil {
return false
}
if ranges.Len() != otherRanges.Len() {
@@ -76,32 +116,77 @@ func (r ShardTimeRanges) Equal(other ShardTimeRanges) bool {
return true
}
+// IsSuperset returns whether the current shard time ranges is a superset of the
+// other shard time ranges.
+func (r shardTimeRanges) IsSuperset(other ShardTimeRanges) bool {
+ if len(r) < other.Len() {
+ return false
+ }
+ for shard, ranges := range r {
+ otherRanges := other.GetOrAdd(shard)
+ if ranges.Len() < otherRanges.Len() {
+ return false
+ }
+ it := ranges.Iter()
+ otherIt := otherRanges.Iter()
+
+ // NB(bodu): Both of these iterators are sorted by time
+ // and the block sizes are expected to line up.
+ // The logic is that if every range in `otherIt` finds an equal range
+ // in `it`, the current ranges are a superset of the other ranges.
+ missedRange := false
+ otherIteratorNext:
+ for otherIt.Next() {
+ for it.Next() {
+ if otherIt.Value().Equal(it.Value()) {
+ continue otherIteratorNext
+ }
+ }
+
+ missedRange = true
+ break
+ }
+
+ // If any range in `otherIt` went unmatched then the current shard ranges
+ // are NOT a superset of the other shard ranges.
+ if missedRange {
+ return false
+ }
+ }
+ return true
+}
+
// Copy will return a copy of the current shard time ranges.
-func (r ShardTimeRanges) Copy() ShardTimeRanges {
- result := make(map[uint32]xtime.Ranges, len(r))
+func (r shardTimeRanges) Copy() ShardTimeRanges {
+ result := make(shardTimeRanges, len(r))
for shard, ranges := range r {
- result[shard] = xtime.Ranges{}.AddRanges(ranges)
+ newRanges := xtime.NewRanges()
+ newRanges.AddRanges(ranges)
+ result[shard] = newRanges
}
return result
}
// AddRanges adds other shard time ranges to the current shard time ranges.
-func (r ShardTimeRanges) AddRanges(other ShardTimeRanges) {
- for shard, ranges := range other {
+func (r shardTimeRanges) AddRanges(other ShardTimeRanges) {
+ if other == nil {
+ return
+ }
+ for shard, ranges := range other.Iter() {
if ranges.IsEmpty() {
continue
}
if existing, ok := r[shard]; ok {
- r[shard] = existing.AddRanges(ranges)
+ existing.AddRanges(ranges)
} else {
- r[shard] = ranges
+ r[shard] = ranges.Clone()
}
}
}
// ToUnfulfilledDataResult will return a result that is comprised of wholly
// unfulfilled time ranges from the set of shard time ranges.
-func (r ShardTimeRanges) ToUnfulfilledDataResult() DataBootstrapResult {
+func (r shardTimeRanges) ToUnfulfilledDataResult() DataBootstrapResult {
result := NewDataBootstrapResult()
result.SetUnfulfilled(r.Copy())
return result
@@ -109,21 +194,25 @@ func (r ShardTimeRanges) ToUnfulfilledDataResult() DataBootstrapResult {
// ToUnfulfilledIndexResult will return a result that is comprised of wholly
// unfulfilled time ranges from the set of shard time ranges.
-func (r ShardTimeRanges) ToUnfulfilledIndexResult() IndexBootstrapResult {
+func (r shardTimeRanges) ToUnfulfilledIndexResult() IndexBootstrapResult {
result := NewIndexBootstrapResult()
result.SetUnfulfilled(r.Copy())
return result
}
// Subtract will subtract another range from the current range.
-func (r ShardTimeRanges) Subtract(other ShardTimeRanges) {
+func (r shardTimeRanges) Subtract(other ShardTimeRanges) {
+ if other == nil {
+ return
+ }
for shard, ranges := range r {
- otherRanges, ok := other[shard]
+ otherRanges, ok := other.Get(shard)
if !ok {
continue
}
- subtractedRanges := ranges.RemoveRanges(otherRanges)
+ subtractedRanges := ranges.Clone()
+ subtractedRanges.RemoveRanges(otherRanges)
if subtractedRanges.IsEmpty() {
delete(r, shard)
} else {
@@ -134,7 +223,7 @@ func (r ShardTimeRanges) Subtract(other ShardTimeRanges) {
// MinMax will return the very minimum time as a start and the
// maximum time as an end in the ranges.
-func (r ShardTimeRanges) MinMax() (time.Time, time.Time) {
+func (r shardTimeRanges) MinMax() (time.Time, time.Time) {
min, max := time.Time{}, time.Time{}
for _, ranges := range r {
if ranges.IsEmpty() {
@@ -155,17 +244,17 @@ func (r ShardTimeRanges) MinMax() (time.Time, time.Time) {
}
// MinMaxRange returns the min and max times, and the duration for this range.
-func (r ShardTimeRanges) MinMaxRange() (time.Time, time.Time, time.Duration) {
+func (r shardTimeRanges) MinMaxRange() (time.Time, time.Time, time.Duration) {
min, max := r.MinMax()
return min, max, max.Sub(min)
}
type summaryFn func(xtime.Ranges) string
-func (r ShardTimeRanges) summarize(sfn summaryFn) string {
- values := make([]shardTimeRanges, 0, len(r))
+func (r shardTimeRanges) summarize(sfn summaryFn) string {
+ values := make([]shardTimeRangesPair, 0, len(r))
for shard, ranges := range r {
- values = append(values, shardTimeRanges{shard: shard, value: ranges})
+ values = append(values, shardTimeRangesPair{shard: shard, value: ranges})
}
sort.Sort(shardTimeRangesByShard(values))
@@ -190,7 +279,7 @@ func (r ShardTimeRanges) summarize(sfn summaryFn) string {
}
// String returns a description of the time ranges
-func (r ShardTimeRanges) String() string {
+func (r shardTimeRanges) String() string {
return r.summarize(xtime.Ranges.String)
}
@@ -207,16 +296,16 @@ func rangesDuration(ranges xtime.Ranges) string {
}
// SummaryString returns a summary description of the time ranges
-func (r ShardTimeRanges) SummaryString() string {
+func (r shardTimeRanges) SummaryString() string {
return r.summarize(rangesDuration)
}
-type shardTimeRanges struct {
+type shardTimeRangesPair struct {
shard uint32
value xtime.Ranges
}
-type shardTimeRangesByShard []shardTimeRanges
+type shardTimeRangesByShard []shardTimeRangesPair
func (str shardTimeRangesByShard) Len() int { return len(str) }
func (str shardTimeRangesByShard) Swap(i, j int) { str[i], str[j] = str[j], str[i] }
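
With the concrete map now unexported as shardTimeRanges, construction happens only through the three constructors above and access only through the ShardTimeRanges interface. A quick tour of the accessor surface (shardTimeRangesTour is hypothetical; fmt, xtime, and the result package are assumed imported as elsewhere in this change):

    func shardTimeRangesTour(start, end time.Time) {
        str := result.NewShardTimeRanges() // empty; NewShardTimeRangesFromSize pre-sizes
        str.Set(7, xtime.NewRanges(xtime.Range{Start: start, End: end}))

        if tr, ok := str.Get(7); ok {
            fmt.Println(tr.Len()) // 1 contiguous range
        }

        // GetOrAdd lazily initializes a shard entry before accumulating into it.
        str.GetOrAdd(9).AddRanges(xtime.NewRanges(xtime.Range{Start: start, End: end}))

        // Iter exposes the underlying map for traversal.
        for shard, ranges := range str.Iter() {
            fmt.Println(shard, ranges.String())
        }
    }
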
diff --git a/src/dbnode/storage/bootstrap/result/shard_ranges_test.go b/src/dbnode/storage/bootstrap/result/shard_ranges_test.go
new file mode 100644
index 0000000000..19a04adff0
--- /dev/null
+++ b/src/dbnode/storage/bootstrap/result/shard_ranges_test.go
@@ -0,0 +1,111 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package result
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestShardTimeRangesAdd(t *testing.T) {
+ start := time.Now().Truncate(testBlockSize)
+ times := []time.Time{start, start.Add(testBlockSize), start.Add(2 * testBlockSize), start.Add(3 * testBlockSize)}
+
+ sr := []ShardTimeRanges{
+ NewShardTimeRangesFromRange(times[0], times[1], 1, 2, 3),
+ NewShardTimeRangesFromRange(times[1], times[2], 1, 2, 3),
+ NewShardTimeRangesFromRange(times[2], times[3], 1, 2, 3),
+ }
+ ranges := NewShardTimeRanges()
+ for _, r := range sr {
+ ranges.AddRanges(r)
+ }
+ for i, r := range sr {
+ min, max, dur := r.MinMaxRange()
+ require.Equal(t, testBlockSize, dur)
+ require.Equal(t, times[i], min)
+ require.Equal(t, times[i+1], max)
+ }
+}
+
+func TestShardTimeRangesIsSuperset(t *testing.T) {
+ start := time.Now().Truncate(testBlockSize)
+ times := []time.Time{start, start.Add(testBlockSize), start.Add(2 * testBlockSize), start.Add(3 * testBlockSize)}
+
+ sr1 := []ShardTimeRanges{
+ NewShardTimeRangesFromRange(times[0], times[1], 1, 2, 3),
+ NewShardTimeRangesFromRange(times[1], times[2], 1, 2, 3),
+ NewShardTimeRangesFromRange(times[2], times[3], 1, 2, 3),
+ }
+ ranges1 := NewShardTimeRanges()
+ for _, r := range sr1 {
+ ranges1.AddRanges(r)
+ }
+ sr2 := []ShardTimeRanges{
+ NewShardTimeRangesFromRange(times[0], times[1], 2),
+ NewShardTimeRangesFromRange(times[1], times[2], 1, 2),
+ NewShardTimeRangesFromRange(times[2], times[3], 1),
+ }
+ ranges2 := NewShardTimeRanges()
+ for _, r := range sr2 {
+ ranges2.AddRanges(r)
+ }
+ sr3 := []ShardTimeRanges{
+ NewShardTimeRangesFromRange(times[1], times[2], 1, 2, 3),
+ }
+ ranges3 := NewShardTimeRanges()
+ for _, r := range sr3 {
+ ranges3.AddRanges(r)
+ }
+
+ require.True(t, ranges1.IsSuperset(ranges2))
+ require.True(t, ranges1.IsSuperset(ranges1))
+ require.True(t, ranges1.IsSuperset(ranges3))
+
+ // Reverse sanity checks.
+ require.False(t, ranges2.IsSuperset(ranges1))
+ require.False(t, ranges3.IsSuperset(ranges1))
+
+ // Additional negative checks: non-overlapping time ranges and empty time ranges.
+ sr1 = []ShardTimeRanges{
+ NewShardTimeRangesFromRange(times[0], times[1], 1, 2, 3),
+ }
+ ranges1 = NewShardTimeRanges()
+ for _, r := range sr1 {
+ ranges1.AddRanges(r)
+ }
+ sr2 = []ShardTimeRanges{
+ NewShardTimeRangesFromRange(times[1], times[2], 1, 2, 3),
+ }
+ ranges2 = NewShardTimeRanges()
+ for _, r := range sr2 {
+ ranges2.AddRanges(r)
+ }
+ ranges3 = NewShardTimeRanges()
+
+ require.False(t, ranges2.IsSuperset(ranges1))
+ require.False(t, ranges1.IsSuperset(ranges2))
+ require.True(t, ranges2.IsSuperset(ranges3))
+ require.False(t, ranges3.IsSuperset(ranges1))
+ require.False(t, ranges3.IsSuperset(ranges2))
+}
diff --git a/src/dbnode/storage/bootstrap/result/types.go b/src/dbnode/storage/bootstrap/result/types.go
index 4f8dd644e9..423553ee22 100644
--- a/src/dbnode/storage/bootstrap/result/types.go
+++ b/src/dbnode/storage/bootstrap/result/types.go
@@ -27,6 +27,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/m3ninx/index/segment"
+ "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
xtime "github.com/m3db/m3/src/x/time"
@@ -53,25 +54,54 @@ type IndexBootstrapResult interface {
SetUnfulfilled(unfulfilled ShardTimeRanges)
// Add adds an index block result.
- Add(block IndexBlock, unfulfilled ShardTimeRanges)
+ Add(blocks IndexBlockByVolumeType, unfulfilled ShardTimeRanges)
// NumSeries returns the total number of series across all segments.
NumSeries() int
}
// IndexResults is a set of index blocks indexed by block start.
-type IndexResults map[xtime.UnixNano]IndexBlock
+type IndexResults map[xtime.UnixNano]IndexBlockByVolumeType
// IndexBuilder wraps an index segment builder w/ batching.
type IndexBuilder struct {
builder segment.DocumentsBuilder
}
-// IndexBlock contains the bootstrap data structures for an index block.
-type IndexBlock struct {
+// IndexBlockByVolumeType contains the bootstrap data structures for an index block by volume type.
+type IndexBlockByVolumeType struct {
blockStart time.Time
- segments []segment.Segment
- fulfilled ShardTimeRanges
+ data map[persist.IndexVolumeType]IndexBlock
+}
+
+// IndexBlock is an index block for an index volume type.
+type IndexBlock struct {
+ segments []Segment
+ fulfilled ShardTimeRanges
+}
+
+// Segment wraps an index segment so we can easily determine whether or not the segment is persisted to disk.
+type Segment struct {
+ segment segment.Segment
+ persisted bool
+}
+
+// NewSegment returns an index segment w/ persistence metadata.
+func NewSegment(segment segment.Segment, persisted bool) Segment {
+ return Segment{
+ segment: segment,
+ persisted: persisted,
+ }
+}
+
+// IsPersisted returns whether or not the underlying segment was persisted to disk.
+func (s Segment) IsPersisted() bool {
+ return s.persisted
+}
+
+// Segment returns a segment.
+func (s Segment) Segment() segment.Segment {
+ return s.segment
}
// DocumentsBuilderAllocator allocates a new DocumentsBuilder type when
@@ -123,7 +153,63 @@ type DatabaseSeriesBlocks struct {
type ShardResults map[uint32]ShardResult
// ShardTimeRanges is a map of shards to time ranges.
-type ShardTimeRanges map[uint32]xtime.Ranges
+type ShardTimeRanges interface {
+ // Get time ranges for a shard.
+ Get(shard uint32) (xtime.Ranges, bool)
+
+ // Set time ranges for a shard.
+ Set(shard uint32, ranges xtime.Ranges) ShardTimeRanges
+
+ // GetOrAdd gets or adds time ranges for a shard.
+ GetOrAdd(shard uint32) xtime.Ranges
+
+ // AddRanges adds other shard time ranges to the current shard time ranges.
+ AddRanges(ranges ShardTimeRanges)
+
+ // Iter returns the underlying map.
+ Iter() map[uint32]xtime.Ranges
+
+ Copy() ShardTimeRanges
+
+ // IsSuperset returns whether the current shard time ranges are a
+ // superset of the other shard time ranges.
+ IsSuperset(other ShardTimeRanges) bool
+
+ // Equal returns whether two shard time ranges are equal.
+ Equal(other ShardTimeRanges) bool
+
+ // ToUnfulfilledDataResult will return a result that is comprised of wholly
+ // unfulfilled time ranges from the set of shard time ranges.
+ ToUnfulfilledDataResult() DataBootstrapResult
+
+ // ToUnfulfilledIndexResult will return a result that is comprised of wholly
+ // unfulfilled time ranges from the set of shard time ranges.
+ ToUnfulfilledIndexResult() IndexBootstrapResult
+
+ // Subtract will subtract another range from the current range.
+ Subtract(other ShardTimeRanges)
+
+ // MinMax will return the very minimum time as a start and the
+ // maximum time as an end in the ranges.
+ MinMax() (time.Time, time.Time)
+
+ // MinMaxRange returns the min and max times, and the duration for this range.
+ MinMaxRange() (time.Time, time.Time, time.Duration)
+
+ // String returns a description of the time ranges
+ String() string
+
+ // SummaryString returns a summary description of the time ranges
+ SummaryString() string
+
+ // IsEmpty returns whether the shard time ranges are empty or not.
+ IsEmpty() bool
+
+ // Len returns the number of shards
+ Len() int
+}
+
+type shardTimeRanges map[uint32]xtime.Ranges
// Options represents the options for bootstrap results.
type Options interface {
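
The new Segment wrapper lets downstream consumers distinguish segments already persisted to disk from in-memory ones without type assertions. A consumer-side sketch (inspectSegments is hypothetical, and the branch bodies are illustrative rather than the exact fs/peers bootstrapper behavior):

    func inspectSegments(res result.IndexBootstrapResult) {
        for _, blocks := range res.IndexResults() {
            for _, block := range blocks.Iter() {
                for _, seg := range block.Segments() {
                    if seg.IsPersisted() {
                        // Durable on disk already; safe to treat as flushed.
                        continue
                    }
                    // In-memory only: keep until an index flush persists it.
                    _ = seg.Segment() // unwrap the underlying m3ninx segment
                }
            }
        }
    }
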
diff --git a/src/dbnode/storage/bootstrap/types.go b/src/dbnode/storage/bootstrap/types.go
index 2a42a4643f..f6bb4b932b 100644
--- a/src/dbnode/storage/bootstrap/types.go
+++ b/src/dbnode/storage/bootstrap/types.go
@@ -29,6 +29,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/topology"
+ "github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
@@ -54,7 +55,11 @@ type ProcessProvider interface {
// with the mindset that it will always be set to default values from the constructor.
type Process interface {
// Run runs the bootstrap process, returning the bootstrap result and any error encountered.
- Run(start time.Time, namespaces []ProcessNamespace) (NamespaceResults, error)
+ Run(
+ ctx context.Context,
+ start time.Time,
+ namespaces []ProcessNamespace,
+ ) (NamespaceResults, error)
}
// ProcessNamespace is a namespace to pass to the bootstrap process.
@@ -74,10 +79,15 @@ type NamespaceHooks struct {
opts NamespaceHooksOptions
}
+// Hook wraps a runnable callback.
+type Hook interface {
+ Run() error
+}
+
// NamespaceHooksOptions is a set of hooks options.
type NamespaceHooksOptions struct {
- BootstrapSourceBegin func() error
- BootstrapSourceEnd func() error
+ BootstrapSourceBegin Hook
+ BootstrapSourceEnd Hook
}
// NewNamespaceHooks returns a new set of bootstrap hooks.
@@ -90,7 +100,7 @@ func (h NamespaceHooks) BootstrapSourceBegin() error {
if h.opts.BootstrapSourceBegin == nil {
return nil
}
- return h.opts.BootstrapSourceBegin()
+ return h.opts.BootstrapSourceBegin.Run()
}
// BootstrapSourceEnd is a hook to call when a bootstrap source ends.
@@ -98,7 +108,7 @@ func (h NamespaceHooks) BootstrapSourceEnd() error {
if h.opts.BootstrapSourceEnd == nil {
return nil
}
- return h.opts.BootstrapSourceEnd()
+ return h.opts.BootstrapSourceEnd.Run()
}
// Namespaces are a set of namespaces being bootstrapped.
@@ -214,8 +224,13 @@ type Namespace struct {
// NamespaceRunOptions are the run options for a bootstrap process run.
type NamespaceRunOptions struct {
// ShardTimeRanges are the time ranges for the shards that should be fulfilled
- // by the bootstrapper.
+ // by the bootstrapper. This changes on each bootstrapper pass as time ranges are fulfilled.
ShardTimeRanges result.ShardTimeRanges
+ // TargetShardTimeRanges are the original target time ranges for shards and do not change
+ // across bootstrapper passes.
+ // NB(bodu): This is used by the commit log bootstrapper as it needs to run for the entire original
+ // target shard time ranges.
+ TargetShardTimeRanges result.ShardTimeRanges
// RunOptions are the run options for the bootstrap run.
RunOptions RunOptions
}
@@ -375,7 +390,7 @@ type Bootstrapper interface {
// A bootstrapper should only return an error should it want to entirely
// cancel the bootstrapping of the node, i.e. non-recoverable situation
// like not being able to read from the filesystem.
- Bootstrap(namespaces Namespaces) (NamespaceResults, error)
+ Bootstrap(ctx context.Context, namespaces Namespaces) (NamespaceResults, error)
}
// Source represents a bootstrap source. Note that a source can and will be reused so
@@ -401,5 +416,5 @@ type Source interface {
// A bootstrapper source should only return an error should it want to
// entirely cancel the bootstrapping of the node, i.e. non-recoverable
// situation like not being able to read from the filesystem.
- Read(namespaces Namespaces) (NamespaceResults, error)
+ Read(ctx context.Context, namespaces Namespaces) (NamespaceResults, error)
}
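
TargetShardTimeRanges preserves the originally requested window while ShardTimeRanges shrinks as earlier bootstrappers fulfill ranges, per the NB above. A sketch of how a source would choose between them (rangesForSource and readsAllHistory are hypothetical, the flag standing in for commit-log-style behavior):

    func rangesForSource(
        opts bootstrap.NamespaceRunOptions,
        readsAllHistory bool,
    ) result.ShardTimeRanges {
        if readsAllHistory {
            // Commit-log-style sources replay their full original window,
            // regardless of what earlier bootstrappers already fulfilled.
            return opts.TargetShardTimeRanges
        }
        // Most sources only need whatever is still unfulfilled.
        return opts.ShardTimeRanges
    }
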
diff --git a/src/dbnode/storage/bootstrap/util.go b/src/dbnode/storage/bootstrap/util.go
index f213f4bca3..560b9fc48f 100644
--- a/src/dbnode/storage/bootstrap/util.go
+++ b/src/dbnode/storage/bootstrap/util.go
@@ -266,7 +266,7 @@ func (a *TestDataAccumulator) checkoutSeriesWithLock(
unit xtime.Unit,
annotation []byte,
_ series.WriteOptions,
- ) (bool, error) {
+ ) (bool, series.WriteType, error) {
a.Lock()
a.writeMap[stringID] = append(
a.writeMap[stringID], series.DecodedTestValue{
@@ -276,7 +276,7 @@ func (a *TestDataAccumulator) checkoutSeriesWithLock(
Annotation: annotation,
})
a.Unlock()
- return true, nil
+ return true, series.WarmWrite, nil
}).AnyTimes()
result := CheckoutSeriesResult{
@@ -347,8 +347,8 @@ func BuildNamespacesTesterWithReaderIteratorPool(
iterPool encoding.MultiReaderIteratorPool,
mds ...namespace.Metadata,
) NamespacesTester {
- shards := make([]uint32, 0, len(ranges))
- for shard := range ranges {
+ shards := make([]uint32, 0, ranges.Len())
+ for shard := range ranges.Iter() {
shards = append(shards, shard)
}
@@ -378,12 +378,14 @@ func BuildNamespacesTesterWithReaderIteratorPool(
Shards: shards,
DataAccumulator: acc,
DataRunOptions: NamespaceRunOptions{
- ShardTimeRanges: ranges.Copy(),
- RunOptions: runOpts,
+ ShardTimeRanges: ranges.Copy(),
+ TargetShardTimeRanges: ranges.Copy(),
+ RunOptions: runOpts,
},
IndexRunOptions: NamespaceRunOptions{
- ShardTimeRanges: ranges.Copy(),
- RunOptions: runOpts,
+ ShardTimeRanges: ranges.Copy(),
+ TargetShardTimeRanges: ranges.Copy(),
+ RunOptions: runOpts,
},
})
}
@@ -536,7 +538,9 @@ func (nt *NamespacesTester) ResultForNamespace(id ident.ID) NamespaceResult {
// TestBootstrapWith bootstraps the current Namespaces with the
// provided bootstrapper.
func (nt *NamespacesTester) TestBootstrapWith(b Bootstrapper) {
- res, err := b.Bootstrap(nt.Namespaces)
+ ctx := context.NewContext()
+ defer ctx.Close()
+ res, err := b.Bootstrap(ctx, nt.Namespaces)
assert.NoError(nt.t, err)
nt.Results = res
}
@@ -544,21 +548,25 @@ func (nt *NamespacesTester) TestBootstrapWith(b Bootstrapper) {
// TestReadWith reads the current Namespaces with the
// provided bootstrap source.
func (nt *NamespacesTester) TestReadWith(s Source) {
- res, err := s.Read(nt.Namespaces)
+ ctx := context.NewContext()
+ defer ctx.Close()
+ res, err := s.Read(ctx, nt.Namespaces)
require.NoError(nt.t, err)
nt.Results = res
}
func validateRanges(ac xtime.Ranges, ex xtime.Ranges) error {
// Make range eclipses expected.
- removedRange := ex.RemoveRanges(ac)
+ removedRange := ex.Clone()
+ removedRange.RemoveRanges(ac)
if !removedRange.IsEmpty() {
return fmt.Errorf("actual range %v does not match expected range %v "+
"diff: %v", ac, ex, removedRange)
}
// Now make sure no ranges outside of expected.
- expectedWithAddedRanges := ex.AddRanges(ac)
+ expectedWithAddedRanges := ex.Clone()
+ expectedWithAddedRanges.AddRanges(ac)
if ex.Len() != expectedWithAddedRanges.Len() {
return fmt.Errorf("expected with re-added ranges not equal")
}
@@ -579,14 +587,14 @@ func validateShardTimeRanges(
r result.ShardTimeRanges,
ex result.ShardTimeRanges,
) error {
- if len(ex) != len(r) {
+ if ex.Len() != r.Len() {
return fmt.Errorf("expected %v and actual %v size mismatch", ex, r)
}
- seen := make(map[uint32]struct{}, len(r))
- for k, val := range r {
- expectedVal, found := ex[k]
- if !found {
+ seen := make(map[uint32]struct{}, r.Len())
+ for k, val := range r.Iter() {
+ expectedVal, ok := ex.Get(k)
+ if !ok {
return fmt.Errorf("expected shard map %v does not have shard %d; "+
"actual: %v", ex, k, r)
}
@@ -598,7 +606,7 @@ func validateShardTimeRanges(
seen[k] = struct{}{}
}
- for k := range ex {
+ for k := range ex.Iter() {
if _, beenFound := seen[k]; !beenFound {
return fmt.Errorf("shard %d in actual not found in expected %v", k, ex)
}
@@ -704,7 +712,7 @@ var _ gomock.Matcher = (*NamespaceMatcher)(nil)
// ShardTimeRangesMatcher is a matcher for ShardTimeRanges.
type ShardTimeRangesMatcher struct {
// Ranges are the expected ranges.
- Ranges map[uint32]xtime.Ranges
+ Ranges result.ShardTimeRanges
}
// Matches returns whether x is a match.
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/segment.go b/src/dbnode/storage/bootstrap_hooks.go
similarity index 66%
rename from src/dbnode/storage/bootstrap/bootstrapper/segment.go
rename to src/dbnode/storage/bootstrap_hooks.go
index 5720ec69ba..497de8f1f0 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/segment.go
+++ b/src/dbnode/storage/bootstrap_hooks.go
@@ -18,25 +18,32 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package bootstrapper
+package storage
-import "github.com/m3db/m3/src/m3ninx/index/segment"
+import (
+ "sync"
-// Segment wraps an index segment so we can easily determine whether or not the segment is persisted to disk.
-type Segment struct {
- persisted bool
- segment.Segment
+ "github.com/m3db/m3/src/dbnode/storage/bootstrap"
+)
+
+type bootstrapSourceEndHook struct {
+ shards []databaseShard
}
-// NewSegment returns an index segment w/ persistence metadata.
-func NewSegment(segment segment.Segment, persisted bool) *Segment {
- return &Segment{
- persisted: persisted,
- Segment: segment,
- }
+func newBootstrapSourceEndHook(shards []databaseShard) bootstrap.Hook {
+ return &bootstrapSourceEndHook{shards: shards}
}
-// IsPersisted returns whether or not the underlying segment was persisted to disk.
-func (s *Segment) IsPersisted() bool {
- return s.persisted
+func (h *bootstrapSourceEndHook) Run() error {
+ var wg sync.WaitGroup
+ for _, shard := range h.shards {
+ shard := shard
+ wg.Add(1)
+ go func() {
+ shard.UpdateFlushStates()
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ return nil
}
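
The new hook fans UpdateFlushStates out across every shard and blocks until all goroutines return. A self-contained sketch of the same fan-out/wait pattern, with a hypothetical Shard interface standing in for the package-internal databaseShard:

package main

import (
	"fmt"
	"sync"
)

type Shard interface{ UpdateFlushStates() }

type fakeShard struct{ id int }

func (s fakeShard) UpdateFlushStates() { fmt.Println("updated flush states for shard", s.id) }

func runHook(shards []Shard) {
	var wg sync.WaitGroup
	for _, shard := range shards {
		shard := shard // capture the loop variable for the goroutine (pre-Go 1.22 semantics)
		wg.Add(1)
		go func() {
			defer wg.Done()
			shard.UpdateFlushStates()
		}()
	}
	wg.Wait() // block until every shard has refreshed its flush states
}

func main() {
	runHook([]Shard{fakeShard{1}, fakeShard{2}, fakeShard{3}})
}
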
diff --git a/src/dbnode/storage/bootstrap_test.go b/src/dbnode/storage/bootstrap_test.go
index ce2f7aa8a5..95337e996f 100644
--- a/src/dbnode/storage/bootstrap_test.go
+++ b/src/dbnode/storage/bootstrap_test.go
@@ -28,6 +28,7 @@ import (
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
+ "github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/golang/mock/gomock"
@@ -54,10 +55,10 @@ func TestDatabaseBootstrapWithBootstrapError(t *testing.T) {
namespaces := []databaseNamespace{ns}
db := NewMockdatabase(ctrl)
- db.EXPECT().GetOwnedNamespaces().Return(namespaces, nil)
+ db.EXPECT().OwnedNamespaces().Return(namespaces, nil)
m := NewMockdatabaseMediator(ctrl)
- m.EXPECT().DisableFileOps()
+ m.EXPECT().DisableFileOpsAndWait()
m.EXPECT().EnableFileOps().AnyTimes()
bsm := newBootstrapManager(db, m, opts).(*bootstrapManager)
@@ -65,13 +66,13 @@ func TestDatabaseBootstrapWithBootstrapError(t *testing.T) {
bsm.sleepFn = func(time.Duration) {}
gomock.InOrder(
- ns.EXPECT().PrepareBootstrap().Return([]databaseShard{}, nil),
+ ns.EXPECT().PrepareBootstrap(gomock.Any()).Return([]databaseShard{}, nil),
ns.EXPECT().Metadata().Return(meta),
ns.EXPECT().ID().Return(id),
ns.EXPECT().
- Bootstrap(gomock.Any()).
+ Bootstrap(gomock.Any(), gomock.Any()).
Return(fmt.Errorf("an error")).
- Do(func(bootstrapResult bootstrap.NamespaceResult) {
+ Do(func(ctx context.Context, bootstrapResult bootstrap.NamespaceResult) {
// After returning an error, make sure we don't re-enqueue.
bsm.bootstrapFn = func() error {
return nil
@@ -79,6 +80,9 @@ func TestDatabaseBootstrapWithBootstrapError(t *testing.T) {
}),
)
+ ctx := context.NewContext()
+ defer ctx.Close()
+
result, err := bsm.Bootstrap()
require.NoError(t, err)
@@ -97,7 +101,7 @@ func TestDatabaseBootstrapSubsequentCallsQueued(t *testing.T) {
}))
m := NewMockdatabaseMediator(ctrl)
- m.EXPECT().DisableFileOps()
+ m.EXPECT().DisableFileOpsAndWait()
m.EXPECT().EnableFileOps().AnyTimes()
db := NewMockdatabase(ctrl)
@@ -110,13 +114,13 @@ func TestDatabaseBootstrapSubsequentCallsQueued(t *testing.T) {
var wg sync.WaitGroup
wg.Add(1)
- ns.EXPECT().PrepareBootstrap().Return([]databaseShard{}, nil).AnyTimes()
+ ns.EXPECT().PrepareBootstrap(gomock.Any()).Return([]databaseShard{}, nil).AnyTimes()
ns.EXPECT().Metadata().Return(meta).AnyTimes()
ns.EXPECT().
- Bootstrap(gomock.Any()).
+ Bootstrap(gomock.Any(), gomock.Any()).
Return(nil).
- Do(func(arg0 interface{}) {
+ Do(func(arg0, arg1 interface{}) {
defer wg.Done()
// Enqueue the second bootstrap
@@ -129,17 +133,92 @@ func TestDatabaseBootstrapSubsequentCallsQueued(t *testing.T) {
bsm.RUnlock()
// Expect the second bootstrap call
- ns.EXPECT().Bootstrap(gomock.Any()).Return(nil)
+ ns.EXPECT().Bootstrap(gomock.Any(), gomock.Any()).Return(nil)
})
ns.EXPECT().
ID().
Return(id).
Times(2)
db.EXPECT().
- GetOwnedNamespaces().
+ OwnedNamespaces().
Return([]databaseNamespace{ns}, nil).
Times(2)
_, err = bsm.Bootstrap()
require.Nil(t, err)
}
+
+func TestDatabaseBootstrapBootstrapHooks(t *testing.T) {
+ ctrl := gomock.NewController(xtest.Reporter{T: t})
+ defer ctrl.Finish()
+
+ opts := DefaultTestOptions()
+ now := time.Now()
+ opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
+ return now
+ }))
+
+ m := NewMockdatabaseMediator(ctrl)
+ m.EXPECT().DisableFileOpsAndWait()
+ m.EXPECT().EnableFileOps().AnyTimes()
+
+ db := NewMockdatabase(ctrl)
+ bsm := newBootstrapManager(db, m, opts).(*bootstrapManager)
+
+ numNamespaces := 3
+	namespaces := make([]databaseNamespace, 0, numNamespaces)
+ for i := 0; i < numNamespaces; i++ {
+ ns := NewMockdatabaseNamespace(ctrl)
+ id := ident.StringID("testBootstrap")
+ meta, err := namespace.NewMetadata(id, namespace.NewOptions())
+ require.NoError(t, err)
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ numShards := 8
+ shards := make([]databaseShard, 0, numShards)
+ for j := 0; j < numShards; j++ {
+ shard := NewMockdatabaseShard(ctrl)
+ shard.EXPECT().IsBootstrapped().Return(false)
+ shard.EXPECT().IsBootstrapped().Return(true)
+ shard.EXPECT().UpdateFlushStates().Times(2)
+ shard.EXPECT().ID().Return(uint32(j)).AnyTimes()
+ shards = append(shards, shard)
+ }
+
+ ns.EXPECT().PrepareBootstrap(gomock.Any()).Return(shards, nil).AnyTimes()
+ ns.EXPECT().Metadata().Return(meta).AnyTimes()
+
+ ns.EXPECT().
+ Bootstrap(gomock.Any(), gomock.Any()).
+ Return(nil).
+ Do(func(arg0, arg1 interface{}) {
+ defer wg.Done()
+
+ // Enqueue the second bootstrap
+ _, err := bsm.Bootstrap()
+ assert.Error(t, err)
+ assert.Equal(t, errBootstrapEnqueued, err)
+ assert.False(t, bsm.IsBootstrapped())
+ bsm.RLock()
+ assert.Equal(t, true, bsm.hasPending)
+ bsm.RUnlock()
+
+ // Expect the second bootstrap call
+ ns.EXPECT().Bootstrap(gomock.Any(), gomock.Any()).Return(nil)
+ })
+ ns.EXPECT().
+ ID().
+ Return(id).
+ Times(2)
+ namespaces = append(namespaces, ns)
+ }
+ db.EXPECT().
+ OwnedNamespaces().
+ Return(namespaces, nil).
+ Times(2)
+
+ _, err := bsm.Bootstrap()
+ require.Nil(t, err)
+}
diff --git a/src/dbnode/storage/cleanup.go b/src/dbnode/storage/cleanup.go
index 00cf1a418f..d91db51329 100644
--- a/src/dbnode/storage/cleanup.go
+++ b/src/dbnode/storage/cleanup.go
@@ -70,12 +70,15 @@ type cleanupManager struct {
deleteFilesFn deleteFilesFn
deleteInactiveDirectoriesFn deleteInactiveDirectoriesFn
- cleanupInProgress bool
+ warmFlushCleanupInProgress bool
+ coldFlushCleanupInProgress bool
metrics cleanupManagerMetrics
+ logger *zap.Logger
}
type cleanupManagerMetrics struct {
- status tally.Gauge
+ warmFlushCleanupStatus tally.Gauge
+ coldFlushCleanupStatus tally.Gauge
corruptCommitlogFile tally.Counter
corruptSnapshotFile tally.Counter
corruptSnapshotMetadataFile tally.Counter
@@ -89,7 +92,8 @@ func newCleanupManagerMetrics(scope tally.Scope) cleanupManagerMetrics {
sScope := scope.SubScope("snapshot")
smScope := scope.SubScope("snapshot-metadata")
return cleanupManagerMetrics{
- status: scope.Gauge("cleanup"),
+ warmFlushCleanupStatus: scope.Gauge("warm-flush-cleanup"),
+ coldFlushCleanupStatus: scope.Gauge("cold-flush-cleanup"),
corruptCommitlogFile: clScope.Counter("corrupt"),
corruptSnapshotFile: sScope.Counter("corrupt"),
corruptSnapshotMetadataFile: smScope.Counter("corrupt"),
@@ -119,47 +123,54 @@ func newCleanupManager(
deleteFilesFn: fs.DeleteFiles,
deleteInactiveDirectoriesFn: fs.DeleteInactiveDirectories,
metrics: newCleanupManagerMetrics(scope),
+ logger: opts.InstrumentOptions().Logger(),
}
}
-func (m *cleanupManager) Cleanup(t time.Time) error {
+func (m *cleanupManager) WarmFlushCleanup(t time.Time, isBootstrapped bool) error {
+	// Don't perform any cleanup if we are not bootstrapped yet.
+ if !isBootstrapped {
+ m.logger.Debug("database is still bootstrapping, terminating cleanup")
+ return nil
+ }
+
m.Lock()
- m.cleanupInProgress = true
+ m.warmFlushCleanupInProgress = true
m.Unlock()
defer func() {
m.Lock()
- m.cleanupInProgress = false
+ m.warmFlushCleanupInProgress = false
m.Unlock()
}()
- multiErr := xerrors.NewMultiError()
- if err := m.cleanupDataFiles(t); err != nil {
- multiErr = multiErr.Add(fmt.Errorf(
- "encountered errors when cleaning up data files for %v: %v", t, err))
+ namespaces, err := m.database.OwnedNamespaces()
+ if err != nil {
+ return err
}
- if err := m.cleanupExpiredIndexFiles(t); err != nil {
+ multiErr := xerrors.NewMultiError()
+ if err := m.cleanupExpiredIndexFiles(t, namespaces); err != nil {
multiErr = multiErr.Add(fmt.Errorf(
"encountered errors when cleaning up index files for %v: %v", t, err))
}
- if err := m.deleteInactiveDataFiles(); err != nil {
+ if err := m.cleanupDuplicateIndexFiles(namespaces); err != nil {
multiErr = multiErr.Add(fmt.Errorf(
- "encountered errors when deleting inactive data files for %v: %v", t, err))
+ "encountered errors when cleaning up index files for %v: %v", t, err))
}
- if err := m.deleteInactiveDataSnapshotFiles(); err != nil {
+ if err := m.deleteInactiveDataSnapshotFiles(namespaces); err != nil {
multiErr = multiErr.Add(fmt.Errorf(
"encountered errors when deleting inactive snapshot files for %v: %v", t, err))
}
- if err := m.deleteInactiveNamespaceFiles(); err != nil {
+ if err := m.deleteInactiveNamespaceFiles(namespaces); err != nil {
multiErr = multiErr.Add(fmt.Errorf(
"encountered errors when deleting inactive namespace files for %v: %v", t, err))
}
- if err := m.cleanupSnapshotsAndCommitlogs(); err != nil {
+ if err := m.cleanupSnapshotsAndCommitlogs(namespaces); err != nil {
multiErr = multiErr.Add(fmt.Errorf(
"encountered errors when cleaning up snapshot and commitlog files: %v", err))
}
@@ -167,26 +178,64 @@ func (m *cleanupManager) Cleanup(t time.Time) error {
return multiErr.FinalError()
}
+func (m *cleanupManager) ColdFlushCleanup(t time.Time, isBootstrapped bool) error {
+	// Don't perform any cleanup if we are not bootstrapped yet.
+ if !isBootstrapped {
+ m.logger.Debug("database is still bootstrapping, terminating cleanup")
+ return nil
+ }
+
+ m.Lock()
+ m.coldFlushCleanupInProgress = true
+ m.Unlock()
+
+ defer func() {
+ m.Lock()
+ m.coldFlushCleanupInProgress = false
+ m.Unlock()
+ }()
+
+ namespaces, err := m.database.OwnedNamespaces()
+ if err != nil {
+ return err
+ }
+
+ multiErr := xerrors.NewMultiError()
+ if err := m.cleanupDataFiles(t, namespaces); err != nil {
+ multiErr = multiErr.Add(fmt.Errorf(
+ "encountered errors when cleaning up data files for %v: %v", t, err))
+ }
+
+ if err := m.deleteInactiveDataFiles(namespaces); err != nil {
+ multiErr = multiErr.Add(fmt.Errorf(
+ "encountered errors when deleting inactive data files for %v: %v", t, err))
+ }
+
+ return multiErr.FinalError()
+}
func (m *cleanupManager) Report() {
m.RLock()
- cleanupInProgress := m.cleanupInProgress
+ coldFlushCleanupInProgress := m.coldFlushCleanupInProgress
+ warmFlushCleanupInProgress := m.warmFlushCleanupInProgress
m.RUnlock()
- if cleanupInProgress {
- m.metrics.status.Update(1)
+ if coldFlushCleanupInProgress {
+ m.metrics.coldFlushCleanupStatus.Update(1)
+ } else {
+ m.metrics.coldFlushCleanupStatus.Update(0)
+ }
+
+ if warmFlushCleanupInProgress {
+ m.metrics.warmFlushCleanupStatus.Update(1)
} else {
- m.metrics.status.Update(0)
+ m.metrics.warmFlushCleanupStatus.Update(0)
}
}
-func (m *cleanupManager) deleteInactiveNamespaceFiles() error {
+func (m *cleanupManager) deleteInactiveNamespaceFiles(namespaces []databaseNamespace) error {
var namespaceDirNames []string
filePathPrefix := m.database.Options().CommitLogOptions().FilesystemOptions().FilePathPrefix()
dataDirPath := fs.DataDirPath(filePathPrefix)
- namespaces, err := m.database.GetOwnedNamespaces()
- if err != nil {
- return err
- }
for _, n := range namespaces {
namespaceDirNames = append(namespaceDirNames, n.ID().String())
@@ -197,27 +246,23 @@ func (m *cleanupManager) deleteInactiveNamespaceFiles() error {
// deleteInactiveDataFiles will delete data files for shards that the node no longer owns
// which can occur in the case of topology changes
-func (m *cleanupManager) deleteInactiveDataFiles() error {
- return m.deleteInactiveDataFileSetFiles(fs.NamespaceDataDirPath)
+func (m *cleanupManager) deleteInactiveDataFiles(namespaces []databaseNamespace) error {
+ return m.deleteInactiveDataFileSetFiles(fs.NamespaceDataDirPath, namespaces)
}
// deleteInactiveDataSnapshotFiles will delete snapshot files for shards that the node no longer owns
// which can occur in the case of topology changes
-func (m *cleanupManager) deleteInactiveDataSnapshotFiles() error {
- return m.deleteInactiveDataFileSetFiles(fs.NamespaceSnapshotsDirPath)
+func (m *cleanupManager) deleteInactiveDataSnapshotFiles(namespaces []databaseNamespace) error {
+ return m.deleteInactiveDataFileSetFiles(fs.NamespaceSnapshotsDirPath, namespaces)
}
-func (m *cleanupManager) deleteInactiveDataFileSetFiles(filesetFilesDirPathFn func(string, ident.ID) string) error {
+func (m *cleanupManager) deleteInactiveDataFileSetFiles(filesetFilesDirPathFn func(string, ident.ID) string, namespaces []databaseNamespace) error {
multiErr := xerrors.NewMultiError()
filePathPrefix := m.database.Options().CommitLogOptions().FilesystemOptions().FilePathPrefix()
- namespaces, err := m.database.GetOwnedNamespaces()
- if err != nil {
- return err
- }
for _, n := range namespaces {
var activeShards []string
namespaceDirPath := filesetFilesDirPathFn(filePathPrefix, n.ID())
- for _, s := range n.GetOwnedShards() {
+ for _, s := range n.OwnedShards() {
shard := fmt.Sprintf("%d", s.ID())
activeShards = append(activeShards, shard)
}
@@ -227,35 +272,27 @@ func (m *cleanupManager) deleteInactiveDataFileSetFiles(filesetFilesDirPathFn fu
return multiErr.FinalError()
}
-func (m *cleanupManager) cleanupDataFiles(t time.Time) error {
+func (m *cleanupManager) cleanupDataFiles(t time.Time, namespaces []databaseNamespace) error {
multiErr := xerrors.NewMultiError()
- namespaces, err := m.database.GetOwnedNamespaces()
- if err != nil {
- return err
- }
for _, n := range namespaces {
if !n.Options().CleanupEnabled() {
continue
}
earliestToRetain := retention.FlushTimeStart(n.Options().RetentionOptions(), t)
- shards := n.GetOwnedShards()
+ shards := n.OwnedShards()
multiErr = multiErr.Add(m.cleanupExpiredNamespaceDataFiles(earliestToRetain, shards))
multiErr = multiErr.Add(m.cleanupCompactedNamespaceDataFiles(shards))
}
return multiErr.FinalError()
}
-func (m *cleanupManager) cleanupExpiredIndexFiles(t time.Time) error {
- namespaces, err := m.database.GetOwnedNamespaces()
- if err != nil {
- return err
- }
+func (m *cleanupManager) cleanupExpiredIndexFiles(t time.Time, namespaces []databaseNamespace) error {
multiErr := xerrors.NewMultiError()
for _, n := range namespaces {
if !n.Options().CleanupEnabled() || !n.Options().IndexOptions().Enabled() {
continue
}
- idx, err := n.GetIndex()
+ idx, err := n.Index()
if err != nil {
multiErr = multiErr.Add(err)
continue
@@ -265,6 +302,22 @@ func (m *cleanupManager) cleanupExpiredIndexFiles(t time.Time) error {
return multiErr.FinalError()
}
+func (m *cleanupManager) cleanupDuplicateIndexFiles(namespaces []databaseNamespace) error {
+ multiErr := xerrors.NewMultiError()
+ for _, n := range namespaces {
+ if !n.Options().CleanupEnabled() || !n.Options().IndexOptions().Enabled() {
+ continue
+ }
+ idx, err := n.Index()
+ if err != nil {
+ multiErr = multiErr.Add(err)
+ continue
+ }
+ multiErr = multiErr.Add(idx.CleanupDuplicateFileSets())
+ }
+ return multiErr.FinalError()
+}
+
func (m *cleanupManager) cleanupExpiredNamespaceDataFiles(earliestToRetain time.Time, shards []databaseShard) error {
multiErr := xerrors.NewMultiError()
for _, shard := range shards {
@@ -317,17 +370,12 @@ func (m *cleanupManager) cleanupCompactedNamespaceDataFiles(shards []databaseSha
// 9. Delete all corrupt commitlog files (ignoring any commitlog files being actively written to).
//
// This process is also modeled formally in TLA+ in the file `SnapshotsSpec.tla`.
-func (m *cleanupManager) cleanupSnapshotsAndCommitlogs() (finalErr error) {
+func (m *cleanupManager) cleanupSnapshotsAndCommitlogs(namespaces []databaseNamespace) (finalErr error) {
logger := m.opts.InstrumentOptions().Logger().With(
zap.String("comment",
"partial/corrupt files are expected as result of a restart (this is ok)"),
)
- namespaces, err := m.database.GetOwnedNamespaces()
- if err != nil {
- return err
- }
-
fsOpts := m.opts.CommitLogOptions().FilesystemOptions()
snapshotMetadatas, snapshotMetadataErrorsWithPaths, err := m.snapshotMetadataFilesFn(fsOpts)
if err != nil {
@@ -376,7 +424,7 @@ func (m *cleanupManager) cleanupSnapshotsAndCommitlogs() (finalErr error) {
}()
for _, ns := range namespaces {
- for _, s := range ns.GetOwnedShards() {
+ for _, s := range ns.OwnedShards() {
shardSnapshots, err := m.snapshotFilesFn(fsOpts.FilePathPrefix(), ns.ID(), s.ID())
if err != nil {
multiErr = multiErr.Add(fmt.Errorf("err reading snapshot files for ns: %s and shard: %d, err: %v", ns.ID(), s.ID(), err))
@@ -393,9 +441,9 @@ func (m *cleanupManager) cleanupSnapshotsAndCommitlogs() (finalErr error) {
m.metrics.corruptSnapshotFile.Inc(1)
logger.With(
zap.Error(err),
- zap.Strings("files", snapshot.AbsoluteFilepaths),
+ zap.Strings("files", snapshot.AbsoluteFilePaths),
).Warn("corrupt snapshot file during cleanup, marking files for deletion")
- filesToDelete = append(filesToDelete, snapshot.AbsoluteFilepaths...)
+ filesToDelete = append(filesToDelete, snapshot.AbsoluteFilePaths...)
continue
}
@@ -403,7 +451,7 @@ func (m *cleanupManager) cleanupSnapshotsAndCommitlogs() (finalErr error) {
// If the UUID of the snapshot files doesn't match the most recent snapshot
// then it's safe to delete because it means we have a more recently completed set.
m.metrics.deletedSnapshotFile.Inc(1)
- filesToDelete = append(filesToDelete, snapshot.AbsoluteFilepaths...)
+ filesToDelete = append(filesToDelete, snapshot.AbsoluteFilePaths...)
}
}
}
@@ -412,7 +460,7 @@ func (m *cleanupManager) cleanupSnapshotsAndCommitlogs() (finalErr error) {
// Delete all snapshot metadatas prior to the most recent one.
for _, snapshot := range sortedSnapshotMetadatas[:len(sortedSnapshotMetadatas)-1] {
m.metrics.deletedSnapshotMetadataFile.Inc(1)
- filesToDelete = append(filesToDelete, snapshot.AbsoluteFilepaths()...)
+ filesToDelete = append(filesToDelete, snapshot.AbsoluteFilePaths()...)
}
// Delete corrupt snapshot metadata files.
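
A structural note on the cleanup.go changes above: each cleanup pass now calls OwnedNamespaces once and threads the slice through its helpers instead of every helper fetching namespaces itself, while errors accumulate rather than short-circuit. A condensed sketch of that shape with illustrative names (errors.Join, Go 1.20+, stands in for the repo's xerrors.MultiError):

package main

import (
	"errors"
	"fmt"
)

type namespaceInfo struct{ id string }

func cleanupIndexFiles(ns []namespaceInfo) error { return nil }
func cleanupDataFiles(ns []namespaceInfo) error  { return nil }

// cleanupPass fetches the owned namespaces once and passes the slice
// to each helper, collecting errors instead of returning on the first.
func cleanupPass(owned func() ([]namespaceInfo, error)) error {
	namespaces, err := owned()
	if err != nil {
		return err
	}
	var errs []error
	if err := cleanupIndexFiles(namespaces); err != nil {
		errs = append(errs, fmt.Errorf("index cleanup: %w", err))
	}
	if err := cleanupDataFiles(namespaces); err != nil {
		errs = append(errs, fmt.Errorf("data cleanup: %w", err))
	}
	return errors.Join(errs...)
}

func main() {
	err := cleanupPass(func() ([]namespaceInfo, error) {
		return []namespaceInfo{{id: "metrics"}}, nil
	})
	fmt.Println(err) // <nil>
}
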
diff --git a/src/dbnode/storage/cleanup_test.go b/src/dbnode/storage/cleanup_test.go
index 3557a7bf10..acc15dc251 100644
--- a/src/dbnode/storage/cleanup_test.go
+++ b/src/dbnode/storage/cleanup_test.go
@@ -32,6 +32,7 @@ import (
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
"github.com/m3db/m3/src/dbnode/retention"
+ xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
xtest "github.com/m3db/m3/src/x/test"
@@ -110,7 +111,7 @@ func TestCleanupManagerCleanupCommitlogsAndSnapshots(t *testing.T) {
Shard: shard,
VolumeIndex: 0,
},
- AbsoluteFilepaths: []string{fmt.Sprintf("/snapshots/%s/snapshot-filepath-%d", namespace, shard)},
+ AbsoluteFilePaths: []string{fmt.Sprintf("/snapshots/%s/snapshot-filepath-%d", namespace, shard)},
CachedSnapshotTime: testBlockStart,
CachedSnapshotID: testSnapshotUUID0,
},
@@ -134,7 +135,7 @@ func TestCleanupManagerCleanupCommitlogsAndSnapshots(t *testing.T) {
Shard: shard,
VolumeIndex: 0,
},
- AbsoluteFilepaths: []string{fmt.Sprintf("/snapshots/%s/snapshot-filepath-%d", namespace, shard)},
+ AbsoluteFilePaths: []string{fmt.Sprintf("/snapshots/%s/snapshot-filepath-%d", namespace, shard)},
CachedSnapshotTime: testBlockStart,
CachedSnapshotID: testSnapshotUUID0,
},
@@ -193,7 +194,7 @@ func TestCleanupManagerCleanupCommitlogsAndSnapshots(t *testing.T) {
Shard: shard,
VolumeIndex: 0,
},
- AbsoluteFilepaths: []string{fmt.Sprintf("/snapshots/%s/snapshot-filepath-%d", namespace, shard)},
+ AbsoluteFilePaths: []string{fmt.Sprintf("/snapshots/%s/snapshot-filepath-%d", namespace, shard)},
// Zero these out so it will try to look them up and return an error, indicating the files
// are corrupt.
CachedSnapshotTime: time.Time{},
@@ -296,12 +297,12 @@ func TestCleanupManagerCleanupCommitlogsAndSnapshots(t *testing.T) {
ns.EXPECT().ID().Return(ident.StringID(fmt.Sprintf("ns%d", i))).AnyTimes()
ns.EXPECT().Options().Return(nsOpts).AnyTimes()
ns.EXPECT().NeedsFlush(gomock.Any(), gomock.Any()).Return(false, nil).AnyTimes()
- ns.EXPECT().GetOwnedShards().Return(shards).AnyTimes()
+ ns.EXPECT().OwnedShards().Return(shards).AnyTimes()
namespaces = append(namespaces, ns)
}
db := newMockdatabase(ctrl, namespaces...)
- db.EXPECT().GetOwnedNamespaces().Return(namespaces, nil).AnyTimes()
+ db.EXPECT().OwnedNamespaces().Return(namespaces, nil).AnyTimes()
mgr := newCleanupManager(db, newNoopFakeActiveLogs(), tally.NoopScope).(*cleanupManager)
mgr.opts = mgr.opts.SetCommitLogOptions(
mgr.opts.CommitLogOptions().
@@ -317,7 +318,7 @@ func TestCleanupManagerCleanupCommitlogsAndSnapshots(t *testing.T) {
return nil
}
- err := mgr.Cleanup(ts)
+ err := cleanup(mgr, ts, true)
if tc.expectErr {
require.Error(t, err)
} else {
@@ -329,7 +330,7 @@ func TestCleanupManagerCleanupCommitlogsAndSnapshots(t *testing.T) {
}
}
-func TestCleanupManagerNamespaceCleanup(t *testing.T) {
+func TestCleanupManagerNamespaceCleanupBootstrapped(t *testing.T) {
ctrl := gomock.NewController(xtest.Reporter{T: t})
defer ctrl.Finish()
@@ -348,18 +349,48 @@ func TestCleanupManagerNamespaceCleanup(t *testing.T) {
ns.EXPECT().ID().Return(ident.StringID("ns")).AnyTimes()
ns.EXPECT().Options().Return(nsOpts).AnyTimes()
ns.EXPECT().NeedsFlush(gomock.Any(), gomock.Any()).Return(false, nil).AnyTimes()
- ns.EXPECT().GetOwnedShards().Return(nil).AnyTimes()
+ ns.EXPECT().OwnedShards().Return(nil).AnyTimes()
- idx := NewMocknamespaceIndex(ctrl)
- ns.EXPECT().GetIndex().Return(idx, nil)
+ idx := NewMockNamespaceIndex(ctrl)
+ ns.EXPECT().Index().Times(2).Return(idx, nil)
nses := []databaseNamespace{ns}
db := newMockdatabase(ctrl, ns)
- db.EXPECT().GetOwnedNamespaces().Return(nses, nil).AnyTimes()
+ db.EXPECT().OwnedNamespaces().Return(nses, nil).AnyTimes()
mgr := newCleanupManager(db, newNoopFakeActiveLogs(), tally.NoopScope).(*cleanupManager)
idx.EXPECT().CleanupExpiredFileSets(ts).Return(nil)
- require.NoError(t, mgr.Cleanup(ts))
+ idx.EXPECT().CleanupDuplicateFileSets().Return(nil)
+ require.NoError(t, cleanup(mgr, ts, true))
+}
+
+func TestCleanupManagerNamespaceCleanupNotBootstrapped(t *testing.T) {
+ ctrl := gomock.NewController(xtest.Reporter{T: t})
+ defer ctrl.Finish()
+
+ ts := timeFor(36000)
+ rOpts := retentionOptions.
+ SetRetentionPeriod(21600 * time.Second).
+ SetBlockSize(3600 * time.Second)
+ nsOpts := namespaceOptions.
+ SetRetentionOptions(rOpts).
+ SetCleanupEnabled(true).
+ SetIndexOptions(namespace.NewIndexOptions().
+ SetEnabled(true).
+ SetBlockSize(7200 * time.Second))
+
+ ns := NewMockdatabaseNamespace(ctrl)
+ ns.EXPECT().ID().Return(ident.StringID("ns")).AnyTimes()
+ ns.EXPECT().Options().Return(nsOpts).AnyTimes()
+ ns.EXPECT().NeedsFlush(gomock.Any(), gomock.Any()).Return(false, nil).AnyTimes()
+ ns.EXPECT().OwnedShards().Return(nil).AnyTimes()
+
+ nses := []databaseNamespace{ns}
+ db := newMockdatabase(ctrl, ns)
+ db.EXPECT().OwnedNamespaces().Return(nses, nil).AnyTimes()
+
+ mgr := newCleanupManager(db, newNoopFakeActiveLogs(), tally.NoopScope).(*cleanupManager)
+ require.NoError(t, cleanup(mgr, ts, false))
}
// Test NS doesn't cleanup when flag is present
@@ -380,7 +411,7 @@ func TestCleanupManagerDoesntNeedCleanup(t *testing.T) {
namespaces = append(namespaces, ns)
}
db := newMockdatabase(ctrl, namespaces...)
- db.EXPECT().GetOwnedNamespaces().Return(namespaces, nil).AnyTimes()
+ db.EXPECT().OwnedNamespaces().Return(namespaces, nil).AnyTimes()
mgr := newCleanupManager(db, newNoopFakeActiveLogs(), tally.NoopScope).(*cleanupManager)
mgr.opts = mgr.opts.SetCommitLogOptions(
mgr.opts.CommitLogOptions().
@@ -392,7 +423,7 @@ func TestCleanupManagerDoesntNeedCleanup(t *testing.T) {
return nil
}
- require.NoError(t, mgr.Cleanup(ts))
+ require.NoError(t, cleanup(mgr, ts, true))
}
func TestCleanupDataAndSnapshotFileSetFiles(t *testing.T) {
@@ -409,16 +440,16 @@ func TestCleanupDataAndSnapshotFileSetFiles(t *testing.T) {
shard.EXPECT().CleanupExpiredFileSets(expectedEarliestToRetain).Return(nil)
shard.EXPECT().CleanupCompactedFileSets().Return(nil)
shard.EXPECT().ID().Return(uint32(0)).AnyTimes()
- ns.EXPECT().GetOwnedShards().Return([]databaseShard{shard}).AnyTimes()
+ ns.EXPECT().OwnedShards().Return([]databaseShard{shard}).AnyTimes()
ns.EXPECT().ID().Return(ident.StringID("nsID")).AnyTimes()
ns.EXPECT().NeedsFlush(gomock.Any(), gomock.Any()).Return(false, nil).AnyTimes()
namespaces := []databaseNamespace{ns}
db := newMockdatabase(ctrl, namespaces...)
- db.EXPECT().GetOwnedNamespaces().Return(namespaces, nil).AnyTimes()
+ db.EXPECT().OwnedNamespaces().Return(namespaces, nil).AnyTimes()
mgr := newCleanupManager(db, newNoopFakeActiveLogs(), tally.NoopScope).(*cleanupManager)
- require.NoError(t, mgr.Cleanup(ts))
+ require.NoError(t, cleanup(mgr, ts, true))
}
type deleteInactiveDirectoriesCall struct {
@@ -438,13 +469,13 @@ func TestDeleteInactiveDataAndSnapshotFileSetFiles(t *testing.T) {
shard := NewMockdatabaseShard(ctrl)
shard.EXPECT().ID().Return(uint32(0)).AnyTimes()
- ns.EXPECT().GetOwnedShards().Return([]databaseShard{shard}).AnyTimes()
+ ns.EXPECT().OwnedShards().Return([]databaseShard{shard}).AnyTimes()
ns.EXPECT().ID().Return(ident.StringID("nsID")).AnyTimes()
ns.EXPECT().NeedsFlush(gomock.Any(), gomock.Any()).Return(false, nil).AnyTimes()
namespaces := []databaseNamespace{ns}
db := newMockdatabase(ctrl, namespaces...)
- db.EXPECT().GetOwnedNamespaces().Return(namespaces, nil).AnyTimes()
+ db.EXPECT().OwnedNamespaces().Return(namespaces, nil).AnyTimes()
mgr := newCleanupManager(db, newNoopFakeActiveLogs(), tally.NoopScope).(*cleanupManager)
deleteInactiveDirectoriesCalls := []deleteInactiveDirectoriesCall{}
@@ -457,7 +488,7 @@ func TestDeleteInactiveDataAndSnapshotFileSetFiles(t *testing.T) {
}
mgr.deleteInactiveDirectoriesFn = deleteInactiveDirectoriesFn
- require.NoError(t, mgr.Cleanup(ts))
+ require.NoError(t, cleanup(mgr, ts, true))
expectedCalls := []deleteInactiveDirectoriesCall{
deleteInactiveDirectoriesCall{
@@ -486,7 +517,7 @@ func TestDeleteInactiveDataAndSnapshotFileSetFiles(t *testing.T) {
}
}
-func TestCleanupManagerPropagatesGetOwnedNamespacesError(t *testing.T) {
+func TestCleanupManagerPropagatesOwnedNamespacesError(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -496,13 +527,13 @@ func TestCleanupManagerPropagatesGetOwnedNamespacesError(t *testing.T) {
db.EXPECT().Options().Return(DefaultTestOptions()).AnyTimes()
db.EXPECT().Open().Return(nil)
db.EXPECT().Terminate().Return(nil)
- db.EXPECT().GetOwnedNamespaces().Return(nil, errDatabaseIsClosed).AnyTimes()
+ db.EXPECT().OwnedNamespaces().Return(nil, errDatabaseIsClosed).AnyTimes()
mgr := newCleanupManager(db, newNoopFakeActiveLogs(), tally.NoopScope).(*cleanupManager)
require.NoError(t, db.Open())
require.NoError(t, db.Terminate())
- require.Error(t, mgr.Cleanup(ts))
+ require.Error(t, cleanup(mgr, ts, true))
}
func timeFor(s int64) time.Time {
@@ -526,3 +557,14 @@ func newFakeActiveLogs(activeLogs persist.CommitLogFiles) fakeActiveLogs {
activeLogs: activeLogs,
}
}
+
+func cleanup(
+ mgr databaseCleanupManager,
+ t time.Time,
+ isBootstrapped bool,
+) error {
+ multiErr := xerrors.NewMultiError()
+ multiErr = multiErr.Add(mgr.WarmFlushCleanup(t, isBootstrapped))
+ multiErr = multiErr.Add(mgr.ColdFlushCleanup(t, isBootstrapped))
+ return multiErr.FinalError()
+}
diff --git a/src/dbnode/storage/cluster/database.go b/src/dbnode/storage/cluster/database.go
index 0e4bf50427..75a77b7d6d 100644
--- a/src/dbnode/storage/cluster/database.go
+++ b/src/dbnode/storage/cluster/database.go
@@ -51,16 +51,22 @@ type newStorageDatabaseFn func(
) (storage.Database, error)
type databaseMetrics struct {
- initializing tally.Gauge
- leaving tally.Gauge
- available tally.Gauge
+ initializing tally.Gauge
+ leaving tally.Gauge
+ available tally.Gauge
+ shardsClusterTotal tally.Gauge
+ shardsClusterReplicas tally.Gauge
}
func newDatabaseMetrics(scope tally.Scope) databaseMetrics {
+ shardsScope := scope.SubScope("shards")
+ shardsClusterScope := scope.SubScope("shards-cluster")
return databaseMetrics{
- initializing: scope.Gauge("shards.initializing"),
- leaving: scope.Gauge("shards.leaving"),
- available: scope.Gauge("shards.available"),
+ initializing: shardsScope.Gauge("initializing"),
+ leaving: shardsScope.Gauge("leaving"),
+ available: shardsScope.Gauge("available"),
+ shardsClusterTotal: shardsClusterScope.Gauge("total"),
+ shardsClusterReplicas: shardsClusterScope.Gauge("replicas"),
}
}
@@ -319,7 +325,8 @@ func (d *clusterDB) activeTopologyWatch() {
}
func (d *clusterDB) analyzeAndReportShardStates() {
- entry, ok := d.watch.Get().LookupHostShardSet(d.hostID)
+ placement := d.watch.Get()
+ entry, ok := placement.LookupHostShardSet(d.hostID)
if !ok {
return
}
@@ -343,6 +350,9 @@ func (d *clusterDB) analyzeAndReportShardStates() {
d.metrics.initializing.Update(float64(initializing))
d.metrics.leaving.Update(float64(leaving))
d.metrics.available.Update(float64(available))
+ shardsClusterTotal := len(placement.ShardSet().All())
+ d.metrics.shardsClusterTotal.Update(float64(shardsClusterTotal))
+ d.metrics.shardsClusterReplicas.Update(float64(placement.Replicas()))
}
defer reportStats()
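
The metrics change above swaps dotted gauge names for tally sub-scopes, so the reporter controls the separator, and reports cluster-wide totals from the placement. A shape-only illustration against the uber-go/tally API; NoopScope discards values, and a production scope would come from tally.NewRootScope:

package main

import "github.com/uber-go/tally"

func main() {
	scope := tally.NoopScope

	// Per-host shard state gauges, as in newDatabaseMetrics above.
	shards := scope.SubScope("shards")
	shards.Gauge("initializing").Update(2)
	shards.Gauge("available").Update(30)

	// Cluster-wide totals derived from the placement.
	cluster := scope.SubScope("shards-cluster")
	cluster.Gauge("total").Update(256)
	cluster.Gauge("replicas").Update(3)
}
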
diff --git a/src/dbnode/storage/coldflush.go b/src/dbnode/storage/coldflush.go
new file mode 100644
index 0000000000..17913c4530
--- /dev/null
+++ b/src/dbnode/storage/coldflush.go
@@ -0,0 +1,199 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package storage
+
+import (
+ "sync"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/persist"
+ xerrors "github.com/m3db/m3/src/x/errors"
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/uber-go/tally"
+ "go.uber.org/zap"
+)
+
+type coldFlushManager struct {
+ databaseCleanupManager
+ sync.RWMutex
+
+ log *zap.Logger
+ database database
+ pm persist.Manager
+ opts Options
+	// Retain fileOpStatus here to be consistent with the
+	// filesystem manager, since both are filesystem processes.
+ status fileOpStatus
+ isColdFlushing tally.Gauge
+ enabled bool
+}
+
+func newColdFlushManager(
+ database database,
+ pm persist.Manager,
+ opts Options,
+) databaseColdFlushManager {
+ instrumentOpts := opts.InstrumentOptions()
+ scope := instrumentOpts.MetricsScope().SubScope("fs")
+ // NB(bodu): cold flush cleanup doesn't require commit logs.
+ cm := newCleanupManager(database, nil, scope)
+
+ return &coldFlushManager{
+ databaseCleanupManager: cm,
+ log: instrumentOpts.Logger(),
+ database: database,
+ pm: pm,
+ opts: opts,
+ status: fileOpNotStarted,
+ isColdFlushing: scope.Gauge("cold-flush"),
+ enabled: true,
+ }
+}
+
+func (m *coldFlushManager) Disable() fileOpStatus {
+ m.Lock()
+ status := m.status
+ m.enabled = false
+ m.Unlock()
+ return status
+}
+
+func (m *coldFlushManager) Enable() fileOpStatus {
+ m.Lock()
+ status := m.status
+ m.enabled = true
+ m.Unlock()
+ return status
+}
+
+func (m *coldFlushManager) Status() fileOpStatus {
+ m.RLock()
+ status := m.status
+ m.RUnlock()
+ return status
+}
+
+func (m *coldFlushManager) Run(t time.Time) bool {
+ m.Lock()
+ if !m.shouldRunWithLock() {
+ m.Unlock()
+ return false
+ }
+ m.status = fileOpInProgress
+ m.Unlock()
+
+ // NB(xichen): perform data cleanup and flushing sequentially to minimize the impact of disk seeks.
+	// NB(r): Use an invariant here since flush errors were introduced
+	// and not caught in CI or integration tests.
+	// When an invariant is violated in CI tests it panics so as to fail
+	// the build.
+ if err := m.ColdFlushCleanup(t, m.database.IsBootstrapped()); err != nil {
+ instrument.EmitAndLogInvariantViolation(m.opts.InstrumentOptions(),
+ func(l *zap.Logger) {
+ l.Error("error when cleaning up cold flush data", zap.Time("time", t), zap.Error(err))
+ })
+ }
+ if err := m.trackedColdFlush(); err != nil {
+ instrument.EmitAndLogInvariantViolation(m.opts.InstrumentOptions(),
+ func(l *zap.Logger) {
+ l.Error("error when cold flushing data", zap.Time("time", t), zap.Error(err))
+ })
+ }
+ m.Lock()
+ m.status = fileOpNotStarted
+ m.Unlock()
+ return true
+}
+
+func (m *coldFlushManager) trackedColdFlush() error {
+ // The cold flush process will persist any data that has been "loaded" into memory via
+ // the Load() API but has not yet been persisted durably. As a result, if the cold flush
+ // process completes without error, then we want to "decrement" the number of tracked bytes
+ // by however many were outstanding right before the cold flush began.
+ //
+ // For example:
+ // t0: Load 100 bytes --> (numLoadedBytes == 100, numPendingLoadedBytes == 0)
+ // t1: memTracker.MarkLoadedAsPending() --> (numLoadedBytes == 100, numPendingLoadedBytes == 100)
+ // t2: Load 200 bytes --> (numLoadedBytes == 300, numPendingLoadedBytes == 100)
+ // t3: ColdFlushStart()
+ // t4: Load 300 bytes --> (numLoadedBytes == 600, numPendingLoadedBytes == 100)
+ // t5: ColdFlushEnd()
+ // t6: memTracker.DecPendingLoadedBytes() --> (numLoadedBytes == 500, numPendingLoadedBytes == 0)
+ // t7: memTracker.MarkLoadedAsPending() --> (numLoadedBytes == 500, numPendingLoadedBytes == 500)
+ // t8: ColdFlushStart()
+ // t9: ColdFlushError()
+ // t10: memTracker.MarkLoadedAsPending() --> (numLoadedBytes == 500, numPendingLoadedBytes == 500)
+ // t11: ColdFlushStart()
+ // t12: ColdFlushEnd()
+ // t13: memTracker.DecPendingLoadedBytes() --> (numLoadedBytes == 0, numPendingLoadedBytes == 0)
+ memTracker := m.opts.MemoryTracker()
+ memTracker.MarkLoadedAsPending()
+
+ if err := m.coldFlush(); err != nil {
+ return err
+ }
+
+ // Only decrement if the cold flush was a success. In this case, the decrement will reduce the
+ // value by however many bytes had been tracked when the cold flush began.
+ memTracker.DecPendingLoadedBytes()
+ return nil
+}
+
+func (m *coldFlushManager) coldFlush() error {
+ namespaces, err := m.database.OwnedNamespaces()
+ if err != nil {
+ return err
+ }
+
+ flushPersist, err := m.pm.StartFlushPersist()
+ if err != nil {
+ return err
+ }
+
+ multiErr := xerrors.NewMultiError()
+ for _, ns := range namespaces {
+ if err = ns.ColdFlush(flushPersist); err != nil {
+ multiErr = multiErr.Add(err)
+ }
+ }
+
+ multiErr = multiErr.Add(flushPersist.DoneFlush())
+ err = multiErr.FinalError()
+ return err
+}
+
+func (m *coldFlushManager) Report() {
+ m.databaseCleanupManager.Report()
+
+ m.RLock()
+ status := m.status
+ m.RUnlock()
+ if status == fileOpInProgress {
+ m.isColdFlushing.Update(1)
+ } else {
+ m.isColdFlushing.Update(0)
+ }
+}
+
+func (m *coldFlushManager) shouldRunWithLock() bool {
+ return m.enabled && m.status != fileOpInProgress && m.database.IsBootstrapped()
+}
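
Run stays single-flight by flipping status to fileOpInProgress under the lock before any I/O, so a concurrent caller observes the flag and returns false rather than double-flushing. A stripped-down sketch of that guard with illustrative names:

package main

import (
	"fmt"
	"sync"
)

type guardedRunner struct {
	mu      sync.Mutex
	enabled bool
	running bool
}

// tryRun mirrors the flag dance above: mark busy under the lock, do the
// expensive work outside it, then clear the flag on the way out.
func (g *guardedRunner) tryRun(work func()) bool {
	g.mu.Lock()
	if !g.enabled || g.running {
		g.mu.Unlock()
		return false
	}
	g.running = true
	g.mu.Unlock()

	work()

	g.mu.Lock()
	g.running = false
	g.mu.Unlock()
	return true
}

func main() {
	g := &guardedRunner{enabled: true}
	fmt.Println(g.tryRun(func() {})) // true
	g.enabled = false
	fmt.Println(g.tryRun(func() {})) // false
}
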
diff --git a/src/dbnode/storage/coldflush_test.go b/src/dbnode/storage/coldflush_test.go
new file mode 100644
index 0000000000..55eb3d355c
--- /dev/null
+++ b/src/dbnode/storage/coldflush_test.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package storage
+
+import (
+ "errors"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/golang/mock/gomock"
+ "github.com/m3db/m3/src/dbnode/persist"
+ "github.com/stretchr/testify/require"
+)
+
+func TestColdFlushManagerFlushAlreadyInProgress(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ var (
+ mockPersistManager = persist.NewMockManager(ctrl)
+ mockFlushPersist = persist.NewMockFlushPreparer(ctrl)
+
+ // Channels used to coordinate cold flushing
+ startCh = make(chan struct{}, 1)
+ doneCh = make(chan struct{}, 1)
+ )
+ defer func() {
+ close(startCh)
+ close(doneCh)
+ }()
+
+ mockFlushPersist.EXPECT().DoneFlush().Return(nil)
+ mockPersistManager.EXPECT().StartFlushPersist().Do(func() {
+ startCh <- struct{}{}
+ <-doneCh
+ }).Return(mockFlushPersist, nil)
+
+ testOpts := DefaultTestOptions().SetPersistManager(mockPersistManager)
+ db := newMockdatabase(ctrl)
+ db.EXPECT().Options().Return(testOpts).AnyTimes()
+ db.EXPECT().IsBootstrapped().Return(true).AnyTimes()
+ db.EXPECT().OwnedNamespaces().Return(nil, nil).AnyTimes()
+
+ cfm := newColdFlushManager(db, mockPersistManager, testOpts).(*coldFlushManager)
+ cfm.pm = mockPersistManager
+
+ var (
+ wg sync.WaitGroup
+ now = time.Unix(0, 0)
+ )
+ wg.Add(2)
+
+ // Goroutine 1 should successfully flush.
+ go func() {
+ defer wg.Done()
+ require.True(t, cfm.Run(now))
+ }()
+
+ // Goroutine 2 should indicate already flushing.
+ go func() {
+ defer wg.Done()
+
+ // Wait until we start the cold flushing process.
+ <-startCh
+
+ // Ensure it doesn't allow a parallel flush.
+ require.False(t, cfm.Run(now))
+
+ // Allow the cold flush to finish.
+ doneCh <- struct{}{}
+ }()
+
+ wg.Wait()
+}
+
+func TestColdFlushManagerFlushDoneFlushError(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ var (
+ fakeErr = errors.New("fake error while marking flush done")
+ mockPersistManager = persist.NewMockManager(ctrl)
+ mockFlushPersist = persist.NewMockFlushPreparer(ctrl)
+ )
+
+ mockFlushPersist.EXPECT().DoneFlush().Return(fakeErr)
+ mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil)
+
+ testOpts := DefaultTestOptions().SetPersistManager(mockPersistManager)
+ db := newMockdatabase(ctrl)
+ db.EXPECT().Options().Return(testOpts).AnyTimes()
+ db.EXPECT().OwnedNamespaces().Return(nil, nil)
+
+ cfm := newColdFlushManager(db, mockPersistManager, testOpts).(*coldFlushManager)
+ cfm.pm = mockPersistManager
+
+	require.EqualError(t, cfm.coldFlush(), fakeErr.Error())
+}
diff --git a/src/dbnode/storage/database.go b/src/dbnode/storage/database.go
index c0c132ff5a..ab0e31a103 100644
--- a/src/dbnode/storage/database.go
+++ b/src/dbnode/storage/database.go
@@ -37,6 +37,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/tracepoint"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
@@ -93,8 +94,9 @@ type db struct {
opts Options
nowFn clock.NowFn
- nsWatch namespace.NamespaceWatch
- namespaces *databaseNamespacesMap
+ nsWatch namespace.NamespaceWatch
+ namespaces *databaseNamespacesMap
+ runtimeOptionsRegistry namespace.RuntimeOptionsManagerRegistry
commitLog commitlog.CommitLog
@@ -111,7 +113,7 @@ type db struct {
metrics databaseMetrics
log *zap.Logger
- writeBatchPool *ts.WriteBatchPool
+ writeBatchPool *writes.WriteBatchPool
}
type databaseMetrics struct {
@@ -126,6 +128,7 @@ type databaseMetrics struct {
unknownNamespaceQueryIDs tally.Counter
errQueryIDsIndexDisabled tally.Counter
errWriteTaggedIndexDisabled tally.Counter
+ pendingNamespaceChange tally.Gauge
}
func newDatabaseMetrics(scope tally.Scope) databaseMetrics {
@@ -143,6 +146,7 @@ func newDatabaseMetrics(scope tally.Scope) databaseMetrics {
unknownNamespaceQueryIDs: unknownNamespaceScope.Counter("query-ids"),
errQueryIDsIndexDisabled: indexDisabledScope.Counter("err-query-ids"),
errWriteTaggedIndexDisabled: indexDisabledScope.Counter("err-write-tagged"),
+ pendingNamespaceChange: scope.Gauge("pending-namespace-change"),
}
}
@@ -171,16 +175,17 @@ func NewDatabase(
)
d := &db{
- opts: opts,
- nowFn: nowFn,
- shardSet: shardSet,
- lastReceivedNewShards: nowFn(),
- namespaces: newDatabaseNamespacesMap(databaseNamespacesMapOptions{}),
- commitLog: commitLog,
- scope: scope,
- metrics: newDatabaseMetrics(scope),
- log: logger,
- writeBatchPool: opts.WriteBatchPool(),
+ opts: opts,
+ nowFn: nowFn,
+ shardSet: shardSet,
+ lastReceivedNewShards: nowFn(),
+ namespaces: newDatabaseNamespacesMap(databaseNamespacesMapOptions{}),
+ runtimeOptionsRegistry: opts.NamespaceRuntimeOptionsManagerRegistry(),
+ commitLog: commitLog,
+ scope: scope,
+ metrics: newDatabaseMetrics(scope),
+ log: logger,
+ writeBatchPool: opts.WriteBatchPool(),
}
databaseIOpts := iopts.SetMetricsScope(scope)
@@ -203,7 +208,7 @@ func NewDatabase(
// Wait till first namespaces value is received and set the value.
// Its important that this happens before the mediator is started to prevent
// a race condition where the namespaces haven't been initialized yet and
- // GetOwnedNamespaces() returns an empty slice which makes the cleanup logic
+ // OwnedNamespaces() returns an empty slice which makes the cleanup logic
// in the background Tick think it can clean up files that it shouldn't.
logger.Info("resolving namespaces with namespace watch")
<-watch.C()
@@ -242,6 +247,19 @@ func (d *db) UpdateOwnedNamespaces(newNamespaces namespace.Map) error {
d.log.Error("failed to update schema registry", zap.Error(err))
}
+ // Always update the runtime options if they were set so that correct
+ // runtime options are set in the runtime options registry before namespaces
+ // are actually created.
+ for _, namespaceMetadata := range newNamespaces.Metadatas() {
+ id := namespaceMetadata.ID().String()
+ runtimeOptsMgr := d.runtimeOptionsRegistry.RuntimeOptionsManager(id)
+ currRuntimeOpts := runtimeOptsMgr.Get()
+ setRuntimeOpts := namespaceMetadata.Options().RuntimeOptions()
+ if !currRuntimeOpts.Equal(setRuntimeOpts) {
+ runtimeOptsMgr.Update(setRuntimeOpts)
+ }
+ }
+
d.Lock()
defer d.Unlock()
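
The hunk above seeds per-namespace runtime options into a registry keyed by namespace ID before any namespace object exists, so namespaces constructed later read the correct values immediately. A simplified sketch of such a registry, with hypothetical types and options reduced to a string:

package main

import (
	"fmt"
	"sync"
)

type optionsManager struct {
	mu   sync.Mutex
	opts string
}

func (m *optionsManager) Get() string {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.opts
}

func (m *optionsManager) Update(opts string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.opts = opts
}

// registry hands out one manager per namespace ID, creating it on first
// use so options can be set before the namespace object itself exists.
type registry struct {
	mu   sync.Mutex
	mgrs map[string]*optionsManager
}

func (r *registry) managerFor(id string) *optionsManager {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.mgrs == nil {
		r.mgrs = make(map[string]*optionsManager)
	}
	if _, ok := r.mgrs[id]; !ok {
		r.mgrs[id] = &optionsManager{}
	}
	return r.mgrs[id]
}

func main() {
	r := &registry{}
	r.managerFor("metrics").Update("write-indexing=true") // lands before namespace creation
	fmt.Println(r.managerFor("metrics").Get())
}
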
@@ -261,7 +279,10 @@ func (d *db) UpdateOwnedNamespaces(newNamespaces namespace.Map) error {
// log that updates and removals are skipped
if len(removes) > 0 || len(updates) > 0 {
- d.log.Warn("skipping namespace removals and updates (except schema updates), restart process if you want changes to take effect.")
+ d.metrics.pendingNamespaceChange.Update(1)
+ d.log.Warn("skipping namespace removals and updates " +
+ "(except schema updates and runtime options), " +
+ "restart the process if you want changes to take effect")
}
// enqueue bootstraps if new namespaces
@@ -369,12 +390,15 @@ func (d *db) newDatabaseNamespaceWithLock(
err error
)
if mgr := d.opts.DatabaseBlockRetrieverManager(); mgr != nil {
- retriever, err = mgr.Retriever(md)
+ retriever, err = mgr.Retriever(md, d.shardSet)
if err != nil {
return nil, err
}
}
- return newDatabaseNamespace(md, d.shardSet, retriever, d, d.commitLog, d.opts)
+ nsID := md.ID().String()
+ runtimeOptsMgr := d.runtimeOptionsRegistry.RuntimeOptionsManager(nsID)
+ return newDatabaseNamespace(md, runtimeOptsMgr,
+ d.shardSet, retriever, d, d.commitLog, d.opts)
}
func (d *db) Options() Options {
@@ -537,6 +561,10 @@ func (d *db) terminateWithLock() error {
}
func (d *db) Terminate() error {
+	// NB(bodu): DisableFileOpsAndWait waits for in-flight fs processes
+	// to finish before disabling file operations.
+ d.mediator.DisableFileOpsAndWait()
+
d.Lock()
defer d.Unlock()
@@ -544,6 +572,10 @@ func (d *db) Terminate() error {
}
func (d *db) Close() error {
+	// NB(bodu): DisableFileOpsAndWait waits for in-flight fs processes
+	// to finish before disabling file operations.
+ d.mediator.DisableFileOpsAndWait()
+
d.Lock()
defer d.Unlock()
@@ -578,17 +610,22 @@ func (d *db) Write(
return err
}
- series, wasWritten, err := n.Write(ctx, id, timestamp, value, unit, annotation)
+ seriesWrite, err := n.Write(ctx, id, timestamp, value, unit, annotation)
if err != nil {
return err
}
- if !n.Options().WritesToCommitLog() || !wasWritten {
+ if !n.Options().WritesToCommitLog() || !seriesWrite.WasWritten {
return nil
}
- dp := ts.Datapoint{Timestamp: timestamp, Value: value}
- return d.commitLog.Write(ctx, series, dp, unit, annotation)
+ dp := ts.Datapoint{
+ Timestamp: timestamp,
+ TimestampNanos: xtime.ToUnixNano(timestamp),
+ Value: value,
+ }
+
+ return d.commitLog.Write(ctx, seriesWrite.Series, dp, unit, annotation)
}
func (d *db) WriteTagged(
@@ -607,20 +644,25 @@ func (d *db) WriteTagged(
return err
}
- series, wasWritten, err := n.WriteTagged(ctx, id, tags, timestamp, value, unit, annotation)
+ seriesWrite, err := n.WriteTagged(ctx, id, tags, timestamp, value, unit, annotation)
if err != nil {
return err
}
- if !n.Options().WritesToCommitLog() || !wasWritten {
+ if !n.Options().WritesToCommitLog() || !seriesWrite.WasWritten {
return nil
}
- dp := ts.Datapoint{Timestamp: timestamp, Value: value}
- return d.commitLog.Write(ctx, series, dp, unit, annotation)
+ dp := ts.Datapoint{
+ Timestamp: timestamp,
+ TimestampNanos: xtime.ToUnixNano(timestamp),
+ Value: value,
+ }
+
+ return d.commitLog.Write(ctx, seriesWrite.Series, dp, unit, annotation)
}
-func (d *db) BatchWriter(namespace ident.ID, batchSize int) (ts.BatchWriter, error) {
+func (d *db) BatchWriter(namespace ident.ID, batchSize int) (writes.BatchWriter, error) {
n, err := d.namespaceFor(namespace)
if err != nil {
d.metrics.unknownNamespaceBatchWriter.Inc(1)
@@ -638,7 +680,7 @@ func (d *db) BatchWriter(namespace ident.ID, batchSize int) (ts.BatchWriter, err
func (d *db) WriteBatch(
ctx context.Context,
namespace ident.ID,
- writer ts.BatchWriter,
+ writer writes.BatchWriter,
errHandler IndexedErrorHandler,
) error {
return d.writeBatch(ctx, namespace, writer, errHandler, false)
@@ -647,7 +689,7 @@ func (d *db) WriteBatch(
func (d *db) WriteTaggedBatch(
ctx context.Context,
namespace ident.ID,
- writer ts.BatchWriter,
+ writer writes.BatchWriter,
errHandler IndexedErrorHandler,
) error {
return d.writeBatch(ctx, namespace, writer, errHandler, true)
@@ -656,7 +698,7 @@ func (d *db) WriteTaggedBatch(
func (d *db) writeBatch(
ctx context.Context,
namespace ident.ID,
- writer ts.BatchWriter,
+ writer writes.BatchWriter,
errHandler IndexedErrorHandler,
tagged bool,
) error {
@@ -669,7 +711,7 @@ func (d *db) writeBatch(
}
defer sp.Finish()
- writes, ok := writer.(ts.WriteBatch)
+ writes, ok := writer.(writes.WriteBatch)
if !ok {
return errWriterDoesNotImplementWriteBatch
}
@@ -687,13 +729,12 @@ func (d *db) writeBatch(
iter := writes.Iter()
for i, write := range iter {
var (
- series ts.Series
- wasWritten bool
- err error
+ seriesWrite SeriesWrite
+ err error
)
if tagged {
- series, wasWritten, err = n.WriteTagged(
+ seriesWrite, err = n.WriteTagged(
ctx,
write.Write.Series.ID,
write.TagIter,
@@ -703,7 +744,7 @@ func (d *db) writeBatch(
write.Write.Annotation,
)
} else {
- series, wasWritten, err = n.Write(
+ seriesWrite, err = n.Write(
ctx,
write.Write.Series.ID,
write.Write.Datapoint.Timestamp,
@@ -716,6 +757,8 @@ func (d *db) writeBatch(
// Return errors with the original index provided by the caller so they
// can associate the error with the write that caused it.
errHandler.HandleError(write.OriginalIndex, err)
+ writes.SetError(i, err)
+ continue
}
// Need to set the outcome in the success case so the commitlog gets the
@@ -723,13 +766,37 @@ func (d *db) writeBatch(
// whose lifecycle lives longer than the span of this request, making them
// safe for use by the async commitlog. Need to set the outcome in the
// error case so that the commitlog knows to skip this entry.
- writes.SetOutcome(i, series, err)
- if !wasWritten || err != nil {
+ writes.SetSeries(i, seriesWrite.Series)
+
+ if !seriesWrite.WasWritten {
// This series has no additional information that needs to be written to
// the commit log; set this series to skip writing to the commit log.
writes.SetSkipWrite(i)
}
+
+ if seriesWrite.NeedsIndex {
+ writes.SetPendingIndex(i, seriesWrite.PendingIndexInsert)
+ }
+ }
+
+ // Now insert all pending index inserts together in one go
+ // to limit lock contention.
+ if pending := writes.PendingIndex(); len(pending) > 0 {
+ err := n.WritePendingIndexInserts(pending)
+ if err != nil {
+ // Mark those as pending index with an error.
+			// Note: this is an invariant error; queueing should never fail,
+			// so it's fine to fail all these entries if we can't
+			// write pending index inserts.
+ for i, write := range iter {
+ if write.PendingIndex {
+ errHandler.HandleError(write.OriginalIndex, err)
+ writes.SetError(i, err)
+ }
+ }
+ }
}
+
if !n.Options().WritesToCommitLog() {
// Finalize here because we can't rely on the commitlog to do it since
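
The batch path above collects every write that still needs indexing and submits them in one WritePendingIndexInserts call, so the index insert queue's lock is taken once per batch instead of once per series. A toy sketch of the collect-then-flush idea, with hypothetical types:

package main

import (
	"fmt"
	"sync"
)

type pendingIndexInsert struct{ seriesID string }

type index struct {
	mu      sync.Mutex
	entries []string
}

// writePendingInserts takes the index lock once for the whole batch
// rather than once per series, which is the point of batching above.
func (ix *index) writePendingInserts(pending []pendingIndexInsert) error {
	ix.mu.Lock()
	defer ix.mu.Unlock()
	for _, p := range pending {
		ix.entries = append(ix.entries, p.seriesID)
	}
	return nil
}

func main() {
	var pending []pendingIndexInsert
	for _, id := range []string{"cpu", "mem", "disk"} {
		pending = append(pending, pendingIndexInsert{seriesID: id}) // collect per write
	}
	ix := &index{}
	if err := ix.writePendingInserts(pending); err != nil { // flush once per batch
		fmt.Println("batch insert failed:", err)
	}
	fmt.Println(len(ix.entries)) // 3
}
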
// we're not using it.
@@ -751,7 +818,8 @@ func (d *db) QueryIDs(
sp.LogFields(
opentracinglog.String("query", query.String()),
opentracinglog.String("namespace", namespace.String()),
- opentracinglog.Int("limit", opts.Limit),
+ opentracinglog.Int("seriesLimit", opts.SeriesLimit),
+ opentracinglog.Int("docsLimit", opts.DocsLimit),
xopentracing.Time("start", opts.StartInclusive),
xopentracing.Time("end", opts.EndExclusive),
)
@@ -779,7 +847,8 @@ func (d *db) AggregateQuery(
sp.LogFields(
opentracinglog.String("query", query.String()),
opentracinglog.String("namespace", namespace.String()),
- opentracinglog.Int("limit", aggResultOpts.QueryOptions.Limit),
+ opentracinglog.Int("seriesLimit", aggResultOpts.QueryOptions.SeriesLimit),
+ opentracinglog.Int("docsLimit", aggResultOpts.QueryOptions.DocsLimit),
xopentracing.Time("start", aggResultOpts.QueryOptions.StartInclusive),
xopentracing.Time("end", aggResultOpts.QueryOptions.EndExclusive),
)
@@ -905,10 +974,10 @@ func (d *db) IsBootstrappedAndDurable() bool {
return false
}
- lastBootstrapCompletionTime, ok := d.mediator.LastBootstrapCompletionTime()
+ lastBootstrapCompletionTimeNano, ok := d.mediator.LastBootstrapCompletionTime()
if !ok {
d.log.Debug("not bootstrapped and durable because: no last bootstrap completion time",
- zap.Time("lastBootstrapCompletionTime", lastBootstrapCompletionTime))
+ zap.Time("lastBootstrapCompletionTime", lastBootstrapCompletionTimeNano.ToTime()))
return false
}
@@ -916,14 +985,15 @@ func (d *db) IsBootstrappedAndDurable() bool {
lastSnapshotStartTime, ok := d.mediator.LastSuccessfulSnapshotStartTime()
if !ok {
d.log.Debug("not bootstrapped and durable because: no last snapshot start time",
- zap.Time("lastBootstrapCompletionTime", lastBootstrapCompletionTime),
- zap.Time("lastSnapshotStartTime", lastSnapshotStartTime),
+ zap.Time("lastBootstrapCompletionTime", lastBootstrapCompletionTimeNano.ToTime()),
+ zap.Time("lastSnapshotStartTime", lastSnapshotStartTime.ToTime()),
)
return false
}
var (
- hasSnapshottedPostBootstrap = lastSnapshotStartTime.After(lastBootstrapCompletionTime)
+ lastBootstrapCompletionTime = lastBootstrapCompletionTimeNano.ToTime()
+ hasSnapshottedPostBootstrap = lastSnapshotStartTime.After(lastBootstrapCompletionTimeNano)
hasBootstrappedSinceReceivingNewShards = lastBootstrapCompletionTime.After(d.lastReceivedNewShards) ||
lastBootstrapCompletionTime.Equal(d.lastReceivedNewShards)
isBootstrappedAndDurable = hasSnapshottedPostBootstrap &&
@@ -934,7 +1004,7 @@ func (d *db) IsBootstrappedAndDurable() bool {
d.log.Debug(
"not bootstrapped and durable because: has not snapshotted post bootstrap and/or has not bootstrapped since receiving new shards",
zap.Time("lastBootstrapCompletionTime", lastBootstrapCompletionTime),
- zap.Time("lastSnapshotStartTime", lastSnapshotStartTime),
+ zap.Time("lastSnapshotStartTime", lastSnapshotStartTime.ToTime()),
zap.Time("lastReceivedNewShards", d.lastReceivedNewShards),
)
return false
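
The durability check above reduces to two time comparisons: the last snapshot must have started after the last bootstrap completed, and that bootstrap must be at or after the most recent shard assignment. A compact restatement with illustrative names:

package main

import (
	"fmt"
	"time"
)

// isBootstrappedAndDurable restates the predicate from database.go:
// durable means we snapshotted after bootstrapping, and bootstrapped
// at or after the most recent shard assignment.
func isBootstrappedAndDurable(lastSnapshotStart, lastBootstrapDone, lastNewShards time.Time) bool {
	hasSnapshottedPostBootstrap := lastSnapshotStart.After(lastBootstrapDone)
	hasBootstrappedSinceNewShards := !lastBootstrapDone.Before(lastNewShards)
	return hasSnapshottedPostBootstrap && hasBootstrappedSinceNewShards
}

func main() {
	assigned := time.Now()
	bootstrapped := assigned.Add(time.Second)
	snapshotted := bootstrapped.Add(time.Second)
	fmt.Println(isBootstrappedAndDurable(snapshotted, bootstrapped, assigned)) // true
}
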
@@ -1007,7 +1077,7 @@ func (d *db) ownedNamespacesWithLock() []databaseNamespace {
return namespaces
}
-func (d *db) GetOwnedNamespaces() ([]databaseNamespace, error) {
+func (d *db) OwnedNamespaces() ([]databaseNamespace, error) {
d.RLock()
defer d.RUnlock()
if d.state == databaseClosed {
diff --git a/src/dbnode/storage/database_bootstrapped_test.go b/src/dbnode/storage/database_bootstrapped_test.go
index d7e2a3f9fb..51d1157c9e 100644
--- a/src/dbnode/storage/database_bootstrapped_test.go
+++ b/src/dbnode/storage/database_bootstrapped_test.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/golang/mock/gomock"
+ xtime "github.com/m3db/m3/src/x/time"
"github.com/stretchr/testify/assert"
)
@@ -34,16 +35,17 @@ func TestDatabaseIsBootstrappedAndDurable(t *testing.T) {
var (
validIsBootstrapped = true
- validShardSetAssignedAt = time.Now()
- validLastBootstrapCompletionTime = validShardSetAssignedAt.Add(time.Second)
- validLastSuccessfulSnapshotStartTime = validLastBootstrapCompletionTime.Add(time.Second)
+ validShardSetAssignedAt = xtime.ToUnixNano(time.Now())
+ validLastBootstrapCompletionTime = xtime.ToUnixNano(validShardSetAssignedAt.ToTime().Add(time.Second))
+ validLastSuccessfulSnapshotStartTime = xtime.ToUnixNano(validLastBootstrapCompletionTime.ToTime().Add(time.Second))
+ zeroTime xtime.UnixNano
)
testCases := []struct {
title string
isBootstrapped bool
- lastBootstrapCompletionTime time.Time
- lastSuccessfulSnapshotStartTime time.Time
- shardSetAssignedAt time.Time
+ lastBootstrapCompletionTime xtime.UnixNano
+ lastSuccessfulSnapshotStartTime xtime.UnixNano
+ shardSetAssignedAt xtime.UnixNano
expectedResult bool
}{
{
@@ -57,7 +59,7 @@ func TestDatabaseIsBootstrappedAndDurable(t *testing.T) {
{
title: "False if no last bootstrap completion time",
isBootstrapped: validIsBootstrapped,
- lastBootstrapCompletionTime: time.Time{},
+ lastBootstrapCompletionTime: zeroTime,
lastSuccessfulSnapshotStartTime: validLastSuccessfulSnapshotStartTime,
shardSetAssignedAt: validShardSetAssignedAt,
expectedResult: false,
@@ -66,7 +68,7 @@ func TestDatabaseIsBootstrappedAndDurable(t *testing.T) {
title: "False if no last successful snapshot start time",
isBootstrapped: validIsBootstrapped,
lastBootstrapCompletionTime: validLastBootstrapCompletionTime,
- lastSuccessfulSnapshotStartTime: time.Time{},
+ lastSuccessfulSnapshotStartTime: zeroTime,
shardSetAssignedAt: validShardSetAssignedAt,
expectedResult: false,
},
@@ -91,7 +93,7 @@ func TestDatabaseIsBootstrappedAndDurable(t *testing.T) {
isBootstrapped: validIsBootstrapped,
lastBootstrapCompletionTime: validLastBootstrapCompletionTime,
lastSuccessfulSnapshotStartTime: validLastSuccessfulSnapshotStartTime,
- shardSetAssignedAt: validLastBootstrapCompletionTime.Add(time.Second),
+ shardSetAssignedAt: validLastBootstrapCompletionTime + xtime.UnixNano(xtime.Second),
expectedResult: false,
},
{
@@ -113,7 +115,7 @@ func TestDatabaseIsBootstrappedAndDurable(t *testing.T) {
mediator := NewMockdatabaseMediator(ctrl)
d.mediator = mediator
- d.lastReceivedNewShards = tc.shardSetAssignedAt
+ d.lastReceivedNewShards = tc.shardSetAssignedAt.ToTime()
mediator.EXPECT().IsBootstrapped().Return(tc.isBootstrapped)
if !tc.isBootstrapped {
@@ -122,8 +124,8 @@ func TestDatabaseIsBootstrappedAndDurable(t *testing.T) {
return
}
- if tc.lastBootstrapCompletionTime.IsZero() {
- mediator.EXPECT().LastBootstrapCompletionTime().Return(time.Time{}, false)
+ if tc.lastBootstrapCompletionTime == 0 {
+ mediator.EXPECT().LastBootstrapCompletionTime().Return(zeroTime, false)
assert.Equal(t, tc.expectedResult, d.IsBootstrappedAndDurable())
// Early return because other mock calls will not get called.
return
@@ -131,8 +133,8 @@ func TestDatabaseIsBootstrappedAndDurable(t *testing.T) {
mediator.EXPECT().LastBootstrapCompletionTime().Return(tc.lastBootstrapCompletionTime, true)
- if tc.lastSuccessfulSnapshotStartTime.IsZero() {
- mediator.EXPECT().LastSuccessfulSnapshotStartTime().Return(time.Time{}, false)
+ if tc.lastSuccessfulSnapshotStartTime == 0 {
+ mediator.EXPECT().LastSuccessfulSnapshotStartTime().Return(zeroTime, false)
assert.Equal(t, tc.expectedResult, d.IsBootstrappedAndDurable())
// Early return because other mock calls will not get called.
return
diff --git a/src/dbnode/storage/database_test.go b/src/dbnode/storage/database_test.go
index 7bad3cacf5..46527674d7 100644
--- a/src/dbnode/storage/database_test.go
+++ b/src/dbnode/storage/database_test.go
@@ -42,6 +42,7 @@ import (
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/tracepoint"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
"github.com/m3db/m3/src/m3ninx/idx"
xclock "github.com/m3db/m3/src/x/clock"
@@ -145,7 +146,7 @@ func newMockdatabase(ctrl *gomock.Controller, ns ...databaseNamespace) *Mockdata
db := NewMockdatabase(ctrl)
db.EXPECT().Options().Return(DefaultTestOptions()).AnyTimes()
if len(ns) != 0 {
- db.EXPECT().GetOwnedNamespaces().Return(ns, nil).AnyTimes()
+ db.EXPECT().OwnedNamespaces().Return(ns, nil).AnyTimes()
}
return db
}
@@ -355,7 +356,7 @@ func TestDatabaseNamespaces(t *testing.T) {
assert.Equal(t, "testns2", result[1].ID().String())
}
-func TestGetOwnedNamespacesErrorIfClosed(t *testing.T) {
+func TestOwnedNamespacesErrorIfClosed(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -367,7 +368,7 @@ func TestGetOwnedNamespacesErrorIfClosed(t *testing.T) {
require.NoError(t, d.Open())
require.NoError(t, d.Terminate())
- _, err := d.GetOwnedNamespaces()
+ _, err := d.OwnedNamespaces()
require.Equal(t, errDatabaseIsClosed, err)
}
@@ -782,21 +783,23 @@ func testDatabaseNamespaceIndexFunctions(t *testing.T, commitlogEnabled bool) {
nsOptions := namespace.NewOptions().
SetWritesToCommitLog(commitlogEnabled)
- ns.EXPECT().GetOwnedShards().Return([]databaseShard{}).AnyTimes()
+ ns.EXPECT().OwnedShards().Return([]databaseShard{}).AnyTimes()
ns.EXPECT().Tick(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
ns.EXPECT().BootstrapState().Return(ShardBootstrapStates{}).AnyTimes()
ns.EXPECT().Options().Return(nsOptions).AnyTimes()
require.NoError(t, d.Open())
var (
- namespace = ident.StringID("testns")
- ctx = context.NewContext()
- id = ident.StringID("foo")
- tagsIter = ident.EmptyTagIterator
- s = ts.Series{
- ID: id,
- Tags: ident.Tags{},
- Namespace: namespace,
+ namespace = ident.StringID("testns")
+ ctx = context.NewContext()
+ id = ident.StringID("foo")
+ tagsIter = ident.EmptyTagIterator
+ seriesWrite = SeriesWrite{
+ Series: ts.Series{
+ ID: id,
+ Namespace: namespace,
+ },
+ WasWritten: true,
}
)
@@ -806,13 +809,13 @@ func testDatabaseNamespaceIndexFunctions(t *testing.T, commitlogEnabled bool) {
ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
ns.EXPECT().WriteTagged(gomock.Any(), ident.NewIDMatcher("foo"), gomock.Any(),
- time.Time{}, 1.0, xtime.Second, nil).Return(s, true, nil)
+ time.Time{}, 1.0, xtime.Second, nil).Return(seriesWrite, nil)
require.NoError(t, d.WriteTagged(ctx, namespace,
id, tagsIter, time.Time{},
1.0, xtime.Second, nil))
ns.EXPECT().WriteTagged(gomock.Any(), ident.NewIDMatcher("foo"), gomock.Any(),
- time.Time{}, 1.0, xtime.Second, nil).Return(s, false, fmt.Errorf("random err"))
+ time.Time{}, 1.0, xtime.Second, nil).Return(SeriesWrite{}, fmt.Errorf("random err"))
require.Error(t, d.WriteTagged(ctx, namespace,
ident.StringID("foo"), ident.EmptyTagIterator, time.Time{},
1.0, xtime.Second, nil))
@@ -964,7 +967,7 @@ func testDatabaseWriteBatch(t *testing.T,
nsOptions := namespace.NewOptions().
SetWritesToCommitLog(commitlogEnabled)
- ns.EXPECT().GetOwnedShards().Return([]databaseShard{}).AnyTimes()
+ ns.EXPECT().OwnedShards().Return([]databaseShard{}).AnyTimes()
ns.EXPECT().Tick(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
ns.EXPECT().BootstrapState().Return(ShardBootstrapStates{}).AnyTimes()
ns.EXPECT().Options().Return(nsOptions).AnyTimes()
@@ -994,7 +997,7 @@ func testDatabaseWriteBatch(t *testing.T,
encodedTags, ok := encoder.Data()
require.True(t, ok)
- writes := []struct {
+ testWrites := []struct {
series string
t time.Time
v float64
@@ -1047,7 +1050,7 @@ func testDatabaseWriteBatch(t *testing.T,
require.NoError(t, err)
var i int
- for _, write := range writes {
+ for _, write := range testWrites {
// Write with the provided index as i*2 so we can assert later that the
// ErrorHandler is called with the provided index, not the actual position
// in the WriteBatch slice.
@@ -1055,34 +1058,39 @@ func testDatabaseWriteBatch(t *testing.T,
batchWriter.AddTagged(i*2, ident.StringID(write.series),
tagsIter.Duplicate(), encodedTags.Bytes(), write.t, write.v, xtime.Second, nil)
wasWritten := write.err == nil
- ns.EXPECT().WriteTagged(ctx, ident.NewIDMatcher(write.series), gomock.Any(),
- write.t, write.v, xtime.Second, nil).Return(
- ts.Series{
- ID: ident.StringID(write.series + "-updated"),
- Namespace: namespace,
- Tags: ident.Tags{},
- }, wasWritten, write.err)
+ ns.EXPECT().
+ WriteTagged(ctx, ident.NewIDMatcher(write.series), gomock.Any(),
+ write.t, write.v, xtime.Second, nil).
+ Return(SeriesWrite{
+ Series: ts.Series{
+ ID: ident.StringID(write.series + "-updated"),
+ Namespace: namespace,
+ }, WasWritten: wasWritten,
+ }, write.err)
} else {
batchWriter.Add(i*2, ident.StringID(write.series),
write.t, write.v, xtime.Second, nil)
wasWritten := write.err == nil
- ns.EXPECT().Write(ctx, ident.NewIDMatcher(write.series),
- write.t, write.v, xtime.Second, nil).Return(
- ts.Series{
- ID: ident.StringID(write.series + "-updated"),
- Namespace: namespace,
- Tags: ident.Tags{},
- }, wasWritten, write.err)
+ ns.EXPECT().
+ Write(ctx, ident.NewIDMatcher(write.series),
+ write.t, write.v, xtime.Second, nil).
+ Return(SeriesWrite{
+ Series: ts.Series{
+ ID: ident.StringID(write.series + "-updated"),
+ Namespace: namespace,
+ },
+ WasWritten: wasWritten,
+ }, write.err)
}
i++
}
errHandler := &fakeIndexedErrorHandler{}
if tagged {
- err = d.WriteTaggedBatch(ctx, namespace, batchWriter.(ts.WriteBatch),
+ err = d.WriteTaggedBatch(ctx, namespace, batchWriter.(writes.WriteBatch),
errHandler)
} else {
- err = d.WriteBatch(ctx, namespace, batchWriter.(ts.WriteBatch),
+ err = d.WriteBatch(ctx, namespace, batchWriter.(writes.WriteBatch),
errHandler)
}
@@ -1189,7 +1197,7 @@ func TestUpdateBatchWriterBasedOnShardResults(t *testing.T) {
ns := dbAddNewMockNamespace(ctrl, d, "testns")
nsOptions := namespace.NewOptions().
SetWritesToCommitLog(false)
- ns.EXPECT().GetOwnedShards().Return([]databaseShard{}).AnyTimes()
+ ns.EXPECT().OwnedShards().Return([]databaseShard{}).AnyTimes()
ns.EXPECT().Tick(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
ns.EXPECT().BootstrapState().Return(ShardBootstrapStates{}).AnyTimes()
ns.EXPECT().Options().Return(nsOptions).AnyTimes()
@@ -1197,45 +1205,56 @@ func TestUpdateBatchWriterBasedOnShardResults(t *testing.T) {
require.NoError(t, d.Open())
var (
- namespace = ident.StringID("testns")
- ctx = context.NewContext()
- series1 = ts.Series{UniqueIndex: 0}
- series2 = ts.Series{UniqueIndex: 1}
- series3 = ts.Series{UniqueIndex: 2}
- series4 = ts.Series{UniqueIndex: 3}
- err = fmt.Errorf("err")
+ namespace = ident.StringID("testns")
+ ctx = context.NewContext()
+ seriesWrite1 = SeriesWrite{Series: ts.Series{UniqueIndex: 0}, WasWritten: true}
+ seriesWrite2 = SeriesWrite{Series: ts.Series{UniqueIndex: 1}, WasWritten: true}
+ seriesWrite3 = SeriesWrite{Series: ts.Series{UniqueIndex: 2}, WasWritten: false}
+ seriesWrite4 = SeriesWrite{Series: ts.Series{UniqueIndex: 3}, WasWritten: false}
+ err = fmt.Errorf("err")
)
- ns.EXPECT().Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(),
- gomock.Any(), gomock.Any()).Return(series1, true, nil)
- ns.EXPECT().Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(),
- gomock.Any(), gomock.Any()).Return(series2, true, err)
- ns.EXPECT().Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(),
- gomock.Any(), gomock.Any()).Return(series3, false, err)
- ns.EXPECT().Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(),
- gomock.Any(), gomock.Any()).Return(series4, false, nil)
+ gomock.InOrder(
+ ns.EXPECT().
+ Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(),
+ gomock.Any(), gomock.Any()).
+ Return(seriesWrite1, nil),
+ ns.EXPECT().
+ Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(),
+ gomock.Any(), gomock.Any()).
+ Return(seriesWrite2, err),
+ ns.EXPECT().
+ Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(),
+ gomock.Any(), gomock.Any()).
+ Return(seriesWrite3, err),
+ ns.EXPECT().
+ Write(ctx, gomock.Any(), gomock.Any(), gomock.Any(),
+ gomock.Any(), gomock.Any()).
+ Return(seriesWrite4, nil),
+ )
- write := ts.Write{
+ write := writes.Write{
Series: ts.Series{ID: ident.StringID("foo")},
}
- iters := []ts.BatchWrite{
+ iters := []writes.BatchWrite{
{Write: write},
{Write: write},
{Write: write},
{Write: write},
}
- batchWriter := ts.NewMockWriteBatch(ctrl)
- batchWriter.EXPECT().Iter().Return(iters)
- batchWriter.EXPECT().Finalize().Times(1)
- batchWriter.EXPECT().SetOutcome(0, series1, nil)
- batchWriter.EXPECT().SetOutcome(1, series2, err)
- batchWriter.EXPECT().SetSkipWrite(1)
- batchWriter.EXPECT().SetOutcome(2, series3, err)
- batchWriter.EXPECT().SetSkipWrite(2)
- batchWriter.EXPECT().SetOutcome(3, series4, nil)
- batchWriter.EXPECT().SetSkipWrite(3)
+ batchWriter := writes.NewMockWriteBatch(ctrl)
+ gomock.InOrder(
+ batchWriter.EXPECT().Iter().Return(iters),
+ batchWriter.EXPECT().SetSeries(0, seriesWrite1.Series),
+ batchWriter.EXPECT().SetError(1, err),
+ batchWriter.EXPECT().SetError(2, err),
+ batchWriter.EXPECT().SetSeries(3, seriesWrite4.Series),
+ batchWriter.EXPECT().SetSkipWrite(3),
+ batchWriter.EXPECT().PendingIndex().Return(nil),
+ batchWriter.EXPECT().Finalize(),
+ )
errHandler := &fakeIndexedErrorHandler{}
d.WriteBatch(ctx, namespace, batchWriter, errHandler)
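
Reviewer note: the write path now returns a single SeriesWrite value plus an error in place of the old (ts.Series, bool, error) triple. A hedged sketch of that consolidation (field names follow the diff; the write function is a stand-in, not the real namespace API):

```go
package main

import "fmt"

// Series is a simplified stand-in for ts.Series.
type Series struct{ ID string }

// SeriesWrite bundles the written series with whether the write
// actually persisted, mirroring the struct returned by the mocks.
type SeriesWrite struct {
	Series     Series
	WasWritten bool
}

// write is an illustrative stand-in for namespace.Write: one result
// value plus an error instead of the old three-value return.
func write(id string, fail bool) (SeriesWrite, error) {
	if fail {
		// Callers no longer need to inspect a separate bool on error.
		return SeriesWrite{}, fmt.Errorf("random err")
	}
	return SeriesWrite{Series: Series{ID: id}, WasWritten: true}, nil
}

func main() {
	sw, err := write("foo", false)
	fmt.Println(sw.WasWritten, err) // true <nil>

	sw, err = write("bar", true)
	fmt.Println(sw.WasWritten, err) // false random err
}
```

Bundling the bool into the result struct also keeps mock expectations like the gomock.InOrder block above down to two return values.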
diff --git a/src/dbnode/storage/dirty_series_new_map_gen.go b/src/dbnode/storage/dirty_series_new_map_gen.go
index 6e6cdb19c9..eb7a2a65cd 100644
--- a/src/dbnode/storage/dirty_series_new_map_gen.go
+++ b/src/dbnode/storage/dirty_series_new_map_gen.go
@@ -25,60 +25,29 @@
package storage
import (
- "github.com/m3db/m3/src/x/ident"
- "github.com/m3db/m3/src/x/pool"
+ "bytes"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
-// dirtySeriesMapOptions provides options used when created the map.
-type dirtySeriesMapOptions struct {
- InitialSize int
- KeyCopyPool pool.BytesPool
-}
-
// newDirtySeriesMap returns a new byte keyed map.
-func newDirtySeriesMap(opts dirtySeriesMapOptions) *dirtySeriesMap {
- var (
- copyFn dirtySeriesMapCopyFn
- finalizeFn dirtySeriesMapFinalizeFn
- )
- if pool := opts.KeyCopyPool; pool == nil {
- copyFn = func(k idAndBlockStart) idAndBlockStart {
- return idAndBlockStart{
- id: ident.BytesID(append([]byte(nil), k.id.Bytes()...)),
- blockStart: k.blockStart,
- }
- }
- } else {
- copyFn = func(k idAndBlockStart) idAndBlockStart {
- bytes := k.id.Bytes()
- keyLen := len(bytes)
- pooled := pool.Get(keyLen)[:keyLen]
- copy(pooled, bytes)
- return idAndBlockStart{
- id: ident.BytesID(pooled),
- blockStart: k.blockStart,
- }
- }
- finalizeFn = func(k idAndBlockStart) {
- if slice, ok := k.id.(ident.BytesID); ok {
- pool.Put(slice)
- }
- }
- }
+func newDirtySeriesMap() *dirtySeriesMap {
return _dirtySeriesMapAlloc(_dirtySeriesMapOptions{
hash: func(k idAndBlockStart) dirtySeriesMapHash {
hash := uint64(7)
- hash = 31*hash + xxhash.Sum64(k.id.Bytes())
+ hash = 31*hash + xxhash.Sum64(k.id)
hash = 31*hash + uint64(k.blockStart)
return dirtySeriesMapHash(hash)
},
equals: func(x, y idAndBlockStart) bool {
- return x.id.Equal(y.id) && x.blockStart == y.blockStart
+ // Note: Do cheap check (int comparison) first.
+ return x.blockStart == y.blockStart && bytes.Equal(x.id, y.id)
+ },
+ copy: func(k idAndBlockStart) idAndBlockStart {
+ return idAndBlockStart{
+ id: append(make([]byte, 0, len(k.id)), k.id...),
+ blockStart: k.blockStart,
+ }
},
- copy: copyFn,
- finalize: finalizeFn,
- initialSize: opts.InitialSize,
})
}
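
Reviewer note: the regenerated map hashes raw []byte keys with xxhash/v2 and reorders equality so the int comparison short-circuits before bytes.Equal. The combine-and-compare pattern in isolation (the key type is simplified; the seed and multiplier match the generated code):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/cespare/xxhash/v2"
)

type key struct {
	id         []byte
	blockStart int64
}

// hash folds both fields into one uint64, seeding with a small prime
// and mixing in the xxhash of the raw ID bytes.
func hash(k key) uint64 {
	h := uint64(7)
	h = 31*h + xxhash.Sum64(k.id)
	h = 31*h + uint64(k.blockStart)
	return h
}

// equals does the cheap int comparison first so most mismatches
// never touch the byte slices.
func equals(x, y key) bool {
	return x.blockStart == y.blockStart && bytes.Equal(x.id, y.id)
}

func main() {
	a := key{id: []byte("series-a"), blockStart: 100}
	b := key{id: append([]byte(nil), a.id...), blockStart: 100} // deep copy, as the map's copy fn does
	fmt.Println(hash(a) == hash(b), equals(a, b))               // true true
}
```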
diff --git a/src/dbnode/storage/errors/types.go b/src/dbnode/storage/errors/types.go
index 6d55d62735..45c36be48b 100644
--- a/src/dbnode/storage/errors/types.go
+++ b/src/dbnode/storage/errors/types.go
@@ -33,12 +33,6 @@ var (
// ErrTooPast is returned for a write which is too far in the past.
ErrTooPast = xerrors.NewInvalidParamsError(errors.New("datapoint is too far in the past"))
-
- // ErrColdWritesNotEnabled is returned when cold writes are disabled
- // and a write is too far in the past or future. Note, the error intentionally
- // excludes anything regarding the cold writes feature until its release.
- ErrColdWritesNotEnabled = xerrors.NewInvalidParamsError(errors.New(
- "datapoint is too far in the past or future"))
)
// NewUnknownNamespaceError returns a new error indicating an unknown namespace parameter.
diff --git a/src/dbnode/storage/flush.go b/src/dbnode/storage/flush.go
index a2c50d5d9c..13b04d757b 100644
--- a/src/dbnode/storage/flush.go
+++ b/src/dbnode/storage/flush.go
@@ -26,13 +26,17 @@ import (
"sync"
"time"
+ "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
"github.com/m3db/m3/src/dbnode/retention"
xerrors "github.com/m3db/m3/src/x/errors"
+ xtime "github.com/m3db/m3/src/x/time"
"github.com/pborman/uuid"
"github.com/uber-go/tally"
+ "go.uber.org/atomic"
+ "go.uber.org/zap"
)
var (
@@ -47,11 +51,36 @@ const (
// when we haven't begun either a flush or snapshot.
flushManagerNotIdle
flushManagerFlushInProgress
- flushManagerColdFlushInProgress
flushManagerSnapshotInProgress
flushManagerIndexFlushInProgress
)
+type flushManagerMetrics struct {
+ isFlushing tally.Gauge
+ isSnapshotting tally.Gauge
+ isIndexFlushing tally.Gauge
+ // This is a "debug" metric for making sure that the snapshotting process
+ // is not overly aggressive.
+ maxBlocksSnapshottedByNamespace tally.Gauge
+ dataWarmFlushDuration tally.Timer
+ dataSnapshotDuration tally.Timer
+ indexFlushDuration tally.Timer
+ commitLogRotationDuration tally.Timer
+}
+
+func newFlushManagerMetrics(scope tally.Scope) flushManagerMetrics {
+ return flushManagerMetrics{
+ isFlushing: scope.Gauge("flush"),
+ isSnapshotting: scope.Gauge("snapshot"),
+ isIndexFlushing: scope.Gauge("index-flush"),
+ maxBlocksSnapshottedByNamespace: scope.Gauge("max-blocks-snapshotted-by-namespace"),
+ dataWarmFlushDuration: scope.Timer("data-warm-flush-duration"),
+ dataSnapshotDuration: scope.Timer("data-snapshot-duration"),
+ indexFlushDuration: scope.Timer("index-flush-duration"),
+ commitLogRotationDuration: scope.Timer("commit-log-rotation-duration"),
+ }
+}
+
type flushManager struct {
sync.RWMutex
@@ -62,16 +91,13 @@ type flushManager struct {
// state is used to protect the flush manager against concurrent use,
// while flushInProgress and snapshotInProgress are more granular and
// are used for emitting granular gauges.
- state flushManagerState
- isFlushing tally.Gauge
- isColdFlushing tally.Gauge
- isSnapshotting tally.Gauge
- isIndexFlushing tally.Gauge
- // This is a "debug" metric for making sure that the snapshotting process
- // is not overly aggressive.
- maxBlocksSnapshottedByNamespace tally.Gauge
+ state flushManagerState
+ metrics flushManagerMetrics
- lastSuccessfulSnapshotStartTime time.Time
+ lastSuccessfulSnapshotStartTime atomic.Int64 // == xtime.UnixNano
+
+ logger *zap.Logger
+ nowFn clock.NowFn
}
func newFlushManager(
@@ -81,15 +107,13 @@ func newFlushManager(
) databaseFlushManager {
opts := database.Options()
return &flushManager{
- database: database,
- commitlog: commitlog,
- opts: opts,
- pm: opts.PersistManager(),
- isFlushing: scope.Gauge("flush"),
- isColdFlushing: scope.Gauge("cold-flush"),
- isSnapshotting: scope.Gauge("snapshot"),
- isIndexFlushing: scope.Gauge("index-flush"),
- maxBlocksSnapshottedByNamespace: scope.Gauge("max-blocks-snapshotted-by-namespace"),
+ database: database,
+ commitlog: commitlog,
+ opts: opts,
+ pm: opts.PersistManager(),
+ metrics: newFlushManagerMetrics(scope),
+ logger: opts.InstrumentOptions().Logger(),
+ nowFn: opts.ClockOptions().NowFn(),
}
}
@@ -105,66 +129,30 @@ func (m *flushManager) Flush(startTime time.Time) error {
defer m.setState(flushManagerIdle)
- namespaces, err := m.database.GetOwnedNamespaces()
+ namespaces, err := m.database.OwnedNamespaces()
if err != nil {
return err
}
- // Perform three separate loops through all the namespaces so that we can
+ // Perform two separate loops through all the namespaces so that we can
// emit better gauges, i.e. all the flushing for all the namespaces happens
- // at once, then all the cold flushes, then all the snapshotting. This is
+	// at once, then all the snapshotting. This is
// also slightly better semantically because flushing should take priority
- // over cold flushes and snapshotting.
+ // over snapshotting.
//
// In addition, we need to make sure that for any given shard/blockStart
- // combination, we attempt a flush and then a cold flush before a snapshot
- // as the snapshotting process will attempt to snapshot any unflushed blocks
- // which would be wasteful if the block is already flushable.
+	// combination, we attempt a flush before a snapshot, as the snapshotting process
+	// will attempt to snapshot blocks with unflushed data, which would be wasteful if
+	// the block is already flushable.
multiErr := xerrors.NewMultiError()
if err = m.dataWarmFlush(namespaces, startTime); err != nil {
multiErr = multiErr.Add(err)
}
+ start := m.nowFn()
rotatedCommitlogID, err := m.commitlog.RotateLogs()
+ m.metrics.commitLogRotationDuration.Record(m.nowFn().Sub(start))
if err == nil {
- // The cold flush process will persist any data that has been "loaded" into memory via
- // the Load() API but has not yet been persisted durably. As a result, if the cold flush
- // process completes without error, then we want to "decrement" the number of tracked bytes
- // by however many were outstanding right before the cold flush began.
- //
- // For example:
- // t0: Load 100 bytes --> (numLoadedBytes == 100, numPendingLoadedBytes == 0)
- // t1: memTracker.MarkLoadedAsPending() --> (numLoadedBytes == 100, numPendingLoadedBytes == 100)
- // t2: Load 200 bytes --> (numLoadedBytes == 300, numPendingLoadedBytes == 100)
- // t3: ColdFlushStart()
- // t4: Load 300 bytes --> (numLoadedBytes == 600, numPendingLoadedBytes == 100)
- // t5: ColdFlushEnd()
- // t6: memTracker.DecPendingLoadedBytes() --> (numLoadedBytes == 500, numPendingLoadedBytes == 0)
- // t7: memTracker.MarkLoadedAsPending() --> (numLoadedBytes == 500, numPendingLoadedBytes == 500)
- // t8: ColdFlushStart()
- // t9: ColdFlushError()
- // t10: memTracker.MarkLoadedAsPending() --> (numLoadedBytes == 500, numPendingLoadedBytes == 500)
- // t11: ColdFlushStart()
- // t12: ColdFlushEnd()
- // t13: memTracker.DecPendingLoadedBytes() --> (numLoadedBytes == 0, numPendingLoadedBytes == 0)
- memTracker := m.opts.MemoryTracker()
- memTracker.MarkLoadedAsPending()
- if err = m.dataColdFlush(namespaces); err != nil {
- multiErr = multiErr.Add(err)
- // If cold flush fails, we can't proceed to snapshotting because
- // commit log cleanup logic uses the presence of a successful
- // snapshot checkpoint file to determine which commit log files are
- // safe to delete. Therefore if a cold flush fails and a snapshot
- // succeeds, the writes from the failed cold flush might be lost
- // when commit logs get cleaned up, leaving the node in an undurable
- // state such that if it restarted, it would not be able to recover
- // the cold writes from its commit log.
- return multiErr.FinalError()
- }
- // Only decrement if the cold flush was a success. In this case, the decrement will reduce the
- // value by however many bytes had been tracked when the cold flush began.
- memTracker.DecPendingLoadedBytes()
-
if err = m.dataSnapshot(namespaces, startTime, rotatedCommitlogID); err != nil {
multiErr = multiErr.Add(err)
}
@@ -189,7 +177,10 @@ func (m *flushManager) dataWarmFlush(
}
m.setState(flushManagerFlushInProgress)
- multiErr := xerrors.NewMultiError()
+ var (
+ start = m.nowFn()
+ multiErr = xerrors.NewMultiError()
+ )
for _, ns := range namespaces {
// Flush first because we will only snapshot if there are no outstanding flushes.
flushTimes, err := m.namespaceFlushTimes(ns, startTime)
@@ -208,30 +199,7 @@ func (m *flushManager) dataWarmFlush(
multiErr = multiErr.Add(err)
}
- return multiErr.FinalError()
-}
-
-func (m *flushManager) dataColdFlush(
- namespaces []databaseNamespace,
-) error {
- flushPersist, err := m.pm.StartFlushPersist()
- if err != nil {
- return err
- }
-
- m.setState(flushManagerColdFlushInProgress)
- multiErr := xerrors.NewMultiError()
- for _, ns := range namespaces {
- if err = ns.ColdFlush(flushPersist); err != nil {
- multiErr = multiErr.Add(err)
- }
- }
-
- err = flushPersist.DoneFlush()
- if err != nil {
- multiErr = multiErr.Add(err)
- }
-
+ m.metrics.dataWarmFlushDuration.Record(m.nowFn().Sub(start))
return multiErr.FinalError()
}
@@ -249,6 +217,7 @@ func (m *flushManager) dataSnapshot(
m.setState(flushManagerSnapshotInProgress)
var (
+ start = m.nowFn()
maxBlocksSnapshottedByNamespace = 0
multiErr = xerrors.NewMultiError()
)
@@ -278,15 +247,16 @@ func (m *flushManager) dataSnapshot(
}
}
}
- m.maxBlocksSnapshottedByNamespace.Update(float64(maxBlocksSnapshottedByNamespace))
+ m.metrics.maxBlocksSnapshottedByNamespace.Update(float64(maxBlocksSnapshottedByNamespace))
err = snapshotPersist.DoneSnapshot(snapshotID, rotatedCommitlogID)
multiErr = multiErr.Add(err)
finalErr := multiErr.FinalError()
if finalErr == nil {
- m.lastSuccessfulSnapshotStartTime = startTime
+ m.lastSuccessfulSnapshotStartTime.Store(int64(xtime.ToUnixNano(startTime)))
}
+ m.metrics.dataSnapshotDuration.Record(m.nowFn().Sub(start))
return finalErr
}
@@ -299,7 +269,10 @@ func (m *flushManager) indexFlush(
}
m.setState(flushManagerIndexFlushInProgress)
- multiErr := xerrors.NewMultiError()
+ var (
+ start = m.nowFn()
+ multiErr = xerrors.NewMultiError()
+ )
for _, ns := range namespaces {
var (
indexOpts = ns.Options().IndexOptions()
@@ -312,6 +285,7 @@ func (m *flushManager) indexFlush(
}
multiErr = multiErr.Add(indexFlush.DoneIndex())
+ m.metrics.indexFlushDuration.Record(m.nowFn().Sub(start))
return multiErr.FinalError()
}
@@ -321,27 +295,21 @@ func (m *flushManager) Report() {
m.RUnlock()
if state == flushManagerFlushInProgress {
- m.isFlushing.Update(1)
- } else {
- m.isFlushing.Update(0)
- }
-
- if state == flushManagerColdFlushInProgress {
- m.isColdFlushing.Update(1)
+ m.metrics.isFlushing.Update(1)
} else {
- m.isColdFlushing.Update(0)
+ m.metrics.isFlushing.Update(0)
}
if state == flushManagerSnapshotInProgress {
- m.isSnapshotting.Update(1)
+ m.metrics.isSnapshotting.Update(1)
} else {
- m.isSnapshotting.Update(0)
+ m.metrics.isSnapshotting.Update(0)
}
if state == flushManagerIndexFlushInProgress {
- m.isIndexFlushing.Update(1)
+ m.metrics.isIndexFlushing.Update(1)
} else {
- m.isIndexFlushing.Update(0)
+ m.metrics.isIndexFlushing.Update(0)
}
}
@@ -392,13 +360,8 @@ func (m *flushManager) namespaceSnapshotTimes(ns databaseNamespace, curr time.Ti
candidateTimes := timesInRange(earliest, latest, blockSize)
var loopErr error
return filterTimes(candidateTimes, func(t time.Time) bool {
- // Snapshot anything that is unflushed.
- needsFlush, err := ns.NeedsFlush(t, t)
- if err != nil {
- loopErr = err
- return false
- }
- return needsFlush
+	// NB(bodu): Snapshot everything to account for cold writes/blocks.
+ return true
}), loopErr
}
@@ -422,6 +385,7 @@ func (m *flushManager) flushNamespaceWithTimes(
return multiErr.FinalError()
}
-func (m *flushManager) LastSuccessfulSnapshotStartTime() (time.Time, bool) {
- return m.lastSuccessfulSnapshotStartTime, !m.lastSuccessfulSnapshotStartTime.IsZero()
+func (m *flushManager) LastSuccessfulSnapshotStartTime() (xtime.UnixNano, bool) {
+ snapTime := xtime.UnixNano(m.lastSuccessfulSnapshotStartTime.Load())
+ return snapTime, snapTime > 0
}
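
Reviewer note: lastSuccessfulSnapshotStartTime becomes a go.uber.org/atomic Int64 holding Unix nanoseconds, so LastSuccessfulSnapshotStartTime can be read without taking the manager's mutex. A minimal sketch of the pattern, with illustrative names:

```go
package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

type snapshotTracker struct {
	// Encodes an xtime.UnixNano-style nanosecond timestamp; zero
	// means no successful snapshot has completed yet.
	lastSnapshotStart atomic.Int64
}

func (t *snapshotTracker) markSuccess(start time.Time) {
	t.lastSnapshotStart.Store(start.UnixNano())
}

// lastSuccess is safe to call concurrently with markSuccess, no mutex needed.
func (t *snapshotTracker) lastSuccess() (time.Time, bool) {
	ns := t.lastSnapshotStart.Load()
	return time.Unix(0, ns), ns > 0
}

func main() {
	var tr snapshotTracker
	if _, ok := tr.lastSuccess(); !ok {
		fmt.Println("no snapshot yet")
	}
	tr.markSuccess(time.Now())
	when, ok := tr.lastSuccess()
	fmt.Println(ok, when.IsZero()) // true false
}
```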
diff --git a/src/dbnode/storage/flush_test.go b/src/dbnode/storage/flush_test.go
index 03457594d7..73fa37ca2e 100644
--- a/src/dbnode/storage/flush_test.go
+++ b/src/dbnode/storage/flush_test.go
@@ -34,6 +34,7 @@ import (
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/x/ident"
xtest "github.com/m3db/m3/src/x/test"
+ xtime "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
@@ -110,7 +111,7 @@ func TestFlushManagerFlushAlreadyInProgress(t *testing.T) {
testOpts := DefaultTestOptions().SetPersistManager(mockPersistManager)
db := newMockdatabase(ctrl)
db.EXPECT().Options().Return(testOpts).AnyTimes()
- db.EXPECT().GetOwnedNamespaces().Return(nil, nil).AnyTimes()
+ db.EXPECT().OwnedNamespaces().Return(nil, nil).AnyTimes()
cl := commitlog.NewMockCommitLog(ctrl)
cl.EXPECT().RotateLogs().Return(testCommitlogFile, nil).AnyTimes()
@@ -141,17 +142,12 @@ func TestFlushManagerFlushAlreadyInProgress(t *testing.T) {
// Allow the flush to finish.
doneCh <- struct{}{}
- // Wait until we start the compaction process.
+ // Allow the snapshot to begin and finish.
<-startCh
// Ensure it doesn't allow a parallel flush.
require.Equal(t, errFlushOperationsInProgress, fm.Flush(now))
- // Allow the compaction to finish.
- doneCh <- struct{}{}
-
- // Allow the snapshot to begin and finish.
- <-startCh
doneCh <- struct{}{}
}()
@@ -171,11 +167,8 @@ func TestFlushManagerFlushDoneFlushError(t *testing.T) {
mockSnapshotPersist = persist.NewMockSnapshotPreparer(ctrl)
)
- gomock.InOrder(
- mockFlushPersist.EXPECT().DoneFlush().Return(fakeErr),
- mockFlushPersist.EXPECT().DoneFlush().Return(nil),
- )
- mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil).Times(2)
+ mockFlushPersist.EXPECT().DoneFlush().Return(fakeErr)
+ mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil)
mockSnapshotPersist.EXPECT().DoneSnapshot(gomock.Any(), testCommitlogFile).Return(nil)
mockPersistManager.EXPECT().StartSnapshotPersist(gomock.Any()).Return(mockSnapshotPersist, nil)
@@ -187,7 +180,7 @@ func TestFlushManagerFlushDoneFlushError(t *testing.T) {
testOpts := DefaultTestOptions().SetPersistManager(mockPersistManager)
db := newMockdatabase(ctrl)
db.EXPECT().Options().Return(testOpts).AnyTimes()
- db.EXPECT().GetOwnedNamespaces().Return(nil, nil)
+ db.EXPECT().OwnedNamespaces().Return(nil, nil)
cl := commitlog.NewMockCommitLog(ctrl)
cl.EXPECT().RotateLogs().Return(testCommitlogFile, nil).AnyTimes()
@@ -213,8 +206,8 @@ func TestFlushManagerNamespaceFlushTimesErr(t *testing.T) {
)
-	// Make sure DoneFlush is called despite encountering an error, once for snapshot and once for warm flush.
+	// Make sure DoneFlush is called despite encountering an error during the warm flush.
- mockFlushPersist.EXPECT().DoneFlush().Return(nil).Times(2)
- mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil).Times(2)
+ mockFlushPersist.EXPECT().DoneFlush().Return(nil)
+ mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil)
mockSnapshotPersist.EXPECT().DoneSnapshot(gomock.Any(), testCommitlogFile).Return(nil)
mockPersistManager.EXPECT().StartSnapshotPersist(gomock.Any()).Return(mockSnapshotPersist, nil)
@@ -232,9 +225,8 @@ func TestFlushManagerNamespaceFlushTimesErr(t *testing.T) {
ns.EXPECT().Options().Return(nsOpts).AnyTimes()
ns.EXPECT().ID().Return(defaultTestNs1ID).AnyTimes()
ns.EXPECT().NeedsFlush(gomock.Any(), gomock.Any()).Return(false, fakeErr).AnyTimes()
- ns.EXPECT().ColdFlush(gomock.Any()).Return(nil).AnyTimes()
ns.EXPECT().Snapshot(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
- db.EXPECT().GetOwnedNamespaces().Return([]databaseNamespace{ns}, nil)
+ db.EXPECT().OwnedNamespaces().Return([]databaseNamespace{ns}, nil)
cl := commitlog.NewMockCommitLog(ctrl)
cl.EXPECT().RotateLogs().Return(testCommitlogFile, nil).AnyTimes()
@@ -259,8 +251,8 @@ func TestFlushManagerFlushDoneSnapshotError(t *testing.T) {
mockSnapshotPersist = persist.NewMockSnapshotPreparer(ctrl)
)
- mockFlushPersist.EXPECT().DoneFlush().Return(nil).Times(2)
- mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil).Times(2)
+ mockFlushPersist.EXPECT().DoneFlush().Return(nil)
+ mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil)
mockSnapshotPersist.EXPECT().DoneSnapshot(gomock.Any(), testCommitlogFile).Return(fakeErr)
mockPersistManager.EXPECT().StartSnapshotPersist(gomock.Any()).Return(mockSnapshotPersist, nil)
@@ -272,7 +264,7 @@ func TestFlushManagerFlushDoneSnapshotError(t *testing.T) {
testOpts := DefaultTestOptions().SetPersistManager(mockPersistManager)
db := newMockdatabase(ctrl)
db.EXPECT().Options().Return(testOpts).AnyTimes()
- db.EXPECT().GetOwnedNamespaces().Return(nil, nil)
+ db.EXPECT().OwnedNamespaces().Return(nil, nil)
cl := commitlog.NewMockCommitLog(ctrl)
cl.EXPECT().RotateLogs().Return(testCommitlogFile, nil).AnyTimes()
@@ -294,8 +286,8 @@ func TestFlushManagerFlushDoneIndexError(t *testing.T) {
mockPersistManager = persist.NewMockManager(ctrl)
)
- mockFlushPersist.EXPECT().DoneFlush().Return(nil).Times(2)
- mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil).Times(2)
+ mockFlushPersist.EXPECT().DoneFlush().Return(nil)
+ mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil)
mockSnapshotPersist.EXPECT().DoneSnapshot(gomock.Any(), testCommitlogFile).Return(nil)
mockPersistManager.EXPECT().StartSnapshotPersist(gomock.Any()).Return(mockSnapshotPersist, nil)
@@ -308,7 +300,7 @@ func TestFlushManagerFlushDoneIndexError(t *testing.T) {
testOpts := DefaultTestOptions().SetPersistManager(mockPersistManager)
db := newMockdatabase(ctrl)
db.EXPECT().Options().Return(testOpts).AnyTimes()
- db.EXPECT().GetOwnedNamespaces().Return(nil, nil)
+ db.EXPECT().OwnedNamespaces().Return(nil, nil)
cl := commitlog.NewMockCommitLog(ctrl)
cl.EXPECT().RotateLogs().Return(testCommitlogFile, nil).AnyTimes()
@@ -330,7 +322,6 @@ func TestFlushManagerSkipNamespaceIndexingDisabled(t *testing.T) {
ns.EXPECT().ID().Return(defaultTestNs1ID).AnyTimes()
ns.EXPECT().NeedsFlush(gomock.Any(), gomock.Any()).Return(true, nil).AnyTimes()
ns.EXPECT().WarmFlush(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
- ns.EXPECT().ColdFlush(gomock.Any()).Return(nil).AnyTimes()
ns.EXPECT().Snapshot(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
var (
@@ -339,8 +330,8 @@ func TestFlushManagerSkipNamespaceIndexingDisabled(t *testing.T) {
mockPersistManager = persist.NewMockManager(ctrl)
)
- mockFlushPersist.EXPECT().DoneFlush().Return(nil).Times(2)
- mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil).Times(2)
+ mockFlushPersist.EXPECT().DoneFlush().Return(nil)
+ mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil)
mockSnapshotPersist.EXPECT().DoneSnapshot(gomock.Any(), testCommitlogFile).Return(nil)
mockPersistManager.EXPECT().StartSnapshotPersist(gomock.Any()).Return(mockSnapshotPersist, nil)
@@ -352,7 +343,7 @@ func TestFlushManagerSkipNamespaceIndexingDisabled(t *testing.T) {
testOpts := DefaultTestOptions().SetPersistManager(mockPersistManager)
db := newMockdatabase(ctrl)
db.EXPECT().Options().Return(testOpts).AnyTimes()
- db.EXPECT().GetOwnedNamespaces().Return([]databaseNamespace{ns}, nil)
+ db.EXPECT().OwnedNamespaces().Return([]databaseNamespace{ns}, nil)
cl := commitlog.NewMockCommitLog(ctrl)
cl.EXPECT().RotateLogs().Return(testCommitlogFile, nil).AnyTimes()
@@ -374,7 +365,6 @@ func TestFlushManagerNamespaceIndexingEnabled(t *testing.T) {
ns.EXPECT().ID().Return(defaultTestNs1ID).AnyTimes()
ns.EXPECT().NeedsFlush(gomock.Any(), gomock.Any()).Return(true, nil).AnyTimes()
ns.EXPECT().WarmFlush(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
- ns.EXPECT().ColdFlush(gomock.Any()).Return(nil).AnyTimes()
ns.EXPECT().Snapshot(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
ns.EXPECT().FlushIndex(gomock.Any()).Return(nil)
@@ -384,8 +374,8 @@ func TestFlushManagerNamespaceIndexingEnabled(t *testing.T) {
mockPersistManager = persist.NewMockManager(ctrl)
)
- mockFlushPersist.EXPECT().DoneFlush().Return(nil).Times(2)
- mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil).Times(2)
+ mockFlushPersist.EXPECT().DoneFlush().Return(nil)
+ mockPersistManager.EXPECT().StartFlushPersist().Return(mockFlushPersist, nil)
mockSnapshotPersist.EXPECT().DoneSnapshot(gomock.Any(), testCommitlogFile).Return(nil)
mockPersistManager.EXPECT().StartSnapshotPersist(gomock.Any()).Return(mockSnapshotPersist, nil)
@@ -397,7 +387,7 @@ func TestFlushManagerNamespaceIndexingEnabled(t *testing.T) {
testOpts := DefaultTestOptions().SetPersistManager(mockPersistManager)
db := newMockdatabase(ctrl)
db.EXPECT().Options().Return(testOpts).AnyTimes()
- db.EXPECT().GetOwnedNamespaces().Return([]databaseNamespace{ns}, nil)
+ db.EXPECT().OwnedNamespaces().Return([]databaseNamespace{ns}, nil)
cl := commitlog.NewMockCommitLog(ctrl)
cl.EXPECT().RotateLogs().Return(testCommitlogFile, nil).AnyTimes()
@@ -553,13 +543,10 @@ func TestFlushManagerFlushSnapshot(t *testing.T) {
ns.EXPECT().NeedsFlush(st, st).Return(false, nil)
}
- ns.EXPECT().ColdFlush(gomock.Any())
-
snapshotEnd := now.Add(bufferFuture).Truncate(blockSize)
num = numIntervals(start, snapshotEnd, blockSize)
for i := 0; i < num; i++ {
st := start.Add(time.Duration(i) * blockSize)
- ns.EXPECT().NeedsFlush(st, st).Return(true, nil)
ns.EXPECT().Snapshot(st, now, gomock.Any())
}
}
@@ -568,7 +555,7 @@ func TestFlushManagerFlushSnapshot(t *testing.T) {
lastSuccessfulSnapshot, ok := fm.LastSuccessfulSnapshotStartTime()
require.True(t, ok)
- require.Equal(t, now, lastSuccessfulSnapshot)
+ require.Equal(t, xtime.ToUnixNano(now), lastSuccessfulSnapshot)
}
type timesInOrder []time.Time
diff --git a/src/dbnode/storage/fs.go b/src/dbnode/storage/fs.go
index a4d85683dc..0fd7509982 100644
--- a/src/dbnode/storage/fs.go
+++ b/src/dbnode/storage/fs.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
+ "github.com/m3db/m3/src/x/instrument"
"go.uber.org/zap"
)
@@ -153,11 +154,21 @@ func (m *fileSystemManager) Run(
// NB(xichen): perform data cleanup and flushing sequentially to minimize the impact of disk seeks.
flushFn := func() {
- if err := m.Cleanup(t); err != nil {
- m.log.Error("error when cleaning up data", zap.Time("time", t), zap.Error(err))
+		// NB(r): Use an invariant violation here since flush errors were
+		// previously introduced and not caught in CI or integration tests.
+		// When an invariant violation occurs in CI tests it panics so as
+		// to fail the build.
+ if err := m.WarmFlushCleanup(t, m.database.IsBootstrapped()); err != nil {
+ instrument.EmitAndLogInvariantViolation(m.opts.InstrumentOptions(),
+ func(l *zap.Logger) {
+ l.Error("error when cleaning up data", zap.Time("time", t), zap.Error(err))
+ })
}
if err := m.Flush(t); err != nil {
- m.log.Error("error when flushing data", zap.Time("time", t), zap.Error(err))
+ instrument.EmitAndLogInvariantViolation(m.opts.InstrumentOptions(),
+ func(l *zap.Logger) {
+ l.Error("error when flushing data", zap.Time("time", t), zap.Error(err))
+ })
}
m.Lock()
m.status = fileOpNotStarted
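
Reviewer note: cleanup and flush errors are now routed through instrument.EmitAndLogInvariantViolation so they fail CI loudly instead of only logging. A hedged sketch of how such a helper typically behaves (the env-var gate and helper names are illustrative, not necessarily m3's exact implementation):

```go
package main

import (
	"os"

	"go.uber.org/zap"
)

// emitInvariantViolation logs the violation and, when a test-only
// environment flag is set (as in CI), panics so the build fails.
func emitInvariantViolation(logger *zap.Logger, f func(l *zap.Logger)) {
	f(logger) // let the caller attach structured context

	// Illustrative flag name; real implementations gate the panic
	// behind an env var so production processes never panic.
	if os.Getenv("PANIC_ON_INVARIANT_VIOLATED") == "true" {
		panic("invariant violated")
	}
}

func runWarmFlushCleanup() error { return nil } // stand-in for m.WarmFlushCleanup

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()

	if err := runWarmFlushCleanup(); err != nil {
		emitInvariantViolation(logger, func(l *zap.Logger) {
			l.Error("error when cleaning up data", zap.Error(err))
		})
	}
}
```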
diff --git a/src/dbnode/storage/fs_merge_with_mem.go b/src/dbnode/storage/fs_merge_with_mem.go
index 5a61680d82..ab409c1d17 100644
--- a/src/dbnode/storage/fs_merge_with_mem.go
+++ b/src/dbnode/storage/fs_merge_with_mem.go
@@ -23,6 +23,7 @@ package storage
import (
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/x/context"
@@ -41,6 +42,7 @@ type fsMergeWithMem struct {
retriever series.QueryableBlockRetriever
dirtySeries *dirtySeriesMap
dirtySeriesToWrite map[xtime.UnixNano]*idList
+ reuseableID *ident.ReuseableBytesID
}
func newFSMergeWithMem(
@@ -54,6 +56,7 @@ func newFSMergeWithMem(
retriever: retriever,
dirtySeries: dirtySeries,
dirtySeriesToWrite: dirtySeriesToWrite,
+ reuseableID: ident.NewReuseableBytesID(),
}
}
@@ -64,7 +67,10 @@ func (m *fsMergeWithMem) Read(
nsCtx namespace.Context,
) ([]xio.BlockReader, bool, error) {
// Check if this series is in memory (and thus requires merging).
- element, exists := m.dirtySeries.Get(idAndBlockStart{blockStart: blockStart, id: seriesID})
+ element, exists := m.dirtySeries.Get(idAndBlockStart{
+ blockStart: blockStart,
+ id: seriesID.Bytes(),
+ })
if !exists {
return nil, false, nil
}
@@ -75,7 +81,11 @@ func (m *fsMergeWithMem) Read(
// it.
m.dirtySeriesToWrite[blockStart].Remove(element)
- return m.fetchBlocks(ctx, element.Value, blockStart, nsCtx)
+ result, ok, err := m.fetchBlocks(ctx, seriesID, blockStart, nsCtx)
+ if err != nil {
+ return nil, false, err
+ }
+ return result.Blocks, ok, nil
}
func (m *fsMergeWithMem) fetchBlocks(
@@ -83,24 +93,24 @@ func (m *fsMergeWithMem) fetchBlocks(
id ident.ID,
blockStart xtime.UnixNano,
nsCtx namespace.Context,
-) ([]xio.BlockReader, bool, error) {
+) (block.FetchBlockResult, bool, error) {
startTime := blockStart.ToTime()
currVersion, err := m.retriever.RetrievableBlockColdVersion(startTime)
if err != nil {
- return nil, false, err
+ return block.FetchBlockResult{}, false, err
}
nextVersion := currVersion + 1
- blocks, err := m.shard.FetchBlocksForColdFlush(ctx, id, startTime, nextVersion, nsCtx)
+ result, err := m.shard.FetchBlocksForColdFlush(ctx, id, startTime, nextVersion, nsCtx)
if err != nil {
- return nil, false, err
+ return block.FetchBlockResult{}, false, err
}
- if len(blocks) > 0 {
- return blocks, true, nil
+ if len(result.Blocks) > 0 {
+ return result, true, nil
}
- return nil, false, nil
+ return block.FetchBlockResult{}, false, nil
}
// The data passed to ForEachRemaining (through the fs.ForEachRemainingFn) is
@@ -113,33 +123,19 @@ func (m *fsMergeWithMem) ForEachRemaining(
fn fs.ForEachRemainingFn,
nsCtx namespace.Context,
) error {
+ reuseableID := m.reuseableID
seriesList := m.dirtySeriesToWrite[blockStart]
for seriesElement := seriesList.Front(); seriesElement != nil; seriesElement = seriesElement.Next() {
- seriesID := seriesElement.Value
-
- // TODO(r): We should really not be looking this up per series element
- // and just keep it in the linked list next to the series ID.
- tags, ok, err := m.shard.TagsFromSeriesID(seriesID)
+ seriesMetadata := seriesElement.Value
+ reuseableID.Reset(seriesMetadata.ID)
+ mergeWithData, hasData, err := m.fetchBlocks(ctx, reuseableID, blockStart, nsCtx)
if err != nil {
return err
}
- if !ok {
- // Receiving not ok means that the series was not found, for some
- // reason like it falling out of retention, therefore we skip this
- // series and continue.
- // TODO(r): This should actually be an invariant error - these should not
- // be evicted until a flush otherwise the durability guarantee was not
- // upheld.
- continue
- }
- mergeWithData, hasData, err := m.fetchBlocks(ctx, seriesID, blockStart, nsCtx)
- if err != nil {
- return err
- }
if hasData {
- err = fn(seriesID, tags, mergeWithData)
+ err = fn(seriesMetadata, mergeWithData)
if err != nil {
return err
}
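
Reviewer note: ForEachRemaining now carries doc.Document metadata in the list and resets one shared ident.ReuseableBytesID per element, removing the per-series TagsFromSeriesID lookup. The allocation-saving idea with a simplified ID type (the real ReuseableBytesID implements the richer ident.ID interface):

```go
package main

import "fmt"

// reuseableBytesID points at caller-owned bytes; Reset swaps the
// backing slice without allocating a new ID per series.
type reuseableBytesID struct{ b []byte }

func (r *reuseableBytesID) Reset(b []byte) { r.b = b }
func (r *reuseableBytesID) Bytes() []byte  { return r.b }

// document is a stand-in for doc.Document: series metadata whose ID
// already lives in the list element.
type document struct{ ID []byte }

func main() {
	series := []document{{ID: []byte("id0")}, {ID: []byte("id1")}}

	// One ID allocation for the whole loop, reset per element.
	id := &reuseableBytesID{}
	for _, d := range series {
		id.Reset(d.ID)
		fmt.Printf("fetch blocks for %s\n", id.Bytes())
	}
}
```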
diff --git a/src/dbnode/storage/fs_merge_with_mem_test.go b/src/dbnode/storage/fs_merge_with_mem_test.go
index 698d285f36..3e982e1321 100644
--- a/src/dbnode/storage/fs_merge_with_mem_test.go
+++ b/src/dbnode/storage/fs_merge_with_mem_test.go
@@ -25,10 +25,13 @@ import (
"testing"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
+ xtest "github.com/m3db/m3/src/x/test"
xtime "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
@@ -42,7 +45,7 @@ type dirtyData struct {
}
func TestRead(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
shard := NewMockdatabaseShard(ctrl)
@@ -50,10 +53,12 @@ func TestRead(t *testing.T) {
version := 0
ctx := context.NewContext()
nsCtx := namespace.Context{}
- fetchedBlocks := []xio.BlockReader{xio.BlockReader{}}
+ result := block.FetchBlockResult{
+ Blocks: []xio.BlockReader{xio.BlockReader{}},
+ }
retriever.EXPECT().RetrievableBlockColdVersion(gomock.Any()).Return(version, nil).AnyTimes()
- dirtySeries := newDirtySeriesMap(dirtySeriesMapOptions{})
+ dirtySeries := newDirtySeriesMap()
dirtySeriesToWrite := make(map[xtime.UnixNano]*idList)
data := []dirtyData{
@@ -73,18 +78,21 @@ func TestRead(t *testing.T) {
addDirtySeries(dirtySeries, dirtySeriesToWrite, d.id, d.start)
shard.EXPECT().
FetchBlocksForColdFlush(gomock.Any(), d.id, d.start.ToTime(), version+1, nsCtx).
- Return(fetchedBlocks, nil)
+ Return(result, nil)
}
mergeWith := newFSMergeWithMem(shard, retriever, dirtySeries, dirtySeriesToWrite)
for _, d := range data {
- require.True(t, dirtySeries.Contains(idAndBlockStart{blockStart: d.start, id: d.id}))
+ require.True(t, dirtySeries.Contains(idAndBlockStart{
+ blockStart: d.start,
+ id: d.id.Bytes(),
+ }))
beforeLen := dirtySeriesToWrite[d.start].Len()
res, exists, err := mergeWith.Read(ctx, d.id, d.start, nsCtx)
require.NoError(t, err)
assert.True(t, exists)
- assert.Equal(t, fetchedBlocks, res)
+ assert.Equal(t, result.Blocks, res)
// Assert that the Read call removes the element from the "to write"
// list.
assert.Equal(t, beforeLen-1, dirtySeriesToWrite[d.start].Len())
@@ -101,7 +109,7 @@ func TestRead(t *testing.T) {
addDirtySeries(dirtySeries, dirtySeriesToWrite, badFetchID, 11)
shard.EXPECT().
FetchBlocksForColdFlush(gomock.Any(), badFetchID, gomock.Any(), version+1, nsCtx).
- Return(nil, errors.New("fetch error"))
+ Return(block.FetchBlockResult{}, errors.New("fetch error"))
res, exists, err = mergeWith.Read(ctx, badFetchID, 11, nsCtx)
assert.Nil(t, res)
assert.False(t, exists)
@@ -112,7 +120,7 @@ func TestRead(t *testing.T) {
addDirtySeries(dirtySeries, dirtySeriesToWrite, emptyDataID, 12)
shard.EXPECT().
FetchBlocksForColdFlush(gomock.Any(), emptyDataID, gomock.Any(), version+1, nsCtx).
- Return(nil, nil)
+ Return(block.FetchBlockResult{}, nil)
res, exists, err = mergeWith.Read(ctx, emptyDataID, 12, nsCtx)
assert.Nil(t, res)
assert.False(t, exists)
@@ -120,7 +128,7 @@ func TestRead(t *testing.T) {
}
func TestForEachRemaining(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
shard := NewMockdatabaseShard(ctrl)
@@ -128,10 +136,12 @@ func TestForEachRemaining(t *testing.T) {
version := 0
ctx := context.NewContext()
nsCtx := namespace.Context{}
- fetchedBlocks := []xio.BlockReader{xio.BlockReader{}}
+ result := block.FetchBlockResult{
+ Blocks: []xio.BlockReader{xio.BlockReader{}},
+ }
retriever.EXPECT().RetrievableBlockColdVersion(gomock.Any()).Return(version, nil).AnyTimes()
- dirtySeries := newDirtySeriesMap(dirtySeriesMapOptions{})
+ dirtySeries := newDirtySeriesMap()
dirtySeriesToWrite := make(map[xtime.UnixNano]*idList)
id0 := ident.StringID("id0")
@@ -162,65 +172,59 @@ func TestForEachRemaining(t *testing.T) {
mergeWith := newFSMergeWithMem(shard, retriever, dirtySeries, dirtySeriesToWrite)
- var forEachCalls []ident.ID
- shard.EXPECT().TagsFromSeriesID(gomock.Any()).Return(ident.Tags{}, true, nil).Times(2)
+ var forEachCalls []doc.Document
shard.EXPECT().
- FetchBlocksForColdFlush(gomock.Any(), id0, xtime.UnixNano(0).ToTime(), version+1, gomock.Any()).
- Return(fetchedBlocks, nil)
+ FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id0"),
+ xtime.UnixNano(0).ToTime(), version+1, gomock.Any()).
+ Return(result, nil)
shard.EXPECT().
- FetchBlocksForColdFlush(gomock.Any(), id1, xtime.UnixNano(0).ToTime(), version+1, gomock.Any()).
- Return(fetchedBlocks, nil)
- mergeWith.ForEachRemaining(ctx, 0, func(seriesID ident.ID, tags ident.Tags, data []xio.BlockReader) error {
- forEachCalls = append(forEachCalls, seriesID)
+ FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id1"),
+ xtime.UnixNano(0).ToTime(), version+1, gomock.Any()).
+ Return(result, nil)
+ mergeWith.ForEachRemaining(ctx, 0, func(seriesMetadata doc.Document, result block.FetchBlockResult) error {
+ forEachCalls = append(forEachCalls, seriesMetadata)
return nil
}, nsCtx)
require.Len(t, forEachCalls, 2)
- assert.Equal(t, id0, forEachCalls[0])
- assert.Equal(t, id1, forEachCalls[1])
+ assert.Equal(t, id0.Bytes(), forEachCalls[0].ID)
+ assert.Equal(t, id1.Bytes(), forEachCalls[1].ID)
// Reset expected calls.
forEachCalls = forEachCalls[:0]
// Read id3 at block start 1, so id2 and id4 should be remaining for block
// start 1.
shard.EXPECT().
- FetchBlocksForColdFlush(gomock.Any(), id3, xtime.UnixNano(1).ToTime(), version+1, nsCtx).
- Return(fetchedBlocks, nil)
+ FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id3"),
+ xtime.UnixNano(1).ToTime(), version+1, nsCtx).
+ Return(result, nil)
res, exists, err := mergeWith.Read(ctx, id3, 1, nsCtx)
require.NoError(t, err)
assert.True(t, exists)
- assert.Equal(t, fetchedBlocks, res)
- shard.EXPECT().TagsFromSeriesID(gomock.Any()).Return(ident.Tags{}, true, nil).Times(2)
+ assert.Equal(t, result.Blocks, res)
shard.EXPECT().
- FetchBlocksForColdFlush(gomock.Any(), id2, xtime.UnixNano(1).ToTime(), version+1, gomock.Any()).
- Return(fetchedBlocks, nil)
+ FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id2"),
+ xtime.UnixNano(1).ToTime(), version+1, gomock.Any()).
+ Return(result, nil)
shard.EXPECT().
- FetchBlocksForColdFlush(gomock.Any(), id4, xtime.UnixNano(1).ToTime(), version+1, gomock.Any()).
- Return(fetchedBlocks, nil)
- err = mergeWith.ForEachRemaining(ctx, 1, func(seriesID ident.ID, tags ident.Tags, data []xio.BlockReader) error {
- forEachCalls = append(forEachCalls, seriesID)
+ FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id4"),
+ xtime.UnixNano(1).ToTime(), version+1, gomock.Any()).
+ Return(result, nil)
+ err = mergeWith.ForEachRemaining(ctx, 1, func(seriesMetadata doc.Document, result block.FetchBlockResult) error {
+ forEachCalls = append(forEachCalls, seriesMetadata)
return nil
}, nsCtx)
require.NoError(t, err)
require.Len(t, forEachCalls, 2)
- assert.Equal(t, id2, forEachCalls[0])
- assert.Equal(t, id4, forEachCalls[1])
+ assert.Equal(t, id2.Bytes(), forEachCalls[0].ID)
+ assert.Equal(t, id4.Bytes(), forEachCalls[1].ID)
- // Test call with error getting tags.
- shard.EXPECT().
- TagsFromSeriesID(gomock.Any()).Return(ident.Tags{}, false, errors.New("bad-tags"))
shard.EXPECT().
- FetchBlocksForColdFlush(gomock.Any(), id8, xtime.UnixNano(4).ToTime(), version+1, gomock.Any()).
- Return(fetchedBlocks, nil)
- err = mergeWith.ForEachRemaining(ctx, 4, func(seriesID ident.ID, tags ident.Tags, data []xio.BlockReader) error {
- // This function won't be called with the above error.
- return errors.New("unreachable")
- }, nsCtx)
- assert.Error(t, err)
+ FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id8"),
+ xtime.UnixNano(4).ToTime(), version+1, gomock.Any()).
+ Return(result, nil)
// Test call with bad function execution.
- shard.EXPECT().
- TagsFromSeriesID(gomock.Any()).Return(ident.Tags{}, true, nil)
- err = mergeWith.ForEachRemaining(ctx, 4, func(seriesID ident.ID, tags ident.Tags, data []xio.BlockReader) error {
+ err = mergeWith.ForEachRemaining(ctx, 4, func(seriesMetadata doc.Document, result block.FetchBlockResult) error {
return errors.New("bad")
}, nsCtx)
assert.Error(t, err)
@@ -237,7 +241,7 @@ func addDirtySeries(
seriesList = newIDList(nil)
dirtySeriesToWrite[start] = seriesList
}
- element := seriesList.PushBack(id)
+ element := seriesList.PushBack(doc.Document{ID: id.Bytes()})
- dirtySeries.Set(idAndBlockStart{blockStart: start, id: id}, element)
+ dirtySeries.Set(idAndBlockStart{blockStart: start, id: id.Bytes()}, element)
}
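
Reviewer note: because a single reuseable ID is now passed to FetchBlocksForColdFlush, the test matches IDs by content with ident.NewIDMatcher rather than by value identity. A sketch of the custom gomock matcher pattern this relies on (id is a simplified stand-in for ident.ID):

```go
package main

import (
	"fmt"

	"github.com/golang/mock/gomock"
)

type id struct{ s string }

func (i id) String() string { return i.s }

// idMatcher matches any id with the same string contents, which is
// what lets a single reused ID satisfy per-series expectations.
type idMatcher struct{ want string }

func (m idMatcher) Matches(x interface{}) bool {
	other, ok := x.(id)
	return ok && other.s == m.want
}

func (m idMatcher) String() string { return "id equal to " + m.want }

// Compile-time check that idMatcher satisfies gomock.Matcher.
var _ gomock.Matcher = idMatcher{}

func main() {
	m := idMatcher{want: "id0"}
	fmt.Println(m.Matches(id{s: "id0"}), m.Matches(id{s: "id1"})) // true false
}
```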
diff --git a/src/dbnode/storage/fs_test.go b/src/dbnode/storage/fs_test.go
index ae940c5206..17bff01cc6 100644
--- a/src/dbnode/storage/fs_test.go
+++ b/src/dbnode/storage/fs_test.go
@@ -85,7 +85,7 @@ func TestFileSystemManagerRun(t *testing.T) {
ts := time.Now()
gomock.InOrder(
- cm.EXPECT().Cleanup(ts).Return(errors.New("foo")),
+ cm.EXPECT().WarmFlushCleanup(ts, true).Return(errors.New("foo")),
fm.EXPECT().Flush(ts).Return(errors.New("bar")),
)
diff --git a/src/dbnode/storage/id_list_gen.go b/src/dbnode/storage/id_list_gen.go
index e66102dace..027e24acbc 100644
--- a/src/dbnode/storage/id_list_gen.go
+++ b/src/dbnode/storage/id_list_gen.go
@@ -25,7 +25,9 @@
package storage
import (
- "github.com/m3db/m3/src/x/ident"
+ "sync"
+
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/pool"
)
@@ -97,7 +99,7 @@ type idElement struct {
list *idList
// The value stored with this element.
- Value ident.ID
+ Value doc.Document
}
// Next returns the next list element or nil.
@@ -130,11 +132,26 @@ func (l *idList) Init() *idList {
l.root.prev = &l.root
l.len = 0
if l.Pool == nil {
- l.Pool = newIDElementPool(nil)
+		// Fall back to a shared static pool, otherwise each time
+		// we create a list with no pool we allocate a wholly
+		// new pool of finalizeables (4096 of them).
+ defaultElementPoolOnce.Do(initElementPool)
+ l.Pool = defaultElementPool
}
return l
}
+var (
+ defaultElementPoolOnce sync.Once
+ defaultElementPool *idElementPool
+)
+
+// Defined as a package-level function so no closure allocation
+// is required when passing the function pointer to sync.Once.Do.
+func initElementPool() {
+ defaultElementPool = newIDElementPool(nil)
+}
+
// newIDList returns an initialized list.
func newIDList(p *idElementPool) *idList {
l := &idList{Pool: p}
@@ -181,7 +198,7 @@ func (l *idList) insert(e, at *idElement) *idElement {
}
// insertValue is a convenience wrapper for inserting using the list's pool.
-func (l *idList) insertValue(v ident.ID, at *idElement) *idElement {
+func (l *idList) insertValue(v doc.Document, at *idElement) *idElement {
e := l.Pool.get()
e.Value = v
return l.insert(e, at)
@@ -201,7 +218,7 @@ func (l *idList) remove(e *idElement) *idElement {
// Remove removes e from l if e is an element of list l.
// It returns the element value e.Value.
// The element must not be nil.
-func (l *idList) Remove(e *idElement) ident.ID {
+func (l *idList) Remove(e *idElement) doc.Document {
if e.list == l {
// if e.list == l, l must have been initialized when e was inserted
// in l or l == nil (e is a zero Element) and l.remove will crash.
@@ -212,13 +229,13 @@ func (l *idList) Remove(e *idElement) ident.ID {
}
// PushFront inserts a new element e with value v at the front of list l and returns e.
-func (l *idList) PushFront(v ident.ID) *idElement {
+func (l *idList) PushFront(v doc.Document) *idElement {
l.lazyInit()
return l.insertValue(v, &l.root)
}
// PushBack inserts a new element e with value v at the back of list l and returns e.
-func (l *idList) PushBack(v ident.ID) *idElement {
+func (l *idList) PushBack(v doc.Document) *idElement {
l.lazyInit()
return l.insertValue(v, l.root.prev)
}
@@ -226,7 +243,7 @@ func (l *idList) PushBack(v ident.ID) *idElement {
// InsertBefore inserts a new element e with value v immediately before mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
-func (l *idList) InsertBefore(v ident.ID, mark *idElement) *idElement {
+func (l *idList) InsertBefore(v doc.Document, mark *idElement) *idElement {
if mark.list != l {
return nil
}
@@ -237,7 +254,7 @@ func (l *idList) InsertBefore(v ident.ID, mark *idElement) *idElement {
// InsertAfter inserts a new element e with value v immediately after mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
-func (l *idList) InsertAfter(v ident.ID, mark *idElement) *idElement {
+func (l *idList) InsertAfter(v doc.Document, mark *idElement) *idElement {
if mark.list != l {
return nil
}
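
Reviewer note: the list now falls back to a process-wide element pool guarded by sync.Once, so pool-less lists no longer each allocate a fresh 4096-element pool. The pattern in isolation (pool internals elided; the size matches the comment above):

```go
package main

import (
	"fmt"
	"sync"
)

type elementPool struct{ size int }

func newElementPool() *elementPool { return &elementPool{size: 4096} }

var (
	defaultPoolOnce sync.Once
	defaultPool     *elementPool
)

// initPool is a named function so sync.Once.Do receives a plain
// function value and no closure is allocated at each call site.
func initPool() { defaultPool = newElementPool() }

func getDefaultPool() *elementPool {
	defaultPoolOnce.Do(initPool)
	return defaultPool
}

func main() {
	// Every caller shares the same pool; only the first call allocates.
	p1, p2 := getDefaultPool(), getDefaultPool()
	fmt.Println(p1 == p2, p1.size) // true 4096
}
```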
diff --git a/src/dbnode/storage/index.go b/src/dbnode/storage/index.go
index e2a0d3aafa..52407b8ffc 100644
--- a/src/dbnode/storage/index.go
+++ b/src/dbnode/storage/index.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -21,8 +21,11 @@
package storage
import (
+ "bytes"
"errors"
"fmt"
+ "math"
+ goruntime "runtime"
"sort"
"strconv"
"sync"
@@ -42,12 +45,15 @@ import (
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
"github.com/m3db/m3/src/dbnode/storage/index/convert"
+ "github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/tracepoint"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/idx"
m3ninxindex "github.com/m3db/m3/src/m3ninx/index"
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/builder"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
xclose "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
@@ -72,11 +78,14 @@ var (
errDbIndexUnableToCleanupClosed = errors.New("unable to cleanup database index, already closed")
errDbIndexTerminatingTickCancellation = errors.New("terminating tick early due to cancellation")
errDbIndexIsBootstrapping = errors.New("index is already bootstrapping")
+ errDbIndexDoNotIndexSeries = errors.New("series matched do not index fields")
)
const (
defaultFlushReadDataBlocksBatchSize = int64(4096)
nsIndexReportStatsInterval = 10 * time.Second
+
+ defaultFlushDocsBatchSize = 8192
)
var (
@@ -97,14 +106,17 @@ type nsIndex struct {
bufferFuture time.Duration
coldWritesEnabled bool
- indexFilesetsBeforeFn indexFilesetsBeforeFn
- deleteFilesFn deleteFilesFn
+ namespaceRuntimeOptsMgr namespace.RuntimeOptionsManager
+ indexFilesetsBeforeFn indexFilesetsBeforeFn
+ deleteFilesFn deleteFilesFn
+ readIndexInfoFilesFn readIndexInfoFilesFn
- newBlockFn newBlockFn
- logger *zap.Logger
- opts Options
- nsMetadata namespace.Metadata
- runtimeOptsListener xclose.SimpleCloser
+ newBlockFn index.NewBlockFn
+ logger *zap.Logger
+ opts Options
+ nsMetadata namespace.Metadata
+ runtimeOptsListener xclose.SimpleCloser
+ runtimeNsOptsListener xclose.SimpleCloser
resultsPool index.QueryResultsPool
aggregateResultsPool index.AggregateResultsPool
@@ -123,6 +135,8 @@ type nsIndex struct {
// forwardIndexDice determines if an incoming index write should be dual
// written to the next block.
forwardIndexDice forwardIndexDice
+
+ doNotIndexWithFields []doc.Field
}
type nsIndexState struct {
@@ -152,6 +166,8 @@ type nsIndexState struct {
// shardsFilterID is set every time the shards change to correctly
// only return IDs that this node owns.
shardsFilterID func(ident.ID) bool
+
+ shardsAssigned map[uint32]struct{}
}
// NB: nsIndexRuntimeOptions does not contain its own mutex as some of the variables
@@ -160,17 +176,11 @@ type nsIndexState struct {
// under the same nsIndex mutex.
type nsIndexRuntimeOptions struct {
insertMode index.InsertMode
- maxQueryLimit int64
+ maxQuerySeriesLimit int64
+ maxQueryDocsLimit int64
defaultQueryTimeout time.Duration
}
-type newBlockFn func(
- time.Time,
- namespace.Metadata,
- index.BlockOptions,
- index.Options,
-) (index.Block, error)
-
// NB(prateek): the returned filesets are strictly before the given time, i.e. they
// live in the period (-infinity, exclusiveTime).
type indexFilesetsBeforeFn func(dir string,
@@ -178,12 +188,18 @@ type indexFilesetsBeforeFn func(dir string,
exclusiveTime time.Time,
) ([]string, error)
+type readIndexInfoFilesFn func(filePathPrefix string,
+ namespace ident.ID,
+ readerBufferSize int,
+) []fs.ReadIndexInfoFileResult
+
type newNamespaceIndexOpts struct {
- md namespace.Metadata
- shardSet sharding.ShardSet
- opts Options
- newIndexQueueFn newNamespaceIndexInsertQueueFn
- newBlockFn newBlockFn
+ md namespace.Metadata
+ namespaceRuntimeOptsMgr namespace.RuntimeOptionsManager
+ shardSet sharding.ShardSet
+ opts Options
+ newIndexQueueFn newNamespaceIndexInsertQueueFn
+ newBlockFn index.NewBlockFn
}
// execBlockQueryFn executes a query against the given block whilst tracking state.
@@ -208,54 +224,60 @@ type asyncQueryExecState struct {
// newNamespaceIndex returns a new namespaceIndex for the provided namespace.
func newNamespaceIndex(
nsMD namespace.Metadata,
+ namespaceRuntimeOptsMgr namespace.RuntimeOptionsManager,
shardSet sharding.ShardSet,
opts Options,
-) (namespaceIndex, error) {
+) (NamespaceIndex, error) {
return newNamespaceIndexWithOptions(newNamespaceIndexOpts{
- md: nsMD,
- shardSet: shardSet,
- opts: opts,
- newIndexQueueFn: newNamespaceIndexInsertQueue,
- newBlockFn: index.NewBlock,
+ md: nsMD,
+ namespaceRuntimeOptsMgr: namespaceRuntimeOptsMgr,
+ shardSet: shardSet,
+ opts: opts,
+ newIndexQueueFn: newNamespaceIndexInsertQueue,
+ newBlockFn: index.NewBlock,
})
}
// newNamespaceIndexWithInsertQueueFn is a ctor used in tests to override the insert queue.
func newNamespaceIndexWithInsertQueueFn(
nsMD namespace.Metadata,
+ namespaceRuntimeOptsMgr namespace.RuntimeOptionsManager,
shardSet sharding.ShardSet,
newIndexQueueFn newNamespaceIndexInsertQueueFn,
opts Options,
-) (namespaceIndex, error) {
+) (NamespaceIndex, error) {
return newNamespaceIndexWithOptions(newNamespaceIndexOpts{
- md: nsMD,
- shardSet: shardSet,
- opts: opts,
- newIndexQueueFn: newIndexQueueFn,
- newBlockFn: index.NewBlock,
+ md: nsMD,
+ namespaceRuntimeOptsMgr: namespaceRuntimeOptsMgr,
+ shardSet: shardSet,
+ opts: opts,
+ newIndexQueueFn: newIndexQueueFn,
+ newBlockFn: index.NewBlock,
})
}
// newNamespaceIndexWithNewBlockFn is a ctor used in tests to inject blocks.
func newNamespaceIndexWithNewBlockFn(
nsMD namespace.Metadata,
+ namespaceRuntimeOptsMgr namespace.RuntimeOptionsManager,
shardSet sharding.ShardSet,
- newBlockFn newBlockFn,
+ newBlockFn index.NewBlockFn,
opts Options,
-) (namespaceIndex, error) {
+) (NamespaceIndex, error) {
return newNamespaceIndexWithOptions(newNamespaceIndexOpts{
- md: nsMD,
- shardSet: shardSet,
- opts: opts,
- newIndexQueueFn: newNamespaceIndexInsertQueue,
- newBlockFn: newBlockFn,
+ md: nsMD,
+ namespaceRuntimeOptsMgr: namespaceRuntimeOptsMgr,
+ shardSet: shardSet,
+ opts: opts,
+ newIndexQueueFn: newNamespaceIndexInsertQueue,
+ newBlockFn: newBlockFn,
})
}
// newNamespaceIndexWithOptions returns a new namespaceIndex with the provided configuration options.
func newNamespaceIndexWithOptions(
newIndexOpts newNamespaceIndexOpts,
-) (namespaceIndex, error) {
+) (NamespaceIndex, error) {
var (
nsMD = newIndexOpts.md
shardSet = newIndexOpts.shardSet
@@ -278,13 +300,26 @@ func newNamespaceIndexWithOptions(
indexOpts = indexOpts.SetInstrumentOptions(instrumentOpts)
nowFn := indexOpts.ClockOptions().NowFn()
+ logger := indexOpts.InstrumentOptions().Logger()
+
+ var doNotIndexWithFields []doc.Field
+ if m := newIndexOpts.opts.DoNotIndexWithFieldsMap(); m != nil && len(m) != 0 {
+ for k, v := range m {
+ doNotIndexWithFields = append(doNotIndexWithFields, doc.Field{
+ Name: []byte(k),
+ Value: []byte(v),
+ })
+ }
+ }
+
idx := &nsIndex{
state: nsIndexState{
closeCh: make(chan struct{}),
runtimeOpts: nsIndexRuntimeOptions{
insertMode: indexOpts.InsertMode(), // FOLLOWUP(prateek): wire to allow this to be tweaked at runtime
},
- blocksByTime: make(map[xtime.UnixNano]index.Block),
+ blocksByTime: make(map[xtime.UnixNano]index.Block),
+ shardsAssigned: make(map[uint32]struct{}),
},
nowFn: nowFn,
@@ -295,12 +330,14 @@ func newNamespaceIndexWithOptions(
bufferFuture: nsMD.Options().RetentionOptions().BufferFuture(),
coldWritesEnabled: nsMD.Options().ColdWritesEnabled(),
- indexFilesetsBeforeFn: fs.IndexFileSetsBefore,
- deleteFilesFn: fs.DeleteFiles,
+ namespaceRuntimeOptsMgr: newIndexOpts.namespaceRuntimeOptsMgr,
+ indexFilesetsBeforeFn: fs.IndexFileSetsBefore,
+ readIndexInfoFilesFn: fs.ReadIndexInfoFiles,
+ deleteFilesFn: fs.DeleteFiles,
newBlockFn: newBlockFn,
opts: newIndexOpts.opts,
- logger: indexOpts.InstrumentOptions().Logger(),
+ logger: logger,
nsMetadata: nsMD,
resultsPool: indexOpts.QueryResultsPool(),
@@ -308,14 +345,15 @@ func newNamespaceIndexWithOptions(
queryWorkersPool: newIndexOpts.opts.QueryIDsWorkerPool(),
metrics: newNamespaceIndexMetrics(indexOpts, instrumentOpts),
+
+ doNotIndexWithFields: doNotIndexWithFields,
}
// Assign shard set upfront.
idx.AssignShardSet(shardSet)
- if runtimeOptsMgr != nil {
- idx.runtimeOptsListener = runtimeOptsMgr.RegisterListener(idx)
- }
+ idx.runtimeOptsListener = runtimeOptsMgr.RegisterListener(idx)
+ idx.runtimeNsOptsListener = idx.namespaceRuntimeOptsMgr.RegisterListener(idx)
// set up forward index dice.
dice, err := newForwardIndexDice(newIndexOpts.opts)
@@ -323,6 +361,21 @@ func newNamespaceIndexWithOptions(
return nil, err
}
+ if dice.enabled {
+ logger.Info("namespace forward indexing configured",
+ zap.Stringer("namespace", nsMD.ID()),
+ zap.Bool("enabled", dice.enabled),
+ zap.Duration("threshold", dice.forwardIndexThreshold),
+ zap.Float64("rate", dice.forwardIndexDice.Rate()))
+ } else {
+ idxOpts := newIndexOpts.opts.IndexOptions()
+ logger.Info("namespace forward indexing not enabled",
+ zap.Stringer("namespace", nsMD.ID()),
+ zap.Bool("enabled", false),
+ zap.Float64("threshold", idxOpts.ForwardIndexThreshold()),
+ zap.Float64("probability", idxOpts.ForwardIndexProbability()))
+ }
+
idx.forwardIndexDice = dice
// allocate indexing queue and start it up.
@@ -353,6 +406,15 @@ func (i *nsIndex) SetRuntimeOptions(value runtime.Options) {
i.state.Unlock()
}
+func (i *nsIndex) SetNamespaceRuntimeOptions(opts namespace.RuntimeOptions) {
+	// We don't want a log line from every single index segment that has its
+	// settings updated, so we log the changes once here instead.
+ i.logger.Info("set namespace runtime index options",
+ zap.Stringer("namespace", i.nsMetadata.ID()),
+ zap.Any("writeIndexingPerCPUConcurrency", opts.WriteIndexingPerCPUConcurrency()),
+ zap.Any("flushIndexingPerCPUConcurrency", opts.FlushIndexingPerCPUConcurrency()))
+}
+
func (i *nsIndex) reportStatsUntilClosed() {
ticker := time.NewTicker(nsIndexReportStatsInterval)
defer ticker.Stop()
@@ -379,15 +441,70 @@ func (i *nsIndex) reportStats() error {
i.state.RLock()
defer i.state.RUnlock()
- foregroundLevels := i.metrics.BlockMetrics.ForegroundSegments.Levels
+ foregroundLevels := i.metrics.blockMetrics.ForegroundSegments.Levels
foregroundLevelStats := make([]nsIndexCompactionLevelStats, len(foregroundLevels))
- backgroundLevels := i.metrics.BlockMetrics.BackgroundSegments.Levels
+ backgroundLevels := i.metrics.blockMetrics.BackgroundSegments.Levels
backgroundLevelStats := make([]nsIndexCompactionLevelStats, len(backgroundLevels))
- flushedLevels := i.metrics.BlockMetrics.FlushedSegments.Levels
+ flushedLevels := i.metrics.blockMetrics.FlushedSegments.Levels
flushedLevelStats := make([]nsIndexCompactionLevelStats, len(flushedLevels))
+ minIndexConcurrency := 0
+ maxIndexConcurrency := 0
+ sumIndexConcurrency := 0
+ numIndexingStats := 0
+ reporter := index.NewBlockStatsReporter(
+ func(s index.BlockSegmentStats) {
+ var (
+ levels []nsIndexBlocksSegmentsLevelMetrics
+ levelStats []nsIndexCompactionLevelStats
+ )
+ switch s.Type {
+ case index.ActiveForegroundSegment:
+ levels = foregroundLevels
+ levelStats = foregroundLevelStats
+ case index.ActiveBackgroundSegment:
+ levels = backgroundLevels
+ levelStats = backgroundLevelStats
+ case index.FlushedSegment:
+ levels = flushedLevels
+ levelStats = flushedLevelStats
+ }
+
+ for i, l := range levels {
+ contained := s.Size >= l.MinSizeInclusive && s.Size < l.MaxSizeExclusive
+ if !contained {
+ continue
+ }
+
+ l.SegmentsAge.Record(s.Age)
+ levelStats[i].numSegments++
+ levelStats[i].numTotalDocs += s.Size
+
+ break
+ }
+ },
+ func(s index.BlockIndexingStats) {
+ first := numIndexingStats == 0
+ numIndexingStats++
+
+ if first {
+ minIndexConcurrency = s.IndexConcurrency
+ maxIndexConcurrency = s.IndexConcurrency
+ sumIndexConcurrency = s.IndexConcurrency
+ return
+ }
+
+ if v := s.IndexConcurrency; v < minIndexConcurrency {
+ minIndexConcurrency = v
+ }
+ if v := s.IndexConcurrency; v > maxIndexConcurrency {
+ maxIndexConcurrency = v
+ }
+ sumIndexConcurrency += s.IndexConcurrency
+ })
+
// iterate known blocks in a defined order of time (newest first)
// for debug log ordering
for _, start := range i.state.blockStartsDescOrder {
@@ -396,37 +513,7 @@ func (i *nsIndex) reportStats() error {
return i.missingBlockInvariantError(start)
}
- err := block.Stats(
- index.BlockStatsReporterFn(func(s index.BlockSegmentStats) {
- var (
- levels []nsIndexBlocksSegmentsLevelMetrics
- levelStats []nsIndexCompactionLevelStats
- )
- switch s.Type {
- case index.ActiveForegroundSegment:
- levels = foregroundLevels
- levelStats = foregroundLevelStats
- case index.ActiveBackgroundSegment:
- levels = backgroundLevels
- levelStats = backgroundLevelStats
- case index.FlushedSegment:
- levels = flushedLevels
- levelStats = flushedLevelStats
- }
-
- for i, l := range levels {
- contained := s.Size >= l.MinSizeInclusive && s.Size < l.MaxSizeExclusive
- if !contained {
- continue
- }
-
- l.SegmentsAge.Record(s.Age)
- levelStats[i].numSegments++
- levelStats[i].numTotalDocs += s.Size
-
- break
- }
- }))
+ err := block.Stats(reporter)
if err == index.ErrUnableReportStatsBlockClosed {
// Closed blocks are temporarily in the list still
continue
@@ -436,6 +523,7 @@ func (i *nsIndex) reportStats() error {
}
}
+ // Update level stats.
for _, elem := range []struct {
levels []nsIndexBlocksSegmentsLevelMetrics
levelStats []nsIndexCompactionLevelStats
@@ -449,6 +537,12 @@ func (i *nsIndex) reportStats() error {
}
}
+	// Update the indexing stats.
+	i.metrics.indexingConcurrencyMin.Update(float64(minIndexConcurrency))
+	i.metrics.indexingConcurrencyMax.Update(float64(maxIndexConcurrency))
+	// Guard against updating the gauge with NaN when no blocks reported stats.
+	if numIndexingStats > 0 {
+		avgIndexConcurrency := float64(sumIndexConcurrency) / float64(numIndexingStats)
+		i.metrics.indexingConcurrencyAvg.Update(avgIndexConcurrency)
+	}
+
return nil
}
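The reporter above folds per-block indexing stats into min/max/avg gauges in a single streaming pass rather than buffering samples. A minimal, self-contained sketch of that aggregation pattern (the `concurrencyAgg` type is illustrative, not part of m3):

```go
package main

import "fmt"

// concurrencyAgg folds a stream of per-block concurrency samples into
// min/max/avg without storing the samples, mirroring the single-pass
// aggregation used by reportStats.
type concurrencyAgg struct {
	min, max, sum, n int
}

func (a *concurrencyAgg) observe(v int) {
	if a.n == 0 {
		a.min, a.max = v, v
	} else {
		if v < a.min {
			a.min = v
		}
		if v > a.max {
			a.max = v
		}
	}
	a.sum += v
	a.n++
}

func (a *concurrencyAgg) avg() float64 {
	if a.n == 0 {
		return 0 // avoid NaN when no blocks reported stats
	}
	return float64(a.sum) / float64(a.n)
}

func main() {
	var agg concurrencyAgg
	for _, v := range []int{4, 2, 8} {
		agg.observe(v)
	}
	fmt.Println(agg.min, agg.max, agg.avg()) // 2 8 4.666666666666667
}
```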
@@ -456,6 +550,10 @@ func (i *nsIndex) BlockStartForWriteTime(writeTime time.Time) xtime.UnixNano {
return xtime.ToUnixNano(writeTime.Truncate(i.blockSize))
}
+func (i *nsIndex) BlockForBlockStart(blockStart time.Time) (index.Block, error) {
+ return i.ensureBlockPresent(blockStart)
+}
+
// NB(prateek): including the call chains leading to this point:
//
// - For new entry (previously unseen in the shard):
@@ -483,7 +581,7 @@ func (i *nsIndex) WriteBatch(
i.state.RLock()
if !i.isOpenWithRLock() {
i.state.RUnlock()
- i.metrics.InsertAfterClose.Inc(1)
+ i.metrics.insertAfterClose.Inc(1)
err := errDbIndexUnableToWriteClosed
batch.MarkUnmarkedEntriesError(err)
return err
@@ -519,6 +617,22 @@ func (i *nsIndex) WriteBatch(
return nil
}
+func (i *nsIndex) WritePending(
+ pending []writes.PendingIndexInsert,
+) error {
+ i.state.RLock()
+ if !i.isOpenWithRLock() {
+ i.state.RUnlock()
+ i.metrics.insertAfterClose.Inc(1)
+ return errDbIndexUnableToWriteClosed
+ }
+ _, err := i.state.insertQueue.InsertPending(pending)
+ // release the lock because we don't need it past this point.
+ i.state.RUnlock()
+
+ return err
+}
+
// WriteBatches is called by the indexInsertQueue.
func (i *nsIndex) writeBatches(
batch *index.WriteBatch,
@@ -534,13 +648,18 @@ func (i *nsIndex) writeBatches(
return
}
var (
- now = i.nowFn()
- blockSize = i.blockSize
- futureLimit = now.Add(1 * i.bufferFuture)
- pastLimit = now.Add(-1 * i.bufferPast)
- batchOptions = batch.Options()
- forwardIndexDice = i.forwardIndexDice
- forwardIndexEnabled = forwardIndexDice.enabled
+ now = i.nowFn()
+ blockSize = i.blockSize
+ futureLimit = now.Add(1 * i.bufferFuture)
+ pastLimit = now.Add(-1 * i.bufferPast)
+ earliestBlockStartToRetain = retention.FlushTimeStartForRetentionPeriod(i.retentionPeriod, i.blockSize, now)
+ batchOptions = batch.Options()
+ forwardIndexDice = i.forwardIndexDice
+ forwardIndexEnabled = forwardIndexDice.enabled
+ total int
+ notSkipped int
+ forwardIndexHits int
+ forwardIndexMiss int
forwardIndexBatch *index.WriteBatch
)
@@ -555,35 +674,76 @@ func (i *nsIndex) writeBatches(
// is not enabled.
forwardIndexBatch = index.NewWriteBatch(batchOptions)
}
+
// Ensure timestamp is not too old/new based on retention policies and that
// doc is valid. Add potential forward writes to the forwardWriteBatch.
batch.ForEach(
func(idx int, entry index.WriteBatchEntry,
d doc.Document, _ index.WriteBatchEntryResult) {
+ total++
+
+ if len(i.doNotIndexWithFields) != 0 {
+			// This feature is rarely used, so don't optimize; just do n*m checks.
+ drop := true
+ for _, matchField := range i.doNotIndexWithFields {
+ matchedField := false
+ for _, actualField := range d.Fields {
+						if bytes.Equal(actualField.Name, matchField.Name) {
+							matchedField = bytes.Equal(actualField.Value, matchField.Value)
+ break
+ }
+ }
+ if !matchedField {
+ drop = false
+ break
+ }
+ }
+ if drop {
+ batch.MarkUnmarkedEntryError(errDbIndexDoNotIndexSeries, idx)
+ return
+ }
+ }
+
ts := entry.Timestamp
+ // NB(bodu): Always check first to see if the write is within retention.
+ if !ts.After(earliestBlockStartToRetain) {
+ batch.MarkUnmarkedEntryError(m3dberrors.ErrTooPast, idx)
+ return
+ }
+
if !futureLimit.After(ts) {
batch.MarkUnmarkedEntryError(m3dberrors.ErrTooFuture, idx)
return
}
- if !ts.After(pastLimit) {
+ if ts.Before(pastLimit) && !i.coldWritesEnabled {
+ // NB(bodu): We only mark entries as too far in the past if
+ // cold writes are not enabled.
batch.MarkUnmarkedEntryError(m3dberrors.ErrTooPast, idx)
return
}
- if forwardIndexDice.roll(ts) {
- forwardEntryTimestamp := ts.Truncate(blockSize).Add(blockSize)
- xNanoTimestamp := xtime.ToUnixNano(forwardEntryTimestamp)
- if entry.OnIndexSeries.NeedsIndexUpdate(xNanoTimestamp) {
- forwardIndexEntry := entry
- forwardIndexEntry.Timestamp = forwardEntryTimestamp
- forwardIndexEntry.OnIndexSeries.OnIndexPrepare()
- forwardIndexBatch.Append(forwardIndexEntry, d)
+ if forwardIndexEnabled {
+ if forwardIndexDice.roll(ts) {
+ forwardIndexHits++
+ forwardEntryTimestamp := ts.Truncate(blockSize).Add(blockSize)
+ xNanoTimestamp := xtime.ToUnixNano(forwardEntryTimestamp)
+ if entry.OnIndexSeries.NeedsIndexUpdate(xNanoTimestamp) {
+ forwardIndexEntry := entry
+ forwardIndexEntry.Timestamp = forwardEntryTimestamp
+ forwardIndexEntry.OnIndexSeries.OnIndexPrepare()
+ forwardIndexBatch.Append(forwardIndexEntry, d)
+ }
+ } else {
+ forwardIndexMiss++
}
}
+
+ notSkipped++
})
if forwardIndexEnabled && forwardIndexBatch.Len() > 0 {
+ i.metrics.forwardIndexCounter.Inc(int64(forwardIndexBatch.Len()))
batch.AppendAll(forwardIndexBatch)
}
@@ -591,11 +751,22 @@ func (i *nsIndex) writeBatches(
// for each block, making sure to not try to insert any entries already marked
// with a result.
batch.ForEachUnmarkedBatchByBlockStart(i.writeBatchForBlockStart)
+
+ // Track index insertions.
+ // Note: attemptTotal should = attemptSkip + attemptWrite.
+ i.metrics.asyncInsertAttemptTotal.Inc(int64(total))
+ i.metrics.asyncInsertAttemptSkip.Inc(int64(total - notSkipped))
+ i.metrics.forwardIndexHits.Inc(int64(forwardIndexHits))
+ i.metrics.forwardIndexMisses.Inc(int64(forwardIndexMiss))
}
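The do-not-index check above only drops a series when it matches every configured field, value included. A standalone sketch of that n*m match under simplified types (`field` and `shouldDrop` are illustrative, not the m3ninx API):

```go
package main

import (
	"bytes"
	"fmt"
)

type field struct{ name, value []byte }

// shouldDrop reports whether doc carries every do-not-index field with a
// matching value; only then is the series skipped from indexing.
func shouldDrop(doNotIndex, doc []field) bool {
	if len(doNotIndex) == 0 {
		return false
	}
	for _, match := range doNotIndex {
		matched := false
		for _, actual := range doc {
			if bytes.Equal(actual.name, match.name) {
				matched = bytes.Equal(actual.value, match.value)
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}

func main() {
	doNotIndex := []field{{[]byte("role"), []byte("canary")}}
	doc := []field{
		{[]byte("role"), []byte("canary")},
		{[]byte("host"), []byte("a")},
	}
	fmt.Println(shouldDrop(doNotIndex, doc)) // true
}
```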
func (i *nsIndex) writeBatchForBlockStart(
blockStart time.Time, batch *index.WriteBatch,
) {
+ // NB(r): Capture pending entries so we can emit the latencies
+ pending := batch.PendingEntries()
+ numPending := len(pending)
+
// NB(r): Notice we acquire each lock only to take a reference to the
// block we release it so we don't block the tick, etc when we insert
// batches since writing batches can take significant time when foreground
@@ -608,12 +779,13 @@ func (i *nsIndex) writeBatchForBlockStart(
zap.Int("numWrites", batch.Len()),
zap.Error(err),
)
- i.metrics.AsyncInsertErrors.Inc(int64(batch.Len()))
+ i.metrics.asyncInsertErrors.Inc(int64(numPending))
return
}
- // NB(r): Capture pending entries so we can emit the latencies
- pending := batch.PendingEntries()
+ // Track attempted write.
+ // Note: attemptTotal should = attemptSkip + attemptWrite.
+ i.metrics.asyncInsertAttemptWrite.Inc(int64(numPending))
// i.e. we have the block and the inserts, perform the writes.
result, err := block.WriteBatch(batch)
@@ -622,27 +794,25 @@ func (i *nsIndex) writeBatchForBlockStart(
now := i.nowFn()
for idx := range pending {
took := now.Sub(pending[idx].EnqueuedAt)
- i.metrics.InsertEndToEndLatency.Record(took)
+ i.metrics.insertEndToEndLatency.Record(took)
}
// NB: we don't need to do anything to the OnIndexSeries refs in `inserts` at this point,
// the index.Block WriteBatch assumes responsibility for calling the appropriate methods.
if n := result.NumSuccess; n > 0 {
- i.metrics.AsyncInsertSuccess.Inc(n)
- }
- if n := result.NumError; n > 0 {
- i.metrics.AsyncInsertErrors.Inc(n)
+ i.metrics.asyncInsertSuccess.Inc(n)
}
- if err != nil {
- // NB: dropping duplicate id error messages from logs as they're expected when we see
- // repeated inserts. as long as a block has an ID, it's not an error so we don't need
- // to pollute the logs with these messages.
+	// Allow duplicate write errors since, due to re-indexing races,
+	// we may try to re-index a series more than once.
+ if err := i.sanitizeAllowDuplicatesWriteError(err); err != nil {
+ numErrors := numPending - int(result.NumSuccess)
if partialError, ok := err.(*m3ninxindex.BatchPartialError); ok {
- err = partialError.FilterDuplicateIDErrors()
+ // If it was a batch partial error we know exactly how many failed
+ // after filtering out for duplicate ID errors.
+ numErrors = len(partialError.Errs())
}
- }
- if err != nil {
+ i.metrics.asyncInsertErrors.Inc(int64(numErrors))
i.logger.Error("error writing to index block", zap.Error(err))
}
}
@@ -694,7 +864,6 @@ func (i *nsIndex) Tick(c context.Cancellable, startTime time.Time) (namespaceInd
var (
result = namespaceIndexTickResult{}
earliestBlockStartToRetain = retention.FlushTimeStartForRetentionPeriod(i.retentionPeriod, i.blockSize, startTime)
- lastSealableBlockStart = retention.FlushTimeEndForBlockSize(i.blockSize, startTime.Add(-i.bufferPast))
)
i.state.Lock()
@@ -725,10 +894,13 @@ func (i *nsIndex) Tick(c context.Cancellable, startTime time.Time) (namespaceInd
blockTickResult, tickErr := block.Tick(c)
multiErr = multiErr.Add(tickErr)
result.NumSegments += blockTickResult.NumSegments
+ result.NumSegmentsBootstrapped += blockTickResult.NumSegmentsBootstrapped
+ result.NumSegmentsMutable += blockTickResult.NumSegmentsMutable
result.NumTotalDocs += blockTickResult.NumDocs
+ result.FreeMmap += blockTickResult.FreeMmap
// seal any blocks that are sealable
- if !blockStart.ToTime().After(lastSealableBlockStart) && !block.IsSealed() {
+ if !blockStart.ToTime().After(i.lastSealableBlockStart(startTime)) && !block.IsSealed() {
multiErr = multiErr.Add(block.Seal())
result.NumBlocksSealed++
}
@@ -737,20 +909,39 @@ func (i *nsIndex) Tick(c context.Cancellable, startTime time.Time) (namespaceInd
return result, multiErr.FinalError()
}
-func (i *nsIndex) Flush(
+func (i *nsIndex) WarmFlush(
flush persist.IndexFlush,
shards []databaseShard,
) error {
- flushable, err := i.flushableBlocks(shards)
+ if len(shards) == 0 {
+ // No-op if no shards currently owned.
+ return nil
+ }
+
+ flushable, err := i.flushableBlocks(shards, series.WarmWrite)
if err != nil {
return err
}
- builderOpts := i.opts.IndexOptions().SegmentBuilderOptions()
+ // Determine the current flush indexing concurrency.
+ namespaceRuntimeOpts := i.namespaceRuntimeOptsMgr.Get()
+ perCPUFraction := namespaceRuntimeOpts.FlushIndexingPerCPUConcurrencyOrDefault()
+ cpus := math.Ceil(perCPUFraction * float64(goruntime.NumCPU()))
+ concurrency := int(math.Max(1, cpus))
+
+ builderOpts := i.opts.IndexOptions().SegmentBuilderOptions().
+ SetConcurrency(concurrency)
+
builder, err := builder.NewBuilderFromDocuments(builderOpts)
if err != nil {
return err
}
+ defer builder.Close()
+
+ // Emit concurrency, then reset gauge to zero to show time
+ // active during flushing broken down per namespace.
+ i.metrics.flushIndexingConcurrency.Update(float64(concurrency))
+ defer i.metrics.flushIndexingConcurrency.Update(0)
var evicted int
for _, block := range flushable {
@@ -760,11 +951,18 @@ func (i *nsIndex) Flush(
}
// Make a result that covers the entire time ranges for the
// block for each shard
- fulfilled := result.NewShardTimeRanges(block.StartTime(), block.EndTime(),
+ fulfilled := result.NewShardTimeRangesFromRange(block.StartTime(), block.EndTime(),
dbShards(shards).IDs()...)
- // Add the results to the block
- results := result.NewIndexBlock(block.StartTime(), immutableSegments,
- fulfilled)
+
+ // Add the results to the block.
+ persistedSegments := make([]result.Segment, 0, len(immutableSegments))
+ for _, elem := range immutableSegments {
+ persistedSegment := result.NewSegment(elem, true)
+ persistedSegments = append(persistedSegments, persistedSegment)
+ }
+ blockResult := result.NewIndexBlock(persistedSegments, fulfilled)
+ results := result.NewIndexBlockByVolumeType(block.StartTime())
+ results.SetBlock(idxpersist.DefaultIndexVolumeType, blockResult)
if err := block.AddResults(results); err != nil {
return err
}
@@ -782,47 +980,108 @@ func (i *nsIndex) Flush(
)
}
}
- i.metrics.BlocksEvictedMutableSegments.Inc(int64(evicted))
+ i.metrics.blocksEvictedMutableSegments.Inc(int64(evicted))
return nil
}
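WarmFlush sizes its segment builder from a per-CPU fraction carried in the namespace runtime options. A hedged sketch of just that arithmetic (the function name is illustrative):

```go
package main

import (
	"fmt"
	"math"
	"runtime"
)

// flushConcurrency converts a per-CPU fraction (e.g. 0.25) into a
// worker count, always allowing at least one worker.
func flushConcurrency(perCPUFraction float64) int {
	cpus := math.Ceil(perCPUFraction * float64(runtime.NumCPU()))
	return int(math.Max(1, cpus))
}

func main() {
	fmt.Println(flushConcurrency(0.25)) // e.g. 2 on an 8-CPU host
}
```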
+func (i *nsIndex) ColdFlush(shards []databaseShard) (OnColdFlushDone, error) {
+ if len(shards) == 0 {
+ // No-op if no shards currently owned.
+ return func() error { return nil }, nil
+ }
+
+ flushable, err := i.flushableBlocks(shards, series.ColdWrite)
+ if err != nil {
+ return nil, err
+ }
+ // We only rotate cold mutable segments in phase I of cold flushing.
+ for _, block := range flushable {
+ block.RotateColdMutableSegments()
+ }
+	// We can't immediately evict cold mutable segments, so we return a callback
+	// to do so when the cold flush finishes.
+ return func() error {
+ multiErr := xerrors.NewMultiError()
+ for _, block := range flushable {
+ multiErr = multiErr.Add(block.EvictColdMutableSegments())
+ }
+ return multiErr.FinalError()
+ }, nil
+}
+
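ColdFlush intentionally splits its work: segments rotate up front, but eviction is deferred to the returned callback so in-memory data stays queryable until the flush has made it durable. A minimal sketch of that two-phase callback pattern:

```go
package main

import "fmt"

type segment struct{ evicted bool }

// beginColdFlush rotates segments immediately (rotation not shown) and
// returns a callback that evicts them only once the caller has
// persisted everything to disk.
func beginColdFlush(segs []*segment) (onDone func() error) {
	return func() error {
		// Phase 2: evict only after the cold flush has completed.
		for _, s := range segs {
			s.evicted = true
		}
		return nil
	}
}

func main() {
	segs := []*segment{{}, {}}
	done := beginColdFlush(segs)
	// ... persist segments to disk ...
	if err := done(); err != nil {
		fmt.Println("evict failed:", err)
	}
	fmt.Println(segs[0].evicted, segs[1].evicted) // true true
}
```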
func (i *nsIndex) flushableBlocks(
shards []databaseShard,
+ flushType series.WriteType,
) ([]index.Block, error) {
i.state.RLock()
defer i.state.RUnlock()
if !i.isOpenWithRLock() {
return nil, errDbIndexUnableToFlushClosed
}
+ // NB(bodu): We read index info files once here to avoid re-reading all of them
+ // for each block.
+ fsOpts := i.opts.CommitLogOptions().FilesystemOptions()
+ infoFiles := i.readIndexInfoFilesFn(
+ fsOpts.FilePathPrefix(),
+ i.nsMetadata.ID(),
+ fsOpts.InfoReaderBufferSize(),
+ )
flushable := make([]index.Block, 0, len(i.state.blocksByTime))
- for _, block := range i.state.blocksByTime {
- canFlush, err := i.canFlushBlock(block, shards)
+
+ now := i.nowFn()
+ earliestBlockStartToRetain := retention.FlushTimeStartForRetentionPeriod(i.retentionPeriod, i.blockSize, now)
+ currentBlockStart := now.Truncate(i.blockSize)
+ // Check for flushable blocks by iterating through all block starts w/in retention.
+ for blockStart := earliestBlockStartToRetain; blockStart.Before(currentBlockStart); blockStart = blockStart.Add(i.blockSize) {
+ block, err := i.ensureBlockPresentWithRLock(blockStart)
+ if err != nil {
+ return nil, err
+ }
+
+ canFlush, err := i.canFlushBlockWithRLock(infoFiles, now, blockStart,
+ block, shards, flushType)
if err != nil {
return nil, err
}
if !canFlush {
continue
}
+
flushable = append(flushable, block)
}
return flushable, nil
}
-func (i *nsIndex) canFlushBlock(
+func (i *nsIndex) canFlushBlockWithRLock(
+ infoFiles []fs.ReadIndexInfoFileResult,
+ startTime time.Time,
+ blockStart time.Time,
block index.Block,
shards []databaseShard,
+ flushType series.WriteType,
) (bool, error) {
- // Check the block needs flushing because it is sealed and has
- // any mutable segments that need to be evicted from memory
- if !block.IsSealed() || !block.NeedsMutableSegmentsEvicted() {
- return false, nil
+ switch flushType {
+ case series.WarmWrite:
+ // NB(bodu): We should always attempt to warm flush sealed blocks to disk if
+ // there doesn't already exist data on disk. We're checking this instead of
+ // `block.NeedsMutableSegmentsEvicted()` since bootstrap writes for cold block starts
+ // get marked as warm writes if there doesn't already exist data on disk and need to
+ // properly go through the warm flush lifecycle.
+ if !block.IsSealed() || i.hasIndexWarmFlushedToDisk(infoFiles, blockStart) {
+ return false, nil
+ }
+ case series.ColdWrite:
+ if !block.NeedsColdMutableSegmentsEvicted() {
+ return false, nil
+ }
}
// Check all data files exist for the shards we own
for _, shard := range shards {
- start := block.StartTime()
+ start := blockStart
+ end := blockStart.Add(i.blockSize)
dataBlockSize := i.nsMetadata.Options().RetentionOptions().BlockSize()
- for t := start; t.Before(block.EndTime()); t = t.Add(dataBlockSize) {
+ for t := start; t.Before(end); t = t.Add(dataBlockSize) {
flushState, err := shard.FlushState(t)
if err != nil {
return false, err
@@ -836,6 +1095,26 @@ func (i *nsIndex) canFlushBlock(
return true, nil
}
+func (i *nsIndex) hasIndexWarmFlushedToDisk(
+ infoFiles []fs.ReadIndexInfoFileResult,
+ blockStart time.Time,
+) bool {
+ var hasIndexWarmFlushedToDisk bool
+ // NB(bodu): We consider the block to have been warm flushed if there are any
+ // filesets on disk. This is consistent with the "has warm flushed" check in the db shard.
+ // Shard block starts are marked as having warm flushed if an info file is successfully read from disk.
+ for _, f := range infoFiles {
+ indexVolumeType := idxpersist.DefaultIndexVolumeType
+ if f.Info.IndexVolumeType != nil {
+ indexVolumeType = idxpersist.IndexVolumeType(f.Info.IndexVolumeType.Value)
+ }
+ if f.ID.BlockStart == blockStart && indexVolumeType == idxpersist.DefaultIndexVolumeType {
+ hasIndexWarmFlushedToDisk = true
+ }
+ }
+ return hasIndexWarmFlushedToDisk
+}
+
func (i *nsIndex) flushBlock(
flush persist.IndexFlush,
indexBlock index.Block,
@@ -853,6 +1132,8 @@ func (i *nsIndex) flushBlock(
BlockStart: indexBlock.StartTime(),
FileSetType: persist.FileSetFlushType,
Shards: allShards,
+ // NB(bodu): By default, we always write to the "default" index volume type.
+ IndexVolumeType: idxpersist.DefaultIndexVolumeType,
})
if err != nil {
return nil, err
@@ -887,9 +1168,15 @@ func (i *nsIndex) flushBlockSegment(
builder segment.DocumentsBuilder,
) error {
// Reset the builder
- builder.Reset(0)
+ builder.Reset()
+ var (
+ batch = m3ninxindex.Batch{AllowPartialUpdates: true}
+ batchSize = defaultFlushDocsBatchSize
+ )
ctx := i.opts.ContextPool().Get()
+ defer ctx.Close()
+
for _, shard := range shards {
var (
first = true
@@ -899,7 +1186,14 @@ func (i *nsIndex) flushBlockSegment(
first = false
var (
- opts = block.FetchBlocksMetadataOptions{}
+ opts = block.FetchBlocksMetadataOptions{
+ // NB(bodu): There is a lag between when data gets flushed
+ // to disk and when it gets removed from memory during the next
+ // Tick. In this case, the same series can exist both on disk
+ // and in memory at the same time resulting in dupe series IDs.
+ // Only read data from disk when flushing index segments.
+ OnlyDisk: true,
+ }
limit = defaultFlushReadDataBlocksBatchSize
results block.FetchBlocksMetadataResults
err error
@@ -912,14 +1206,41 @@ func (i *nsIndex) flushBlockSegment(
return err
}
+ // Reset docs batch before use.
+ batch.Docs = batch.Docs[:0]
for _, result := range results.Results() {
- doc, err := convert.FromMetricIter(result.ID, result.Tags)
+ doc, exists, err := shard.DocRef(result.ID)
if err != nil {
return err
}
+ if !exists {
+ doc, err = convert.FromSeriesIDAndTagIter(result.ID, result.Tags)
+ if err != nil {
+ return err
+ }
+ i.metrics.flushDocsNew.Inc(1)
+ } else {
+ i.metrics.flushDocsCached.Inc(1)
+ }
- _, err = builder.Insert(doc)
- if err != nil && err != m3ninxindex.ErrDuplicateID {
+ batch.Docs = append(batch.Docs, doc)
+ if len(batch.Docs) < batchSize {
+ continue
+ }
+
+ err = i.sanitizeAllowDuplicatesWriteError(builder.InsertBatch(batch))
+ if err != nil {
+ return err
+ }
+
+ // Reset docs after insertions.
+ batch.Docs = batch.Docs[:0]
+ }
+
+ // Add last batch if remaining.
+ if len(batch.Docs) > 0 {
+ err := i.sanitizeAllowDuplicatesWriteError(builder.InsertBatch(batch))
+ if err != nil {
return err
}
}
@@ -936,12 +1257,29 @@ func (i *nsIndex) flushBlockSegment(
return preparedPersist.Persist(builder)
}
+func (i *nsIndex) sanitizeAllowDuplicatesWriteError(err error) error {
+ if err == nil {
+ return nil
+ }
+
+ // NB: dropping duplicate id error messages from logs as they're expected when we see
+ // repeated inserts. as long as a block has an ID, it's not an error so we don't need
+ // to pollute the logs with these messages.
+ if partialError, ok := err.(*m3ninxindex.BatchPartialError); ok {
+ err = partialError.FilterDuplicateIDErrors()
+ }
+
+ return err
+}
+
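Duplicate-ID errors are expected whenever re-indexing races cause the same series to be inserted twice, so the helper above strips them before counting or logging. A sketch of the same filtering idea with a simplified partial-error type (not the m3ninx `BatchPartialError`):

```go
package main

import (
	"errors"
	"fmt"
)

var errDuplicateID = errors.New("duplicate ID")

// partialError collects per-document errors from a batch insert.
type partialError struct{ errs []error }

func (p *partialError) Error() string { return fmt.Sprintf("%d errors", len(p.errs)) }

// filterDuplicateIDErrors drops expected duplicate-ID errors; if none
// remain, the whole batch is treated as a success (nil error).
func filterDuplicateIDErrors(err error) error {
	var p *partialError
	if !errors.As(err, &p) {
		return err
	}
	remaining := p.errs[:0]
	for _, e := range p.errs {
		if !errors.Is(e, errDuplicateID) {
			remaining = append(remaining, e)
		}
	}
	if len(remaining) == 0 {
		return nil
	}
	return &partialError{errs: remaining}
}

func main() {
	err := &partialError{errs: []error{errDuplicateID, errDuplicateID}}
	fmt.Println(filterDuplicateIDErrors(err)) // <nil>
}
```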
func (i *nsIndex) AssignShardSet(shardSet sharding.ShardSet) {
// NB(r): Allocate the filter function once, it can be used outside
// of locks as it depends on no internal state.
set := bitset.NewBitSet(uint(shardSet.Max()))
+ assigned := make(map[uint32]struct{})
for _, shardID := range shardSet.AllIDs() {
set.Set(uint(shardID))
+ assigned[shardID] = struct{}{}
}
i.state.Lock()
@@ -949,6 +1287,7 @@ func (i *nsIndex) AssignShardSet(shardSet sharding.ShardSet) {
// NB(r): Use a bitset for fast lookups.
return set.Test(uint(shardSet.Lookup(id)))
}
+ i.state.shardsAssigned = assigned
i.state.Unlock()
}
@@ -967,7 +1306,8 @@ func (i *nsIndex) Query(
logFields := []opentracinglog.Field{
opentracinglog.String("query", query.String()),
opentracinglog.String("namespace", i.nsMetadata.ID().String()),
- opentracinglog.Int("limit", opts.Limit),
+ opentracinglog.Int("seriesLimit", opts.SeriesLimit),
+ opentracinglog.Int("docsLimit", opts.DocsLimit),
xopentracing.Time("queryStart", opts.StartInclusive),
xopentracing.Time("queryEnd", opts.EndExclusive),
}
@@ -979,7 +1319,7 @@ func (i *nsIndex) Query(
// Get results and set the namespace ID and size limit.
results := i.resultsPool.Get()
results.Reset(i.nsMetadata.ID(), index.QueryResultsOptions{
- SizeLimit: opts.Limit,
+ SizeLimit: opts.SeriesLimit,
FilterID: i.shardsFilterID(),
})
ctx.RegisterFinalizer(results)
@@ -1002,7 +1342,8 @@ func (i *nsIndex) AggregateQuery(
logFields := []opentracinglog.Field{
opentracinglog.String("query", query.String()),
opentracinglog.String("namespace", i.nsMetadata.ID().String()),
- opentracinglog.Int("limit", opts.Limit),
+ opentracinglog.Int("seriesLimit", opts.SeriesLimit),
+ opentracinglog.Int("docsLimit", opts.DocsLimit),
xopentracing.Time("queryStart", opts.StartInclusive),
xopentracing.Time("queryEnd", opts.EndExclusive),
}
@@ -1014,22 +1355,25 @@ func (i *nsIndex) AggregateQuery(
// Get results and set the filters, namespace ID and size limit.
results := i.aggregateResultsPool.Get()
aopts := index.AggregateResultsOptions{
- SizeLimit: opts.Limit,
+ SizeLimit: opts.SeriesLimit,
FieldFilter: opts.FieldFilter,
Type: opts.Type,
}
ctx.RegisterFinalizer(results)
// use appropriate fn to query underlying blocks.
- // default to block.Query()
- fn := i.execBlockQueryFn
- // use block.Aggregate() when possible
- if query.Equal(allQuery) {
- fn = i.execBlockAggregateQueryFn
- }
- field, isField := idx.FieldQuery(query.Query)
- if isField {
- fn = i.execBlockAggregateQueryFn
- aopts.FieldFilter = aopts.FieldFilter.AddIfMissing(field)
+ // use block.Aggregate() for querying and set the query if required.
+ fn := i.execBlockAggregateQueryFn
+ isAllQuery := query.Equal(allQuery)
+ if !isAllQuery {
+ if field, isFieldQuery := idx.FieldQuery(query.Query); isFieldQuery {
+ aopts.FieldFilter = aopts.FieldFilter.AddIfMissing(field)
+ } else {
+ // Need to actually restrict whether we should return a term or not
+ // based on running the actual query to resolve a postings list and
+ // then seeing if that intersects the aggregated term postings list
+ // at all.
+ aopts.RestrictByQuery = &query
+ }
}
aopts.FieldFilter = aopts.FieldFilter.SortAndDedupe()
results.Reset(i.nsMetadata.ID(), aopts)
@@ -1058,9 +1402,47 @@ func (i *nsIndex) query(
exhaustive, err := i.queryWithSpan(ctx, query, results, opts, execBlockFn, sp, logFields)
if err != nil {
sp.LogFields(opentracinglog.Error(err))
+
+ if exhaustive {
+ i.metrics.queryExhaustiveInternalError.Inc(1)
+ } else {
+ i.metrics.queryNonExhaustiveInternalError.Inc(1)
+ }
+ return exhaustive, err
+ }
+
+ if exhaustive {
+ i.metrics.queryExhaustiveSuccess.Inc(1)
+ return exhaustive, nil
}
- return exhaustive, err
+	// If the query was required to be exhaustive but was not, return an error.
+ if opts.RequireExhaustive {
+ seriesCount := results.Size()
+ docsCount := results.TotalDocsCount()
+ if opts.SeriesLimitExceeded(seriesCount) {
+ i.metrics.queryNonExhaustiveSeriesLimitError.Inc(1)
+ } else if opts.DocsLimitExceeded(docsCount) {
+ i.metrics.queryNonExhaustiveDocsLimitError.Inc(1)
+ } else {
+ i.metrics.queryNonExhaustiveLimitError.Inc(1)
+ }
+
+ err := fmt.Errorf(
+ "query exceeded limit: require_exhaustive=%v, series_limit=%d, series_matched=%d, docs_limit=%d, docs_matched=%d",
+ opts.RequireExhaustive,
+ opts.SeriesLimit,
+ seriesCount,
+ opts.DocsLimit,
+ docsCount,
+ )
+ // NB(r): Make sure error is not retried and returns as bad request.
+ return exhaustive, xerrors.NewInvalidParamsError(err)
+ }
+
+ // Otherwise non-exhaustive but not required to be.
+ i.metrics.queryNonExhaustiveSuccess.Inc(1)
+ return exhaustive, nil
}
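When `RequireExhaustive` is set and a limit tripped, the error path above records which limit was responsible so the metrics stay distinguishable. A condensed sketch of that classification (assuming, as a simplification, that a limit of zero means unlimited and the boundary check is `>=`):

```go
package main

import "fmt"

// limitKind names which query limit halted result collection.
func limitKind(seriesCount, seriesLimit, docsCount, docsLimit int) string {
	exceeded := func(count, limit int) bool { return limit > 0 && count >= limit }
	switch {
	case exceeded(seriesCount, seriesLimit):
		return "series"
	case exceeded(docsCount, docsLimit):
		return "docs"
	default:
		return "unknown" // some other path marked the query non-exhaustive
	}
}

func main() {
	fmt.Println(limitKind(1000, 1000, 42, 0)) // series
}
```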
func (i *nsIndex) queryWithSpan(
@@ -1130,8 +1512,9 @@ func (i *nsIndex) queryWithSpan(
// number of results that we're allowed to return. If thats the case, there
// is no value in kicking off more parallel queries, so we break out of
// the loop.
- size := results.Size()
- alreadyExceededLimit := opts.LimitExceeded(size)
+ seriesCount := results.Size()
+ docsCount := results.TotalDocsCount()
+ alreadyExceededLimit := opts.SeriesLimitExceeded(seriesCount) || opts.DocsLimitExceeded(docsCount)
if alreadyExceededLimit {
state.Lock()
state.exhaustive = false
@@ -1207,6 +1590,8 @@ func (i *nsIndex) queryWithSpan(
}
}
+ i.metrics.loadedDocsPerQuery.RecordValue(float64(results.TotalDocsCount()))
+
state.Lock()
// Take reference to vars to return while locked.
exhaustive := state.exhaustive
@@ -1216,7 +1601,6 @@ func (i *nsIndex) queryWithSpan(
if err != nil {
return false, err
}
-
return exhaustive, nil
}
@@ -1315,13 +1699,20 @@ func (i *nsIndex) timeoutForQueryWithRLock(
func (i *nsIndex) overriddenOptsForQueryWithRLock(
opts index.QueryOptions,
) index.QueryOptions {
- // Override query response limit if needed.
- if i.state.runtimeOpts.maxQueryLimit > 0 && (opts.Limit == 0 ||
- int64(opts.Limit) > i.state.runtimeOpts.maxQueryLimit) {
- i.logger.Debug("overriding query response limit",
- zap.Int("requested", opts.Limit),
- zap.Int64("maxAllowed", i.state.runtimeOpts.maxQueryLimit)) // FOLLOWUP(prateek): log query too once it's serializable.
- opts.Limit = int(i.state.runtimeOpts.maxQueryLimit)
+ // Override query response limits if needed.
+ if i.state.runtimeOpts.maxQuerySeriesLimit > 0 && (opts.SeriesLimit == 0 ||
+ int64(opts.SeriesLimit) > i.state.runtimeOpts.maxQuerySeriesLimit) {
+ i.logger.Debug("overriding query response series limit",
+ zap.Int("requested", opts.SeriesLimit),
+ zap.Int64("maxAllowed", i.state.runtimeOpts.maxQuerySeriesLimit)) // FOLLOWUP(prateek): log query too once it's serializable.
+ opts.SeriesLimit = int(i.state.runtimeOpts.maxQuerySeriesLimit)
+ }
+ if i.state.runtimeOpts.maxQueryDocsLimit > 0 && (opts.DocsLimit == 0 ||
+ int64(opts.DocsLimit) > i.state.runtimeOpts.maxQueryDocsLimit) {
+ i.logger.Debug("overriding query response docs limit",
+ zap.Int("requested", opts.DocsLimit),
+ zap.Int64("maxAllowed", i.state.runtimeOpts.maxQueryDocsLimit)) // FOLLOWUP(prateek): log query too once it's serializable.
+ opts.DocsLimit = int(i.state.runtimeOpts.maxQueryDocsLimit)
}
return opts
}
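Both overrides follow one rule: a runtime maximum of zero means no cap, and a requested limit of zero (or anything above the cap) is clamped down to it. A tiny sketch of that clamp:

```go
package main

import "fmt"

// clampLimit applies a runtime maximum, where 0 means "no cap" for max
// and "unlimited" for requested.
func clampLimit(requested int, max int64) int {
	if max > 0 && (requested == 0 || int64(requested) > max) {
		return int(max)
	}
	return requested
}

func main() {
	fmt.Println(clampLimit(0, 500))    // 500 (unlimited request clamped)
	fmt.Println(clampLimit(100, 500))  // 100
	fmt.Println(clampLimit(9999, 500)) // 500
}
```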
@@ -1352,7 +1743,7 @@ func (i *nsIndex) blocksForQueryWithRLock(queryRange xtime.Ranges) ([]index.Bloc
}
// Remove this range from the query range.
- queryRange = queryRange.RemoveRange(blockRange)
+ queryRange.RemoveRange(blockRange)
blocks = append(blocks, block)
}
@@ -1407,11 +1798,19 @@ func (i *nsIndex) ensureBlockPresentWithRLock(blockStart time.Time) (index.Block
// ok now we know for sure we have to alloc
block, err := i.newBlockFn(blockStart, i.nsMetadata,
- index.BlockOptions{}, i.opts.IndexOptions())
+ index.BlockOptions{}, i.namespaceRuntimeOptsMgr, i.opts.IndexOptions())
if err != nil { // unable to allocate the block, should never happen.
return nil, i.unableToAllocBlockInvariantError(err)
}
+ // NB(bodu): Use same time barrier as `Tick` to make sealing of cold index blocks consistent.
+	// We need to seal cold blocks right away for cold writes.
+ if !blockStart.After(i.lastSealableBlockStart(i.nowFn())) {
+ if err := block.Seal(); err != nil {
+ return nil, err
+ }
+ }
+
// add to tracked blocks map
i.state.blocksByTime[blockStartNanos] = block
@@ -1420,6 +1819,10 @@ func (i *nsIndex) ensureBlockPresentWithRLock(blockStart time.Time) (index.Block
return block, nil
}
+func (i *nsIndex) lastSealableBlockStart(t time.Time) time.Time {
+ return retention.FlushTimeEndForBlockSize(i.blockSize, t.Add(-i.bufferPast))
+}
+
func (i *nsIndex) updateBlockStartsWithLock() {
// update ordered blockStarts slice
var (
@@ -1484,6 +1887,128 @@ func (i *nsIndex) CleanupExpiredFileSets(t time.Time) error {
return i.deleteFilesFn(filesets)
}
+func (i *nsIndex) CleanupDuplicateFileSets() error {
+ fsOpts := i.opts.CommitLogOptions().FilesystemOptions()
+ infoFiles := i.readIndexInfoFilesFn(
+ fsOpts.FilePathPrefix(),
+ i.nsMetadata.ID(),
+ fsOpts.InfoReaderBufferSize(),
+ )
+
+ segmentsOrderByVolumeIndexByVolumeTypeAndBlockStart := make(map[xtime.UnixNano]map[idxpersist.IndexVolumeType][]fs.Segments)
+ for _, file := range infoFiles {
+ seg := fs.NewSegments(file.Info, file.ID.VolumeIndex, file.AbsoluteFilePaths)
+ blockStart := xtime.ToUnixNano(seg.BlockStart())
+ segmentsOrderByVolumeIndexByVolumeType, ok := segmentsOrderByVolumeIndexByVolumeTypeAndBlockStart[blockStart]
+ if !ok {
+ segmentsOrderByVolumeIndexByVolumeType = make(map[idxpersist.IndexVolumeType][]fs.Segments)
+ segmentsOrderByVolumeIndexByVolumeTypeAndBlockStart[blockStart] = segmentsOrderByVolumeIndexByVolumeType
+ }
+
+ volumeType := seg.VolumeType()
+ if _, ok := segmentsOrderByVolumeIndexByVolumeType[volumeType]; !ok {
+ segmentsOrderByVolumeIndexByVolumeType[volumeType] = make([]fs.Segments, 0)
+ }
+ segmentsOrderByVolumeIndexByVolumeType[volumeType] = append(segmentsOrderByVolumeIndexByVolumeType[volumeType], seg)
+ }
+
+	// Ensure that segments are sorted by volume index.
+ for _, segmentsOrderByVolumeIndexByVolumeType := range segmentsOrderByVolumeIndexByVolumeTypeAndBlockStart {
+ for _, segs := range segmentsOrderByVolumeIndexByVolumeType {
+ sort.SliceStable(segs, func(i, j int) bool {
+ return segs[i].VolumeIndex() < segs[j].VolumeIndex()
+ })
+ }
+ }
+
+ multiErr := xerrors.NewMultiError()
+ // Check for dupes and remove.
+ filesToDelete := make([]string, 0)
+ for _, segmentsOrderByVolumeIndexByVolumeType := range segmentsOrderByVolumeIndexByVolumeTypeAndBlockStart {
+ for _, segmentsOrderByVolumeIndex := range segmentsOrderByVolumeIndexByVolumeType {
+ shardTimeRangesCovered := result.NewShardTimeRanges()
+ currSegments := make([]fs.Segments, 0)
+ for _, seg := range segmentsOrderByVolumeIndex {
+ if seg.ShardTimeRanges().IsSuperset(shardTimeRangesCovered) {
+ // Mark dupe segments for deletion.
+ for _, currSeg := range currSegments {
+ filesToDelete = append(filesToDelete, currSeg.AbsoluteFilePaths()...)
+ }
+ currSegments = []fs.Segments{seg}
+ shardTimeRangesCovered = seg.ShardTimeRanges().Copy()
+ continue
+ }
+ currSegments = append(currSegments, seg)
+ shardTimeRangesCovered.AddRanges(seg.ShardTimeRanges())
+ }
+ }
+ }
+ multiErr = multiErr.Add(i.deleteFilesFn(filesToDelete))
+ return multiErr.FinalError()
+}
+
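The cleanup above walks each volume type's segments in volume-index order and, whenever a later segment's shard-time ranges form a superset of everything accumulated so far, marks the earlier segments as duplicates. A simplified sketch with integer sets standing in for shard-time ranges:

```go
package main

import "fmt"

type volume struct {
	index  int
	shards map[int]bool // stand-in for shard-time ranges
}

func isSuperset(a, b map[int]bool) bool {
	for k := range b {
		if !a[k] {
			return false
		}
	}
	return true
}

// duplicateVolumes returns the volume indexes superseded by a later,
// fully covering volume, mirroring CleanupDuplicateFileSets above.
func duplicateVolumes(ordered []volume) []int {
	var (
		dupes   []int
		covered = map[int]bool{}
		curr    []volume
	)
	for _, v := range ordered {
		if isSuperset(v.shards, covered) {
			// Everything accumulated so far is covered by v: mark as dupes.
			for _, c := range curr {
				dupes = append(dupes, c.index)
			}
			curr = []volume{v}
			covered = map[int]bool{}
			for k := range v.shards {
				covered[k] = true
			}
			continue
		}
		curr = append(curr, v)
		for k := range v.shards {
			covered[k] = true
		}
	}
	return dupes
}

func main() {
	ordered := []volume{
		{0, map[int]bool{1: true}},
		{1, map[int]bool{2: true}},
		{2, map[int]bool{1: true, 2: true}}, // covers volumes 0 and 1
	}
	fmt.Println(duplicateVolumes(ordered)) // [0 1]
}
```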
+func (i *nsIndex) DebugMemorySegments(opts DebugMemorySegmentsOptions) error {
+ i.state.RLock()
+	defer i.state.RUnlock()
+ if i.state.closed {
+ return errDbIndexAlreadyClosed
+ }
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ // Create a new set of file system options to output to new directory.
+ fsOpts := i.opts.CommitLogOptions().
+ FilesystemOptions().
+ SetFilePathPrefix(opts.OutputDirectory)
+
+ for _, block := range i.state.blocksByTime {
+ segmentsData, err := block.MemorySegmentsData(ctx)
+ if err != nil {
+ return err
+ }
+
+ for numSegment, segmentData := range segmentsData {
+ indexWriter, err := fs.NewIndexWriter(fsOpts)
+ if err != nil {
+ return err
+ }
+
+ fileSetID := fs.FileSetFileIdentifier{
+ FileSetContentType: persist.FileSetIndexContentType,
+ Namespace: i.nsMetadata.ID(),
+ BlockStart: block.StartTime(),
+ VolumeIndex: numSegment,
+ }
+ openOpts := fs.IndexWriterOpenOptions{
+ Identifier: fileSetID,
+ BlockSize: i.blockSize,
+ FileSetType: persist.FileSetFlushType,
+ Shards: i.state.shardsAssigned,
+ IndexVolumeType: idxpersist.DefaultIndexVolumeType,
+ }
+ if err := indexWriter.Open(openOpts); err != nil {
+ return err
+ }
+
+ segWriter, err := idxpersist.NewFSTSegmentDataFileSetWriter(segmentData)
+ if err != nil {
+ return err
+ }
+
+ if err := indexWriter.WriteSegmentFileSet(segWriter); err != nil {
+ return err
+ }
+
+ if err := indexWriter.Close(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
func (i *nsIndex) Close() error {
i.state.Lock()
if !i.isOpenWithRLock() {
@@ -1511,6 +2036,11 @@ func (i *nsIndex) Close() error {
i.runtimeOptsListener = nil
}
+ if i.runtimeNsOptsListener != nil {
+ i.runtimeNsOptsListener.Close()
+ i.runtimeNsOptsListener = nil
+ }
+
// Can now unlock after collecting blocks to close and setting closed state.
i.state.Unlock()
@@ -1544,38 +2074,138 @@ func (i *nsIndex) unableToAllocBlockInvariantError(err error) error {
}
type nsIndexMetrics struct {
- AsyncInsertSuccess tally.Counter
- AsyncInsertErrors tally.Counter
- InsertAfterClose tally.Counter
- QueryAfterClose tally.Counter
- InsertEndToEndLatency tally.Timer
- BlocksEvictedMutableSegments tally.Counter
- BlockMetrics nsIndexBlocksMetrics
+ asyncInsertAttemptTotal tally.Counter
+ asyncInsertAttemptSkip tally.Counter
+ asyncInsertAttemptWrite tally.Counter
+
+ asyncInsertSuccess tally.Counter
+ asyncInsertErrors tally.Counter
+ insertAfterClose tally.Counter
+ queryAfterClose tally.Counter
+ forwardIndexHits tally.Counter
+ forwardIndexMisses tally.Counter
+ forwardIndexCounter tally.Counter
+ insertEndToEndLatency tally.Timer
+ blocksEvictedMutableSegments tally.Counter
+ blockMetrics nsIndexBlocksMetrics
+ indexingConcurrencyMin tally.Gauge
+ indexingConcurrencyMax tally.Gauge
+ indexingConcurrencyAvg tally.Gauge
+ flushIndexingConcurrency tally.Gauge
+ flushDocsNew tally.Counter
+ flushDocsCached tally.Counter
+
+ loadedDocsPerQuery tally.Histogram
+ queryExhaustiveSuccess tally.Counter
+ queryExhaustiveInternalError tally.Counter
+ queryNonExhaustiveSuccess tally.Counter
+ queryNonExhaustiveInternalError tally.Counter
+ queryNonExhaustiveLimitError tally.Counter
+ queryNonExhaustiveSeriesLimitError tally.Counter
+ queryNonExhaustiveDocsLimitError tally.Counter
}
func newNamespaceIndexMetrics(
opts index.Options,
iopts instrument.Options,
) nsIndexMetrics {
+ const (
+ indexAttemptName = "index-attempt"
+ forwardIndexName = "forward-index"
+ indexingConcurrency = "indexing-concurrency"
+ flushIndexingConcurrency = "flush-indexing-concurrency"
+ )
scope := iopts.MetricsScope()
blocksScope := scope.SubScope("blocks")
- return nsIndexMetrics{
- AsyncInsertSuccess: scope.Counter("index-success"),
- AsyncInsertErrors: scope.Tagged(map[string]string{
+ m := nsIndexMetrics{
+ asyncInsertAttemptTotal: scope.Tagged(map[string]string{
+ "stage": "process",
+ }).Counter(indexAttemptName),
+ asyncInsertAttemptSkip: scope.Tagged(map[string]string{
+ "stage": "skip",
+ }).Counter(indexAttemptName),
+ asyncInsertAttemptWrite: scope.Tagged(map[string]string{
+ "stage": "write",
+ }).Counter(indexAttemptName),
+ asyncInsertSuccess: scope.Counter("index-success"),
+ asyncInsertErrors: scope.Tagged(map[string]string{
"error_type": "async-insert",
}).Counter("index-error"),
- InsertAfterClose: scope.Tagged(map[string]string{
+ insertAfterClose: scope.Tagged(map[string]string{
"error_type": "insert-closed",
}).Counter("insert-after-close"),
- QueryAfterClose: scope.Tagged(map[string]string{
+ queryAfterClose: scope.Tagged(map[string]string{
"error_type": "query-closed",
}).Counter("query-after-error"),
- InsertEndToEndLatency: instrument.MustCreateSampledTimer(
- scope.Timer("insert-end-to-end-latency"),
- iopts.MetricsSamplingRate()),
- BlocksEvictedMutableSegments: scope.Counter("blocks-evicted-mutable-segments"),
- BlockMetrics: newNamespaceIndexBlocksMetrics(opts, blocksScope),
- }
+ forwardIndexHits: scope.Tagged(map[string]string{
+ "status": "hit",
+ }).Counter(forwardIndexName),
+ forwardIndexMisses: scope.Tagged(map[string]string{
+ "status": "miss",
+ }).Counter(forwardIndexName),
+ forwardIndexCounter: scope.Tagged(map[string]string{
+ "status": "count",
+ }).Counter(forwardIndexName),
+ insertEndToEndLatency: instrument.NewTimer(scope,
+ "insert-end-to-end-latency", iopts.TimerOptions()),
+ blocksEvictedMutableSegments: scope.Counter("blocks-evicted-mutable-segments"),
+ blockMetrics: newNamespaceIndexBlocksMetrics(opts, blocksScope),
+ indexingConcurrencyMin: scope.Tagged(map[string]string{
+ "stat": "min",
+ }).Gauge(indexingConcurrency),
+ indexingConcurrencyMax: scope.Tagged(map[string]string{
+ "stat": "max",
+ }).Gauge(indexingConcurrency),
+ indexingConcurrencyAvg: scope.Tagged(map[string]string{
+ "stat": "avg",
+ }).Gauge(indexingConcurrency),
+ flushIndexingConcurrency: scope.Gauge(flushIndexingConcurrency),
+ flushDocsNew: scope.Tagged(map[string]string{
+ "status": "new",
+ }).Counter("flush-docs"),
+ flushDocsCached: scope.Tagged(map[string]string{
+ "status": "cached",
+ }).Counter("flush-docs"),
+ loadedDocsPerQuery: scope.Histogram(
+ "loaded-docs-per-query",
+ tally.MustMakeExponentialValueBuckets(10, 2, 16),
+ ),
+ queryExhaustiveSuccess: scope.Tagged(map[string]string{
+ "exhaustive": "true",
+ "result": "success",
+ }).Counter("query"),
+ queryExhaustiveInternalError: scope.Tagged(map[string]string{
+ "exhaustive": "true",
+ "result": "error_internal",
+ }).Counter("query"),
+ queryNonExhaustiveSuccess: scope.Tagged(map[string]string{
+ "exhaustive": "false",
+ "result": "success",
+ }).Counter("query"),
+ queryNonExhaustiveInternalError: scope.Tagged(map[string]string{
+ "exhaustive": "false",
+ "result": "error_internal",
+ }).Counter("query"),
+ queryNonExhaustiveLimitError: scope.Tagged(map[string]string{
+ "exhaustive": "false",
+ "result": "error_require_exhaustive",
+ }).Counter("query"),
+ queryNonExhaustiveSeriesLimitError: scope.Tagged(map[string]string{
+ "exhaustive": "false",
+ "result": "error_series_require_exhaustive",
+ }).Counter("query"),
+ queryNonExhaustiveDocsLimitError: scope.Tagged(map[string]string{
+ "exhaustive": "false",
+ "result": "error_docs_require_exhaustive",
+ }).Counter("query"),
+ }
+
+	// Initialize gauges that should default to zero before
+	// returning so that they are exported with an explicit
+	// zero value at process startup.
+ m.flushIndexingConcurrency.Update(0)
+
+ return m
}
type nsIndexBlocksMetrics struct {
diff --git a/src/dbnode/storage/index/README.md b/src/dbnode/storage/index/README.md
new file mode 100644
index 0000000000..88bb901e9e
--- /dev/null
+++ b/src/dbnode/storage/index/README.md
@@ -0,0 +1,9 @@
+# index
+
+Index related documentation.
+
+## In-memory index cold flush consistency model
+
+Index writes go into the active cold mutable segment once an index block is sealed. Index blocks are sealed during ticks once their block start is more than `bufferPast` plus one block size in the past.
+
+At the beginning of a cold flush we rotate out the active cold mutable segment. In-memory index writes are then evicted from memory once the cold flush completes.
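A rough sketch of the sealing barrier that routes an index write to the warm or cold mutable segment (durations are illustrative, and the truncation below stands in for the retention helper the code actually uses):

```go
package main

import (
	"fmt"
	"time"
)

// sealed reports whether the index block starting at blockStart is
// sealed at 'now', i.e. whether an incoming index write for it lands
// in the active cold mutable segment rather than the warm one.
func sealed(blockStart, now time.Time, blockSize, bufferPast time.Duration) bool {
	barrier := now.Add(-bufferPast).Truncate(blockSize)
	return !blockStart.After(barrier)
}

func main() {
	var (
		blockSize  = 2 * time.Hour
		bufferPast = 10 * time.Minute
		now        = time.Date(2020, 1, 1, 12, 30, 0, 0, time.UTC)
		old        = time.Date(2020, 1, 1, 8, 0, 0, 0, time.UTC)
	)
	fmt.Println(sealed(old, now, blockSize, bufferPast)) // true: cold segment
}
```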
diff --git a/src/dbnode/storage/index/aggregate_results.go b/src/dbnode/storage/index/aggregate_results.go
index 427d95db79..18daa66f1c 100644
--- a/src/dbnode/storage/index/aggregate_results.go
+++ b/src/dbnode/storage/index/aggregate_results.go
@@ -31,13 +31,18 @@ import (
const missingDocumentFields = "invalid document fields: empty %s"
+// NB: emptyValues is an AggregateValues with no values, used for tracking
+// terms only rather than terms and values.
+var emptyValues = AggregateValues{hasValues: false}
+
type aggregatedResults struct {
sync.RWMutex
nsID ident.ID
aggregateOpts AggregateResultsOptions
- resultsMap *AggregateResultsMap
+ resultsMap *AggregateResultsMap
+ totalDocsCount int
idPool ident.Pool
bytesPool pool.CheckedBytesPool
@@ -90,26 +95,30 @@ func (r *aggregatedResults) Reset(
// reset all keys in the map next
r.resultsMap.Reset()
+ r.totalDocsCount = 0
// NB: could do keys+value in one step but I'm trying to avoid
// using an internal method of a code-gen'd type.
r.Unlock()
}
-func (r *aggregatedResults) AddDocuments(batch []doc.Document) (int, error) {
+func (r *aggregatedResults) AddDocuments(batch []doc.Document) (int, int, error) {
r.Lock()
err := r.addDocumentsBatchWithLock(batch)
size := r.resultsMap.Len()
+ docsCount := r.totalDocsCount + len(batch)
+ r.totalDocsCount = docsCount
r.Unlock()
- return size, err
+ return size, docsCount, err
}
func (r *aggregatedResults) AggregateResultsOptions() AggregateResultsOptions {
return r.aggregateOpts
}
-func (r *aggregatedResults) AddFields(batch []AggregateResultsEntry) int {
+func (r *aggregatedResults) AddFields(batch []AggregateResultsEntry) (int, int) {
r.Lock()
+ valueInsertions := 0
for _, entry := range batch {
f := entry.Field
aggValues, ok := r.resultsMap.Get(f)
@@ -135,6 +144,7 @@ func (r *aggregatedResults) AddFields(batch []AggregateResultsEntry) int {
NoCopyKey: true,
NoFinalizeKey: false,
})
+ valueInsertions++
} else {
// because we already have a entry for this term, we release the ident back to
// the underlying pool.
@@ -143,8 +153,10 @@ func (r *aggregatedResults) AddFields(batch []AggregateResultsEntry) int {
}
}
size := r.resultsMap.Len()
+ docsCount := r.totalDocsCount + valueInsertions
+ r.totalDocsCount = docsCount
r.Unlock()
- return size
+ return size, docsCount
}
func (r *aggregatedResults) addDocumentsBatchWithLock(
@@ -214,7 +226,7 @@ func (r *aggregatedResults) addTermWithLock(
// Set results map to an empty AggregateValues since we only care about
// existence of the term in the map, rather than its set of values.
- r.resultsMap.Set(termID, r.valuesPool.Get())
+ r.resultsMap.Set(termID, emptyValues)
return nil
}
@@ -294,6 +306,13 @@ func (r *aggregatedResults) Size() int {
return l
}
+func (r *aggregatedResults) TotalDocsCount() int {
+ r.RLock()
+ count := r.totalDocsCount
+ r.RUnlock()
+ return count
+}
+
func (r *aggregatedResults) Finalize() {
r.Reset(nil, AggregateResultsOptions{})
if r.pool == nil {
diff --git a/src/dbnode/storage/index/aggregate_results_new_map.go b/src/dbnode/storage/index/aggregate_results_new_map.go
index a81a6faf72..cdb576533f 100644
--- a/src/dbnode/storage/index/aggregate_results_new_map.go
+++ b/src/dbnode/storage/index/aggregate_results_new_map.go
@@ -23,7 +23,7 @@ package index
import (
"github.com/m3db/m3/src/x/ident"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
const (
diff --git a/src/dbnode/storage/index/aggregate_results_test.go b/src/dbnode/storage/index/aggregate_results_test.go
index 63af80b976..0f69b8f912 100644
--- a/src/dbnode/storage/index/aggregate_results_test.go
+++ b/src/dbnode/storage/index/aggregate_results_test.go
@@ -51,22 +51,47 @@ func genDoc(strs ...string) doc.Document {
func TestAggResultsInsertInvalid(t *testing.T) {
res := NewAggregateResults(nil, AggregateResultsOptions{}, testOpts)
dInvalid := doc.Document{Fields: []doc.Field{{}}}
- size, err := res.AddDocuments([]doc.Document{dInvalid})
+ size, docsCount, err := res.AddDocuments([]doc.Document{dInvalid})
require.Error(t, err)
require.Equal(t, 0, size)
+ require.Equal(t, 1, docsCount)
+
+ require.Equal(t, 0, res.Size())
+ require.Equal(t, 1, res.TotalDocsCount())
dInvalid = genDoc("", "foo")
- size, err = res.AddDocuments([]doc.Document{dInvalid})
+ size, docsCount, err = res.AddDocuments([]doc.Document{dInvalid})
require.Error(t, err)
require.Equal(t, 0, size)
+ require.Equal(t, 2, docsCount)
+
+ require.Equal(t, 0, res.Size())
+ require.Equal(t, 2, res.TotalDocsCount())
}
func TestAggResultsInsertEmptyTermValue(t *testing.T) {
res := NewAggregateResults(nil, AggregateResultsOptions{}, testOpts)
dValidEmptyTerm := genDoc("foo", "")
- size, err := res.AddDocuments([]doc.Document{dValidEmptyTerm})
+ size, docsCount, err := res.AddDocuments([]doc.Document{dValidEmptyTerm})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
+
+ require.Equal(t, 1, res.Size())
+ require.Equal(t, 1, res.TotalDocsCount())
+}
+
+func TestAggResultsInsertBatchOfTwo(t *testing.T) {
+ res := NewAggregateResults(nil, AggregateResultsOptions{}, testOpts)
+ d1 := genDoc("d1", "")
+ d2 := genDoc("d2", "")
+ size, docsCount, err := res.AddDocuments([]doc.Document{d1, d2})
+ require.NoError(t, err)
+ require.Equal(t, 2, size)
+ require.Equal(t, 2, docsCount)
+
+ require.Equal(t, 2, res.Size())
+ require.Equal(t, 2, res.TotalDocsCount())
}
func TestAggResultsTermOnlyInsert(t *testing.T) {
@@ -74,30 +99,50 @@ func TestAggResultsTermOnlyInsert(t *testing.T) {
Type: AggregateTagNames,
}, testOpts)
dInvalid := doc.Document{Fields: []doc.Field{{}}}
- size, err := res.AddDocuments([]doc.Document{dInvalid})
+ size, docsCount, err := res.AddDocuments([]doc.Document{dInvalid})
require.Error(t, err)
require.Equal(t, 0, size)
+ require.Equal(t, 1, docsCount)
+
+ require.Equal(t, 0, res.Size())
+ require.Equal(t, 1, res.TotalDocsCount())
dInvalid = genDoc("", "foo")
- size, err = res.AddDocuments([]doc.Document{dInvalid})
+ size, docsCount, err = res.AddDocuments([]doc.Document{dInvalid})
require.Error(t, err)
require.Equal(t, 0, size)
+ require.Equal(t, 2, docsCount)
+
+ require.Equal(t, 0, res.Size())
+ require.Equal(t, 2, res.TotalDocsCount())
valid := genDoc("foo", "")
- size, err = res.AddDocuments([]doc.Document{valid})
+ size, docsCount, err = res.AddDocuments([]doc.Document{valid})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 3, docsCount)
+
+ require.Equal(t, 1, res.Size())
+ require.Equal(t, 3, res.TotalDocsCount())
}
func testAggResultsInsertIdempotency(t *testing.T, res AggregateResults) {
dValid := genDoc("foo", "bar")
- size, err := res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
+
+ require.Equal(t, 1, res.Size())
+ require.Equal(t, 1, res.TotalDocsCount())
- size, err = res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err = res.AddDocuments([]doc.Document{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 2, docsCount)
+
+ require.Equal(t, 1, res.Size())
+ require.Equal(t, 2, res.TotalDocsCount())
}
func TestAggResultsInsertIdempotency(t *testing.T) {
@@ -117,17 +162,19 @@ func TestInvalidAggregateType(t *testing.T) {
Type: 100,
}, testOpts)
dValid := genDoc("foo", "bar")
- size, err := res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
require.Error(t, err)
require.Equal(t, 0, size)
+ require.Equal(t, 1, docsCount)
}
func TestAggResultsSameName(t *testing.T) {
res := NewAggregateResults(nil, AggregateResultsOptions{}, testOpts)
d1 := genDoc("foo", "bar")
- size, err := res.AddDocuments([]doc.Document{d1})
+ size, docsCount, err := res.AddDocuments([]doc.Document{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
rMap := res.Map()
aggVals, ok := rMap.Get(ident.StringID("foo"))
@@ -136,9 +183,10 @@ func TestAggResultsSameName(t *testing.T) {
assert.True(t, aggVals.Map().Contains(ident.StringID("bar")))
d2 := genDoc("foo", "biz")
- size, err = res.AddDocuments([]doc.Document{d2})
+ size, docsCount, err = res.AddDocuments([]doc.Document{d2})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 2, docsCount)
aggVals, ok = rMap.Get(ident.StringID("foo"))
require.True(t, ok)
@@ -147,63 +195,63 @@ func TestAggResultsSameName(t *testing.T) {
assert.True(t, aggVals.Map().Contains(ident.StringID("biz")))
}
+func assertNoValuesInNameOnlyAggregate(t *testing.T, v AggregateValues) {
+ assert.False(t, v.hasValues)
+ assert.Nil(t, v.valuesMap)
+ assert.Nil(t, v.pool)
+
+ assert.Equal(t, 0, v.Size())
+ assert.Nil(t, v.Map())
+ assert.False(t, v.HasValues())
+}
+
func TestAggResultsTermOnlySameName(t *testing.T) {
res := NewAggregateResults(nil, AggregateResultsOptions{
Type: AggregateTagNames,
}, testOpts)
d1 := genDoc("foo", "bar")
- size, err := res.AddDocuments([]doc.Document{d1})
+ size, docsCount, err := res.AddDocuments([]doc.Document{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
rMap := res.Map()
aggVals, ok := rMap.Get(ident.StringID("foo"))
require.True(t, ok)
- require.Equal(t, 0, aggVals.Size())
+ assertNoValuesInNameOnlyAggregate(t, aggVals)
d2 := genDoc("foo", "biz")
- size, err = res.AddDocuments([]doc.Document{d2})
+ size, docsCount, err = res.AddDocuments([]doc.Document{d2})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 2, docsCount)
aggVals, ok = rMap.Get(ident.StringID("foo"))
require.True(t, ok)
- require.Equal(t, 0, aggVals.Size())
-}
-
-func assertContains(t *testing.T,
- ex map[string][]string, ac *AggregateResultsMap) {
- require.Equal(t, len(ex), ac.Len())
- for k, v := range ex {
- aggVals, ok := ac.Get(ident.StringID(k))
- require.True(t, ok)
- require.Equal(t, len(v), aggVals.Size())
- for _, actual := range v {
- require.True(t, aggVals.Map().Contains(ident.StringID(actual)))
- }
- }
+ require.False(t, aggVals.hasValues)
+ assertNoValuesInNameOnlyAggregate(t, aggVals)
}
-func addMultipleDocuments(t *testing.T, res AggregateResults) int {
- _, err := res.AddDocuments([]doc.Document{
+func addMultipleDocuments(t *testing.T, res AggregateResults) (int, int) {
+ _, _, err := res.AddDocuments([]doc.Document{
genDoc("foo", "bar"),
genDoc("fizz", "bar"),
genDoc("buzz", "bar"),
})
require.NoError(t, err)
- _, err = res.AddDocuments([]doc.Document{
+ _, _, err = res.AddDocuments([]doc.Document{
genDoc("foo", "biz"),
genDoc("fizz", "bar"),
})
require.NoError(t, err)
- size, err := res.AddDocuments([]doc.Document{
+ size, docsCount, err := res.AddDocuments([]doc.Document{
genDoc("foo", "baz", "buzz", "bag", "qux", "qaz"),
})
require.NoError(t, err)
- return size
+ return size, docsCount
}
func expectedTermsOnly(ex map[string][]string) map[string][]string {
@@ -233,28 +281,28 @@ var mergeTests = []struct {
name: "no limit no filter",
opts: AggregateResultsOptions{},
expected: map[string][]string{
- "foo": []string{"bar", "biz", "baz"},
- "fizz": []string{"bar"},
- "buzz": []string{"bar", "bag"},
- "qux": []string{"qaz"},
+ "foo": {"bar", "biz", "baz"},
+ "fizz": {"bar"},
+ "buzz": {"bar", "bag"},
+ "qux": {"qaz"},
},
},
{
name: "with limit no filter",
opts: AggregateResultsOptions{SizeLimit: 2},
expected: map[string][]string{
- "foo": []string{"bar", "biz", "baz"},
- "fizz": []string{"bar"},
+ "foo": {"bar", "biz", "baz"},
+ "fizz": {"bar"},
},
},
{
name: "no limit empty filter",
opts: AggregateResultsOptions{FieldFilter: toFilter()},
expected: map[string][]string{
- "foo": []string{"bar", "biz", "baz"},
- "fizz": []string{"bar"},
- "buzz": []string{"bar", "bag"},
- "qux": []string{"qaz"},
+ "foo": {"bar", "biz", "baz"},
+ "fizz": {"bar"},
+ "buzz": {"bar", "bag"},
+ "qux": {"qaz"},
},
},
{
@@ -266,7 +314,7 @@ var mergeTests = []struct {
name: "empty limit with filter",
opts: AggregateResultsOptions{FieldFilter: toFilter("buzz")},
expected: map[string][]string{
- "buzz": []string{"bar", "bag"},
+ "buzz": {"bar", "bag"},
},
},
{
@@ -274,8 +322,8 @@ var mergeTests = []struct {
opts: AggregateResultsOptions{
SizeLimit: 2, FieldFilter: toFilter("buzz", "qux", "fizz")},
expected: map[string][]string{
- "fizz": []string{"bar"},
- "buzz": []string{"bar", "bag"},
+ "fizz": {"bar"},
+ "buzz": {"bar", "bag"},
},
},
}
@@ -284,19 +332,41 @@ func TestAggResultsMerge(t *testing.T) {
for _, tt := range mergeTests {
t.Run(tt.name, func(t *testing.T) {
res := NewAggregateResults(nil, tt.opts, testOpts)
- size := addMultipleDocuments(t, res)
+ size, docsCount := addMultipleDocuments(t, res)
require.Equal(t, len(tt.expected), size)
- assertContains(t, tt.expected, res.Map())
+ require.Equal(t, 6, docsCount)
+ ac := res.Map()
+ require.Equal(t, len(tt.expected), ac.Len())
+ for k, v := range tt.expected {
+ aggVals, ok := ac.Get(ident.StringID(k))
+ require.True(t, ok)
+ require.Equal(t, len(v), aggVals.Size())
+ for _, actual := range v {
+ require.True(t, aggVals.Map().Contains(ident.StringID(actual)))
+ }
+ }
})
+ }
+}
+func TestAggResultsMergeNameOnly(t *testing.T) {
+ for _, tt := range mergeTests {
t.Run(tt.name+" name only", func(t *testing.T) {
tt.opts.Type = AggregateTagNames
res := NewAggregateResults(nil, tt.opts, testOpts)
- size := addMultipleDocuments(t, res)
+ size, docsCount := addMultipleDocuments(t, res)
require.Equal(t, len(tt.expected), size)
- assertContains(t, expectedTermsOnly(tt.expected), res.Map())
+ require.Equal(t, 6, docsCount)
+
+ ac := res.Map()
+ require.Equal(t, len(tt.expected), ac.Len())
+ for k := range tt.expected {
+ aggVals, ok := ac.Get(ident.StringID(k))
+ require.True(t, ok)
+ assertNoValuesInNameOnlyAggregate(t, aggVals)
+ }
})
}
}
@@ -306,9 +376,10 @@ func TestAggResultsInsertCopies(t *testing.T) {
dValid := genDoc("foo", "bar")
name := dValid.Fields[0].Name
value := dValid.Fields[0].Value
- size, err := res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
found := false
@@ -347,9 +418,10 @@ func TestAggResultsNameOnlyInsertCopies(t *testing.T) {
}, testOpts)
dValid := genDoc("foo", "bar")
name := dValid.Fields[0].Name
- size, err := res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
found := false
// our genny generated maps don't provide access to MapEntry directly,
@@ -366,9 +438,7 @@ func TestAggResultsNameOnlyInsertCopies(t *testing.T) {
// than the original.
require.False(t, xtest.ByteSlicesBackedBySameData(n, name))
found = true
- v := entry.Value()
- require.NotNil(t, v.Map())
- require.Equal(t, 0, v.Size())
+ assertNoValuesInNameOnlyAggregate(t, entry.Value())
}
require.True(t, found)
@@ -378,9 +448,10 @@ func TestAggResultsReset(t *testing.T) {
res := NewAggregateResults(ident.StringID("qux"),
AggregateResultsOptions{}, testOpts)
d1 := genDoc("foo", "bar")
- size, err := res.AddDocuments([]doc.Document{d1})
+ size, docsCount, err := res.AddDocuments([]doc.Document{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
aggVals, ok := res.Map().Get(ident.StringID("foo"))
require.True(t, ok)
@@ -427,9 +498,10 @@ func TestAggResultFinalize(t *testing.T) {
// Create a Results and insert some data.
res := NewAggregateResults(nil, AggregateResultsOptions{}, testOpts)
d1 := genDoc("foo", "bar")
- size, err := res.AddDocuments([]doc.Document{d1})
+ size, docsCount, err := res.AddDocuments([]doc.Document{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
// Ensure the data is present.
rMap := res.Map()
diff --git a/src/dbnode/storage/index/aggregate_values.go b/src/dbnode/storage/index/aggregate_values.go
index 3c87b43640..7b7917f862 100644
--- a/src/dbnode/storage/index/aggregate_values.go
+++ b/src/dbnode/storage/index/aggregate_values.go
@@ -22,22 +22,21 @@ package index
import (
"github.com/m3db/m3/src/x/ident"
- "github.com/m3db/m3/src/x/pool"
)
// AggregateValues is a collection of unique identity values backed by a pool.
// NB: there are no synchronization guarantees provided by default.
type AggregateValues struct {
+ hasValues bool
valuesMap *AggregateValuesMap
- bytesPool pool.CheckedBytesPool
pool AggregateValuesPool
}
// NewAggregateValues returns a new AggregateValues object.
func NewAggregateValues(opts Options) AggregateValues {
return AggregateValues{
+ hasValues: true,
valuesMap: NewAggregateValuesMap(opts.IdentifierPool()),
- bytesPool: opts.CheckedBytesPool(),
pool: opts.AggregateValuesPool(),
}
}
@@ -54,6 +53,11 @@ func MustNewAggregateValues(opts Options, ids ...ident.ID) AggregateValues {
return m
}
+// HasValues returns true if this AggregateValues is backed by a values map.
+func (v *AggregateValues) HasValues() bool {
+ return v.hasValues
+}
+
// Map returns a map from an ID -> empty struct to signify existence of the
// ID in the set this structure represents.
func (v *AggregateValues) Map() *AggregateValuesMap {
@@ -62,10 +66,19 @@ func (v *AggregateValues) Map() *AggregateValuesMap {
// Size returns the number of IDs tracked.
func (v *AggregateValues) Size() int {
+ if !v.hasValues {
+ return 0
+ }
+
return v.valuesMap.Len()
}
func (v *AggregateValues) finalize() {
+ // NB: if this aggregate values has no values, no need to finalize.
+ if !v.hasValues {
+ return
+ }
+
// NB: resetting the value map will already finalize all copies of the keys.
v.valuesMap.Reset()
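
To make the name-only semantics above concrete: the zero value of AggregateValues now acts as a sentinel, reporting no backing map instead of allocating one. A minimal, self-contained sketch of the same pattern, using hypothetical stand-in types rather than the m3 implementation:

```go
package main

import "fmt"

// aggValues mimics the pattern above: the zero value carries hasValues=false,
// so Size is safe to call on a name-only entry without any backing map.
type aggValues struct {
	hasValues bool
	values    map[string]struct{}
}

func newAggValues() aggValues {
	return aggValues{hasValues: true, values: make(map[string]struct{})}
}

func (v *aggValues) HasValues() bool { return v.hasValues }

func (v *aggValues) Size() int {
	if !v.hasValues {
		return 0 // name-only: nothing allocated, nothing to count.
	}
	return len(v.values)
}

func main() {
	var nameOnly aggValues // zero value stands in for a name-only aggregate.
	full := newAggValues()
	full.values["bar"] = struct{}{}

	fmt.Println(nameOnly.HasValues(), nameOnly.Size()) // false 0
	fmt.Println(full.HasValues(), full.Size())         // true 1
}
```

Keeping the flag unexported and false by default means every zero value is automatically a safe name-only entry; only NewAggregateValues flips it on.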
diff --git a/src/dbnode/storage/index/aggregate_values_new_map.go b/src/dbnode/storage/index/aggregate_values_new_map.go
index 7bc29d4eef..70b80e2a47 100644
--- a/src/dbnode/storage/index/aggregate_values_new_map.go
+++ b/src/dbnode/storage/index/aggregate_values_new_map.go
@@ -23,7 +23,7 @@ package index
import (
"github.com/m3db/m3/src/x/ident"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
const (
diff --git a/src/dbnode/storage/index/block.go b/src/dbnode/storage/index/block.go
index 324fc8ab37..d7f2668fd8 100644
--- a/src/dbnode/storage/index/block.go
+++ b/src/dbnode/storage/index/block.go
@@ -24,26 +24,25 @@ import (
"bytes"
"errors"
"fmt"
+ "io"
"sync"
"time"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
- "github.com/m3db/m3/src/dbnode/storage/index/compaction"
- "github.com/m3db/m3/src/dbnode/storage/index/segments"
+ "github.com/m3db/m3/src/dbnode/storage/stats"
"github.com/m3db/m3/src/dbnode/tracepoint"
"github.com/m3db/m3/src/m3ninx/doc"
m3ninxindex "github.com/m3db/m3/src/m3ninx/index"
"github.com/m3db/m3/src/m3ninx/index/segment"
- "github.com/m3db/m3/src/m3ninx/index/segment/builder"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
+ "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/m3ninx/search"
"github.com/m3db/m3/src/m3ninx/search/executor"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
- "github.com/m3db/m3/src/x/mmap"
"github.com/m3db/m3/src/x/resource"
xtime "github.com/m3db/m3/src/x/time"
@@ -59,16 +58,12 @@ var (
// ErrUnableReportStatsBlockClosed is returned from Stats when the block is closed.
ErrUnableReportStatsBlockClosed = errors.New("unable to report stats, block is closed")
- errUnableToWriteBlockClosed = errors.New("unable to write, index block is closed")
- errUnableToWriteBlockSealed = errors.New("unable to write, index block is sealed")
- errUnableToWriteBlockConcurrent = errors.New("unable to write, index block is being written to already")
- errUnableToBootstrapBlockClosed = errors.New("unable to bootstrap, block is closed")
- errUnableToTickBlockClosed = errors.New("unable to tick, block is closed")
- errBlockAlreadyClosed = errors.New("unable to close, block already closed")
- errForegroundCompactorNoPlan = errors.New("index foreground compactor failed to generate a plan")
- errForegroundCompactorBadPlanFirstTask = errors.New("index foreground compactor generated plan without mutable segment in first task")
- errForegroundCompactorBadPlanSecondaryTask = errors.New("index foreground compactor generated plan with mutable segment a secondary task")
- errCancelledQuery = errors.New("query was cancelled")
+ errUnableToWriteBlockClosed = errors.New("unable to write, index block is closed")
+ errUnableToWriteBlockSealed = errors.New("unable to write, index block is sealed")
+ errUnableToBootstrapBlockClosed = errors.New("unable to bootstrap, block is closed")
+ errUnableToTickBlockClosed = errors.New("unable to tick, block is closed")
+ errBlockAlreadyClosed = errors.New("unable to close, block already closed")
+ errCancelledQuery = errors.New("query was cancelled")
errUnableToSealBlockIllegalStateFmtString = "unable to seal, index block state: %v"
errUnableToWriteBlockUnknownStateFmtString = "unable to write, unknown index block state: %v"
@@ -103,60 +98,71 @@ func (s blockState) String() string {
type newExecutorFn func() (search.Executor, error)
-// nolint: maligned
-type block struct {
- sync.RWMutex
+type shardRangesSegmentsByVolumeType map[persist.IndexVolumeType][]blockShardRangesSegments
- state blockState
- hasEvictedMutableSegmentsAnyTimes bool
+func (s shardRangesSegmentsByVolumeType) forEachSegment(cb func(segment segment.Segment) error) error {
+ return s.forEachSegmentGroup(func(group blockShardRangesSegments) error {
+ for _, seg := range group.segments {
+ if err := cb(seg); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
- foregroundSegments []*readableSeg
- backgroundSegments []*readableSeg
- shardRangesSegments []blockShardRangesSegments
+func (s shardRangesSegmentsByVolumeType) forEachSegmentGroup(cb func(group blockShardRangesSegments) error) error {
+ for _, shardRangesSegments := range s {
+ for _, group := range shardRangesSegments {
+ if err := cb(group); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
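
As a usage note on the helpers above: forEachSegment layers on forEachSegmentGroup, so callers can fold over all segments across volume types without nested loops. A hedged sketch of the same shape over plain stand-in types:

```go
package main

import "fmt"

type group struct{ segments []string }

// byVolumeType stands in for shardRangesSegmentsByVolumeType.
type byVolumeType map[string][]group

// forEachGroup visits every group across all volume types, stopping early
// on the first error, just like forEachSegmentGroup above.
func (s byVolumeType) forEachGroup(cb func(group) error) error {
	for _, groups := range s {
		for _, g := range groups {
			if err := cb(g); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	s := byVolumeType{
		"default": {{segments: []string{"a", "b"}}},
		"cold":    {{segments: []string{"c"}}},
	}
	total := 0
	_ = s.forEachGroup(func(g group) error {
		total += len(g.segments) // count segments across all volume types.
		return nil
	})
	fmt.Println(total) // 3
}
```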
- newFieldsAndTermsIteratorFn newFieldsAndTermsIteratorFn
- newExecutorFn newExecutorFn
- blockStart time.Time
- blockEnd time.Time
- blockSize time.Duration
- blockOpts BlockOptions
- opts Options
- iopts instrument.Options
- nsMD namespace.Metadata
+// nolint: maligned
+type block struct {
+ sync.RWMutex
- compact blockCompact
+ state blockState
+
+ mutableSegments *mutableSegments
+ coldMutableSegments []*mutableSegments
+ shardRangesSegmentsByVolumeType shardRangesSegmentsByVolumeType
+ newFieldsAndTermsIteratorFn newFieldsAndTermsIteratorFn
+ newExecutorWithRLockFn newExecutorFn
+ blockStart time.Time
+ blockEnd time.Time
+ blockSize time.Duration
+ opts Options
+ iopts instrument.Options
+ blockOpts BlockOptions
+ nsMD namespace.Metadata
+ namespaceRuntimeOptsMgr namespace.RuntimeOptionsManager
+ queryStats stats.QueryStats
metrics blockMetrics
logger *zap.Logger
}
type blockMetrics struct {
- rotateActiveSegment tally.Counter
- rotateActiveSegmentAge tally.Timer
- rotateActiveSegmentSize tally.Histogram
- foregroundCompactionPlanRunLatency tally.Timer
- foregroundCompactionTaskRunLatency tally.Timer
- backgroundCompactionPlanRunLatency tally.Timer
- backgroundCompactionTaskRunLatency tally.Timer
- segmentFreeMmapSuccess tally.Counter
- segmentFreeMmapError tally.Counter
- segmentFreeMmapSkipNotImmutable tally.Counter
+ rotateActiveSegment tally.Counter
+ rotateActiveSegmentAge tally.Timer
+ rotateActiveSegmentSize tally.Histogram
+ segmentFreeMmapSuccess tally.Counter
+ segmentFreeMmapError tally.Counter
+ segmentFreeMmapSkipNotImmutable tally.Counter
}
func newBlockMetrics(s tally.Scope) blockMetrics {
- s = s.SubScope("index").SubScope("block")
- foregroundScope := s.Tagged(map[string]string{"compaction-type": "foreground"})
- backgroundScope := s.Tagged(map[string]string{"compaction-type": "background"})
segmentFreeMmap := "segment-free-mmap"
return blockMetrics{
rotateActiveSegment: s.Counter("rotate-active-segment"),
rotateActiveSegmentAge: s.Timer("rotate-active-segment-age"),
rotateActiveSegmentSize: s.Histogram("rotate-active-segment-size",
append(tally.ValueBuckets{0}, tally.MustMakeExponentialValueBuckets(100, 2, 16)...)),
- foregroundCompactionPlanRunLatency: foregroundScope.Timer("compaction-plan-run-latency"),
- foregroundCompactionTaskRunLatency: foregroundScope.Timer("compaction-task-run-latency"),
- backgroundCompactionPlanRunLatency: backgroundScope.Timer("compaction-plan-run-latency"),
- backgroundCompactionTaskRunLatency: backgroundScope.Timer("compaction-task-run-latency"),
segmentFreeMmapSuccess: s.Tagged(map[string]string{
"result": "success",
"skip_type": "none",
@@ -186,29 +192,67 @@ type BlockOptions struct {
BackgroundCompactorMmapDocsData bool
}
+// NewBlockFn is a new block constructor.
+type NewBlockFn func(
+ blockStart time.Time,
+ md namespace.Metadata,
+ blockOpts BlockOptions,
+ namespaceRuntimeOptsMgr namespace.RuntimeOptionsManager,
+ opts Options,
+) (Block, error)
+
+// Ensure NewBlock implements NewBlockFn.
+var _ NewBlockFn = NewBlock
+
// NewBlock returns a new Block, representing a complete reverse index for the
// duration of time specified. It is backed by one or more segments.
func NewBlock(
blockStart time.Time,
md namespace.Metadata,
- opts BlockOptions,
- indexOpts Options,
+ blockOpts BlockOptions,
+ namespaceRuntimeOptsMgr namespace.RuntimeOptionsManager,
+ opts Options,
) (Block, error) {
blockSize := md.Options().IndexOptions().BlockSize()
- iopts := indexOpts.InstrumentOptions()
+ iopts := opts.InstrumentOptions()
+ scope := iopts.MetricsScope().SubScope("index").SubScope("block")
+ iopts = iopts.SetMetricsScope(scope)
+ segs := newMutableSegments(
+ blockStart,
+ opts,
+ blockOpts,
+ namespaceRuntimeOptsMgr,
+ iopts,
+ )
+ // NB(bodu): The length of coldMutableSegments is always at least 1.
+ coldSegs := []*mutableSegments{
+ newMutableSegments(
+ blockStart,
+ opts,
+ blockOpts,
+ namespaceRuntimeOptsMgr,
+ iopts,
+ ),
+ }
b := &block{
- state: blockStateOpen,
- blockStart: blockStart,
- blockEnd: blockStart.Add(blockSize),
- blockSize: blockSize,
- opts: indexOpts,
- iopts: iopts,
- nsMD: md,
- metrics: newBlockMetrics(iopts.MetricsScope()),
- logger: iopts.Logger(),
+ state: blockStateOpen,
+ blockStart: blockStart,
+ blockEnd: blockStart.Add(blockSize),
+ blockSize: blockSize,
+ blockOpts: blockOpts,
+ mutableSegments: segs,
+ coldMutableSegments: coldSegs,
+ shardRangesSegmentsByVolumeType: make(shardRangesSegmentsByVolumeType),
+ opts: opts,
+ iopts: iopts,
+ nsMD: md,
+ namespaceRuntimeOptsMgr: namespaceRuntimeOptsMgr,
+ metrics: newBlockMetrics(scope),
+ logger: iopts.Logger(),
+ queryStats: opts.QueryStats(),
}
b.newFieldsAndTermsIteratorFn = newFieldsAndTermsIterator
- b.newExecutorFn = b.executorWithRLock
+ b.newExecutorWithRLockFn = b.executorWithRLock
return b, nil
}
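
The exported NewBlockFn alias plus the `var _ NewBlockFn = NewBlock` compile-time check let callers inject an alternate block constructor (for example, a fake in tests) while guaranteeing NewBlock keeps the right signature. A sketch of the idiom with simplified stand-in types, not the real parameter list:

```go
package main

import (
	"fmt"
	"time"
)

// block and newBlockFn are simplified stand-ins for index.Block and
// index.NewBlockFn; the real constructor takes metadata and options.
type block struct{ start time.Time }

type newBlockFn func(start time.Time) (*block, error)

func newBlock(start time.Time) (*block, error) {
	return &block{start: start}, nil
}

// Compile-time check, matching the `var _ NewBlockFn = NewBlock` idiom:
// if newBlock's signature drifts, this line fails to build.
var _ newBlockFn = newBlock

func main() {
	construct := newBlockFn(newBlock) // a test could pass a fake here instead.
	b, err := construct(time.Now().Truncate(time.Hour))
	fmt.Println(b.start, err)
}
```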
@@ -221,258 +265,19 @@ func (b *block) EndTime() time.Time {
return b.blockEnd
}
-func (b *block) maybeBackgroundCompactWithLock() {
- if b.compact.compactingBackground || b.state != blockStateOpen {
- return
- }
-
- // Create a logical plan.
- segs := make([]compaction.Segment, 0, len(b.backgroundSegments))
- for _, seg := range b.backgroundSegments {
- segs = append(segs, compaction.Segment{
- Age: seg.Age(),
- Size: seg.Segment().Size(),
- Type: segments.FSTType,
- Segment: seg.Segment(),
- })
- }
-
- plan, err := compaction.NewPlan(segs, b.opts.BackgroundCompactionPlannerOptions())
- if err != nil {
- instrument.EmitAndLogInvariantViolation(b.iopts, func(l *zap.Logger) {
- l.Error("index background compaction plan error", zap.Error(err))
- })
- return
- }
-
- if len(plan.Tasks) == 0 {
- return
- }
-
- // Kick off compaction.
- b.compact.compactingBackground = true
- go func() {
- b.backgroundCompactWithPlan(plan)
-
- b.Lock()
- b.compact.compactingBackground = false
- b.cleanupBackgroundCompactWithLock()
- b.Unlock()
- }()
-}
-
-func (b *block) shouldEvictCompactedSegmentsWithLock() bool {
- // NB(r): The frozen/compacted segments are derived segments of the
- // active mutable segment, if we ever evict that segment then
- // we don't need the frozen/compacted segments either and should
- // shed them from memory.
- return b.state == blockStateClosed ||
- b.hasEvictedMutableSegmentsAnyTimes
-}
-
-func (b *block) cleanupBackgroundCompactWithLock() {
- if b.state == blockStateOpen {
- // See if we need to trigger another compaction.
- b.maybeBackgroundCompactWithLock()
- return
- }
-
- // Check if need to close all the compacted segments due to
- // having evicted mutable segments or the block being closed.
- if !b.shouldEvictCompactedSegmentsWithLock() {
- return
- }
-
- // Evict compacted segments.
- b.closeCompactedSegments(b.backgroundSegments)
- b.backgroundSegments = nil
-
- // Free compactor resources.
- if b.compact.backgroundCompactor == nil {
- return
- }
-
- if err := b.compact.backgroundCompactor.Close(); err != nil {
- instrument.EmitAndLogInvariantViolation(b.iopts, func(l *zap.Logger) {
- l.Error("error closing index block background compactor", zap.Error(err))
- })
- }
- b.compact.backgroundCompactor = nil
-}
-
-func (b *block) closeCompactedSegments(segments []*readableSeg) {
- for _, seg := range segments {
- err := seg.Segment().Close()
- if err != nil {
- instrument.EmitAndLogInvariantViolation(b.iopts, func(l *zap.Logger) {
- l.Error("could not close compacted segment", zap.Error(err))
- })
- }
- }
-}
-
-func (b *block) backgroundCompactWithPlan(plan *compaction.Plan) {
- sw := b.metrics.backgroundCompactionPlanRunLatency.Start()
- defer sw.Stop()
-
- n := b.compact.numBackground
- b.compact.numBackground++
-
- logger := b.logger.With(
- zap.Time("block", b.blockStart),
- zap.Int("numBackgroundCompaction", n),
- )
- log := n%compactDebugLogEvery == 0
- if log {
- for i, task := range plan.Tasks {
- summary := task.Summary()
- logger.Debug("planned background compaction task",
- zap.Int("task", i),
- zap.Int("numMutable", summary.NumMutable),
- zap.Int("numFST", summary.NumFST),
- zap.String("cumulativeMutableAge", summary.CumulativeMutableAge.String()),
- zap.Int64("cumulativeSize", summary.CumulativeSize),
- )
- }
- }
-
- for i, task := range plan.Tasks {
- err := b.backgroundCompactWithTask(task, log,
- logger.With(zap.Int("task", i)))
- if err != nil {
- instrument.EmitAndLogInvariantViolation(b.iopts, func(l *zap.Logger) {
- l.Error("error compacting segments", zap.Error(err))
- })
- return
- }
- }
-}
-
-func (b *block) backgroundCompactWithTask(
- task compaction.Task,
- log bool,
- logger *zap.Logger,
-) error {
- if log {
- logger.Debug("start compaction task")
- }
-
- segments := make([]segment.Segment, 0, len(task.Segments))
- for _, seg := range task.Segments {
- segments = append(segments, seg.Segment)
- }
-
- start := time.Now()
- compacted, err := b.compact.backgroundCompactor.Compact(segments, mmap.ReporterOptions{
- Context: mmap.Context{
- Name: mmapIndexBlockName,
- },
- Reporter: b.opts.MmapReporter(),
- })
- took := time.Since(start)
- b.metrics.backgroundCompactionTaskRunLatency.Record(took)
-
- if log {
- logger.Debug("done compaction task", zap.Duration("took", took))
- }
-
- if err != nil {
- return err
- }
-
- // Rotate out the replaced frozen segments and add the compacted one.
- b.Lock()
- defer b.Unlock()
-
- result := b.addCompactedSegmentFromSegments(b.backgroundSegments,
- segments, compacted)
- b.backgroundSegments = result
-
- return nil
-}
-
-func (b *block) addCompactedSegmentFromSegments(
- current []*readableSeg,
- segmentsJustCompacted []segment.Segment,
- compacted segment.Segment,
-) []*readableSeg {
- result := make([]*readableSeg, 0, len(current))
- for _, existing := range current {
- keepCurr := true
- for _, seg := range segmentsJustCompacted {
- if existing.Segment() == seg {
- // Do not keep this one, it was compacted just then.
- keepCurr = false
- break
- }
- }
-
- if keepCurr {
- result = append(result, existing)
- continue
- }
-
- err := existing.Segment().Close()
- if err != nil {
- // Already compacted, not much we can do about not closing it.
- instrument.EmitAndLogInvariantViolation(b.iopts, func(l *zap.Logger) {
- l.Error("unable to close compacted block", zap.Error(err))
- })
- }
- }
-
- // Return all the ones we kept plus the new compacted segment
- return append(result, newReadableSeg(compacted, b.opts))
-}
-
func (b *block) WriteBatch(inserts *WriteBatch) (WriteBatchResult, error) {
- b.Lock()
- if b.state != blockStateOpen {
- b.Unlock()
+ b.RLock()
+ if !b.writesAcceptedWithRLock() {
+ b.RUnlock()
return b.writeBatchResult(inserts, b.writeBatchErrorInvalidState(b.state))
}
- if b.compact.compactingForeground {
- b.Unlock()
- return b.writeBatchResult(inserts, errUnableToWriteBlockConcurrent)
+ if b.state == blockStateSealed {
+ coldBlock := b.coldMutableSegments[len(b.coldMutableSegments)-1]
+ b.RUnlock()
+ return b.writeBatchResult(inserts, coldBlock.WriteBatch(inserts))
}
- // Lazily allocate the segment builder and compactors
- err := b.compact.allocLazyBuilderAndCompactors(b.blockOpts, b.opts)
- if err != nil {
- b.Unlock()
- return b.writeBatchResult(inserts, err)
- }
-
- b.compact.compactingForeground = true
- builder := b.compact.segmentBuilder
- b.Unlock()
-
- defer func() {
- b.Lock()
- b.compact.compactingForeground = false
- b.cleanupForegroundCompactWithLock()
- b.Unlock()
- }()
-
- builder.Reset(0)
- insertResultErr := builder.InsertBatch(m3ninxindex.Batch{
- Docs: inserts.PendingDocs(),
- AllowPartialUpdates: true,
- })
- if len(builder.Docs()) == 0 {
- // No inserts, no need to compact.
- return b.writeBatchResult(inserts, insertResultErr)
- }
-
- // We inserted some documents, need to compact immediately into a
- // foreground segment from the segment builder before we can serve reads
- // from an FST segment.
- err = b.foregroundCompactWithBuilder(builder)
- if err != nil {
- return b.writeBatchResult(inserts, err)
- }
-
- // Return result from the original insertion since compaction was successful.
- return b.writeBatchResult(inserts, insertResultErr)
+ b.RUnlock()
+ return b.writeBatchResult(inserts, b.mutableSegments.WriteBatch(inserts))
}
func (b *block) writeBatchResult(
@@ -507,228 +312,42 @@ func (b *block) writeBatchResult(
}, partialErr
}
-func (b *block) foregroundCompactWithBuilder(builder segment.DocumentsBuilder) error {
- // We inserted some documents, need to compact immediately into a
- // foreground segment.
- b.Lock()
- foregroundSegments := b.foregroundSegments
- b.Unlock()
-
- segs := make([]compaction.Segment, 0, len(foregroundSegments)+1)
- segs = append(segs, compaction.Segment{
- Age: 0,
- Size: int64(len(builder.Docs())),
- Type: segments.MutableType,
- Builder: builder,
- })
- for _, seg := range foregroundSegments {
- segs = append(segs, compaction.Segment{
- Age: seg.Age(),
- Size: seg.Segment().Size(),
- Type: segments.FSTType,
- Segment: seg.Segment(),
- })
- }
-
- plan, err := compaction.NewPlan(segs, b.opts.ForegroundCompactionPlannerOptions())
- if err != nil {
- return err
- }
-
- // Check plan
- if len(plan.Tasks) == 0 {
- // Should always generate a task when a mutable builder is passed to planner
- return errForegroundCompactorNoPlan
- }
- if taskNumBuilders(plan.Tasks[0]) != 1 {
- // First task of plan must include the builder, so we can avoid resetting it
- // for the first task, but then safely reset it in consequent tasks
- return errForegroundCompactorBadPlanFirstTask
- }
-
- // Move any unused segments to the background.
- b.Lock()
- b.maybeMoveForegroundSegmentsToBackgroundWithLock(plan.UnusedSegments)
- b.Unlock()
-
- n := b.compact.numForeground
- b.compact.numForeground++
-
- logger := b.logger.With(
- zap.Time("block", b.blockStart),
- zap.Int("numForegroundCompaction", n),
- )
- log := n%compactDebugLogEvery == 0
- if log {
- for i, task := range plan.Tasks {
- summary := task.Summary()
- logger.Debug("planned foreground compaction task",
- zap.Int("task", i),
- zap.Int("numMutable", summary.NumMutable),
- zap.Int("numFST", summary.NumFST),
- zap.Duration("cumulativeMutableAge", summary.CumulativeMutableAge),
- zap.Int64("cumulativeSize", summary.CumulativeSize),
- )
- }
- }
-
- // Run the plan.
- sw := b.metrics.foregroundCompactionPlanRunLatency.Start()
- defer sw.Stop()
-
- // Run the first task, without resetting the builder.
- if err := b.foregroundCompactWithTask(
- builder, plan.Tasks[0],
- log, logger.With(zap.Int("task", 0)),
- ); err != nil {
- return err
- }
-
- // Now run each consequent task, resetting the builder each time since
- // the results from the builder have already been compacted in the first
- // task.
- for i := 1; i < len(plan.Tasks); i++ {
- task := plan.Tasks[i]
- if taskNumBuilders(task) > 0 {
- // Only the first task should compact the builder
- return errForegroundCompactorBadPlanSecondaryTask
- }
- // Now use the builder after resetting it.
- builder.Reset(0)
- if err := b.foregroundCompactWithTask(
- builder, task,
- log, logger.With(zap.Int("task", i)),
- ); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (b *block) maybeMoveForegroundSegmentsToBackgroundWithLock(
- segments []compaction.Segment,
-) {
- if len(segments) == 0 {
- return
- }
- if b.compact.backgroundCompactor == nil {
- // No longer performing background compaction due to evict/close.
- return
- }
-
- b.logger.Debug("moving segments from foreground to background",
- zap.Int("numSegments", len(segments)))
-
- // If background compaction is still active, then we move any unused
- // foreground segments into the background so that they might be
- // compacted by the background compactor at some point.
- i := 0
- for _, currForeground := range b.foregroundSegments {
- movedToBackground := false
- for _, seg := range segments {
- if currForeground.Segment() == seg.Segment {
- b.backgroundSegments = append(b.backgroundSegments, currForeground)
- movedToBackground = true
- break
- }
- }
- if movedToBackground {
- continue // No need to keep this segment, we moved it.
- }
-
- b.foregroundSegments[i] = currForeground
- i++
+func (b *block) writesAcceptedWithRLock() bool {
+ if b.state == blockStateOpen {
+ return true
}
-
- b.foregroundSegments = b.foregroundSegments[:i]
-
- // Potentially kick off a background compaction.
- b.maybeBackgroundCompactWithLock()
+ return b.state == blockStateSealed &&
+ b.nsMD.Options().ColdWritesEnabled()
}
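
writesAcceptedWithRLock encodes the new routing rule: open blocks take warm writes, a sealed block accepts writes only when the namespace has cold writes enabled (those land in the newest cold mutable segment), and everything else is rejected. A minimal sketch of that decision table:

```go
package main

import "fmt"

type blockState int

const (
	blockStateOpen blockState = iota
	blockStateSealed
	blockStateClosed
)

// writesAccepted mirrors writesAcceptedWithRLock: warm writes while open,
// cold writes (when enabled for the namespace) once sealed, nothing after.
func writesAccepted(state blockState, coldWritesEnabled bool) bool {
	if state == blockStateOpen {
		return true
	}
	return state == blockStateSealed && coldWritesEnabled
}

func main() {
	fmt.Println(writesAccepted(blockStateOpen, false))   // true: warm write path
	fmt.Println(writesAccepted(blockStateSealed, true))  // true: cold write path
	fmt.Println(writesAccepted(blockStateSealed, false)) // false: rejected
	fmt.Println(writesAccepted(blockStateClosed, true))  // false: rejected
}
```

Note the WriteBatch rewrite above only needs an RLock for this check, since the mutable segments now own their own write synchronization.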
-func (b *block) foregroundCompactWithTask(
- builder segment.DocumentsBuilder,
- task compaction.Task,
- log bool,
- logger *zap.Logger,
-) error {
- if log {
- logger.Debug("start compaction task")
- }
-
- segments := make([]segment.Segment, 0, len(task.Segments))
- for _, seg := range task.Segments {
- if seg.Segment == nil {
- continue // This means the builder is being used.
- }
- segments = append(segments, seg.Segment)
- }
-
- start := time.Now()
- compacted, err := b.compact.foregroundCompactor.CompactUsingBuilder(builder, segments, mmap.ReporterOptions{
- Context: mmap.Context{
- Name: mmapIndexBlockName,
- },
- Reporter: b.opts.MmapReporter(),
- })
- took := time.Since(start)
- b.metrics.foregroundCompactionTaskRunLatency.Record(took)
-
- if log {
- logger.Debug("done compaction task", zap.Duration("took", took))
- }
-
+func (b *block) executorWithRLock() (search.Executor, error) {
+ readers, err := b.segmentReadersWithRLock()
if err != nil {
- return err
- }
-
- // Rotate in the ones we just compacted.
- b.Lock()
- defer b.Unlock()
-
- result := b.addCompactedSegmentFromSegments(b.foregroundSegments,
- segments, compacted)
- b.foregroundSegments = result
-
- return nil
-}
-
-func (b *block) cleanupForegroundCompactWithLock() {
- // Check if we need to close all the compacted segments due to
- // having evicted mutable segments or the block being closed.
- if !b.shouldEvictCompactedSegmentsWithLock() {
- return
- }
-
- // Evict compacted segments.
- b.closeCompactedSegments(b.foregroundSegments)
- b.foregroundSegments = nil
-
- // Free compactor resources.
- if b.compact.foregroundCompactor == nil {
- return
+ return nil, err
}
- if err := b.compact.foregroundCompactor.Close(); err != nil {
- instrument.EmitAndLogInvariantViolation(b.iopts, func(l *zap.Logger) {
- l.Error("error closing index block foreground compactor", zap.Error(err))
- })
+ indexReaders := make([]m3ninxindex.Reader, 0, len(readers))
+ for _, r := range readers {
+ indexReaders = append(indexReaders, r)
}
- b.compact.foregroundCompactor = nil
- b.compact.segmentBuilder = nil
+ return executor.NewExecutor(indexReaders), nil
}
-func (b *block) executorWithRLock() (search.Executor, error) {
- expectedReaders := len(b.foregroundSegments) + len(b.backgroundSegments)
- for _, group := range b.shardRangesSegments {
- expectedReaders += len(group.segments)
+func (b *block) segmentReadersWithRLock() ([]segment.Reader, error) {
+ expectedReaders := b.mutableSegments.Len()
+ for _, coldSeg := range b.coldMutableSegments {
+ expectedReaders += coldSeg.Len()
}
+ b.shardRangesSegmentsByVolumeType.forEachSegmentGroup(func(group blockShardRangesSegments) error {
+ expectedReaders += len(group.segments)
+ return nil
+ })
var (
- readers = make([]m3ninxindex.Reader, 0, expectedReaders)
+ readers = make([]segment.Reader, 0, expectedReaders)
success = false
+ err error
)
defer func() {
// Cleanup in case any of the readers below fail.
@@ -739,54 +358,34 @@ func (b *block) executorWithRLock() (search.Executor, error) {
}
}()
- // Add foreground and background segments.
- var foregroundErr, backgroundErr error
- readers, foregroundErr = addReadersFromReadableSegments(readers,
- b.foregroundSegments)
- readers, backgroundErr = addReadersFromReadableSegments(readers,
- b.backgroundSegments)
- if err := xerrors.FirstError(foregroundErr, backgroundErr); err != nil {
+ // Add mutable segments.
+ readers, err = b.mutableSegments.AddReaders(readers)
+ if err != nil {
return nil, err
}
- // Loop over the segments associated to shard time ranges.
- for _, group := range b.shardRangesSegments {
- for _, seg := range group.segments {
- reader, err := seg.Reader()
- if err != nil {
- return nil, err
- }
- readers = append(readers, reader)
+ // Add cold mutable segments.
+ for _, coldSeg := range b.coldMutableSegments {
+ readers, err = coldSeg.AddReaders(readers)
+ if err != nil {
+ return nil, err
}
}
- success = true
- return executor.NewExecutor(readers), nil
-}
-
-func (b *block) segmentsWithRLock() []segment.Segment {
- numSegments := len(b.foregroundSegments) + len(b.backgroundSegments)
- for _, group := range b.shardRangesSegments {
- numSegments += len(group.segments)
- }
-
- segments := make([]segment.Segment, 0, numSegments)
- // Add foreground & background segments.
- for _, seg := range b.foregroundSegments {
- segments = append(segments, seg.Segment())
- }
- for _, seg := range b.backgroundSegments {
- segments = append(segments, seg.Segment())
- }
-
// Loop over the segments associated to shard time ranges.
- for _, group := range b.shardRangesSegments {
- for _, seg := range group.segments {
- segments = append(segments, seg)
+ if err := b.shardRangesSegmentsByVolumeType.forEachSegment(func(seg segment.Segment) error {
+ reader, err := seg.Reader()
+ if err != nil {
+ return err
}
+ readers = append(readers, reader)
+ return nil
+ }); err != nil {
+ return nil, err
}
- return segments
+ success = true
+ return readers, nil
}
// Query acquires a read lock on the block so that the segments
@@ -832,7 +431,7 @@ func (b *block) queryWithSpan(
return false, ErrUnableToQueryBlockClosed
}
- exec, err := b.newExecutorFn()
+ exec, err := b.newExecutorWithRLockFn()
if err != nil {
return false, err
}
@@ -842,7 +441,7 @@ func (b *block) queryWithSpan(
execCloseRegistered := false
defer func() {
if !execCloseRegistered {
- b.closeExecutorAsync(exec)
+ b.closeAsync(exec)
}
}()
@@ -864,13 +463,14 @@ func (b *block) queryWithSpan(
}
execCloseRegistered = true // Make sure to not locally close it.
ctx.RegisterFinalizer(resource.FinalizerFn(func() {
- b.closeExecutorAsync(exec)
+ b.closeAsync(exec)
}))
cancellable.ReleaseCheckout()
var (
iterCloser = safeCloser{closable: iter}
size = results.Size()
+ docsCount = results.TotalDocsCount()
docsPool = b.opts.DocumentArrayPool()
batch = docsPool.Get()
batchSize = cap(batch)
@@ -886,7 +486,7 @@ func (b *block) queryWithSpan(
}()
for iter.Next() {
- if opts.LimitExceeded(size) {
+ if opts.SeriesLimitExceeded(size) || opts.DocsLimitExceeded(docsCount) {
break
}
@@ -895,7 +495,7 @@ func (b *block) queryWithSpan(
continue
}
- batch, size, err = b.addQueryResults(cancellable, results, batch)
+ batch, size, docsCount, err = b.addQueryResults(cancellable, results, batch)
if err != nil {
return false, err
}
@@ -903,7 +503,7 @@ func (b *block) queryWithSpan(
// Add last batch to results if remaining.
if len(batch) > 0 {
- batch, size, err = b.addQueryResults(cancellable, results, batch)
+ batch, size, docsCount, err = b.addQueryResults(cancellable, results, batch)
if err != nil {
return false, err
}
@@ -916,14 +516,14 @@ func (b *block) queryWithSpan(
return false, err
}
- exhaustive := !opts.LimitExceeded(size)
+ exhaustive := !opts.SeriesLimitExceeded(size) && !opts.DocsLimitExceeded(docsCount)
return exhaustive, nil
}
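
The former single LimitExceeded(size) check is now split into independent series and docs limits, and a query is exhaustive only if neither tripped. A hedged sketch of the dual check, with hypothetical option names and the common zero-means-unlimited convention assumed:

```go
package main

import "fmt"

// queryOpts carries both limits; zero is treated as unlimited, which is an
// assumption for this sketch rather than a statement about the m3 options.
type queryOpts struct {
	seriesLimit int
	docsLimit   int
}

func (o queryOpts) seriesLimitExceeded(series int) bool {
	return o.seriesLimit > 0 && series >= o.seriesLimit
}

func (o queryOpts) docsLimitExceeded(docs int) bool {
	return o.docsLimit > 0 && docs >= o.docsLimit
}

func main() {
	opts := queryOpts{seriesLimit: 100, docsLimit: 1000}
	series, docs := 42, 1000

	// Exhaustive only when neither limit tripped, as in queryWithSpan above.
	exhaustive := !opts.seriesLimitExceeded(series) && !opts.docsLimitExceeded(docs)
	fmt.Println(exhaustive) // false: the docs limit tripped first
}
```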
-func (b *block) closeExecutorAsync(exec search.Executor) {
- // Note: This only happens if closing the readers isn't clean.
- if err := exec.Close(); err != nil {
- b.logger.Error("could not close search exec", zap.Error(err))
+func (b *block) closeAsync(closer io.Closer) {
+ if err := closer.Close(); err != nil {
+ // Note: This only happens if closing the readers isn't clean.
+ b.logger.Error("could not close query index block resource", zap.Error(err))
}
}
@@ -931,16 +531,21 @@ func (b *block) addQueryResults(
cancellable *resource.CancellableLifetime,
results BaseResults,
batch []doc.Document,
-) ([]doc.Document, int, error) {
+) ([]doc.Document, int, int, error) {
+ // update recently queried docs to monitor memory.
+ if err := b.queryStats.Update(len(batch)); err != nil {
+ return batch, 0, 0, err
+ }
+
// checkout the lifetime of the query before adding results.
queryValid := cancellable.TryCheckout()
if !queryValid {
// query not valid any longer, do not add results and return early.
- return batch, 0, errCancelledQuery
+ return batch, 0, 0, errCancelledQuery
}
// try to add the docs to the resource.
- size, err := results.AddDocuments(batch)
+ size, docsCount, err := results.AddDocuments(batch)
// immediately release the checkout on the lifetime of query.
cancellable.ReleaseCheckout()
@@ -953,7 +558,7 @@ func (b *block) addQueryResults(
batch = batch[:0]
// return results.
- return batch, size, err
+ return batch, size, docsCount, err
}
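
queryStats.Update is invoked with the batch length before any results are added, so a tracker of recently queried documents can abort work under memory pressure. A sketch of one plausible tracker; this is an illustration, not the stats package API:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errTooManyDocs = errors.New("query aborted: too many recently queried docs")

// docsTracker counts recently queried documents and fails past a cap,
// mirroring the early return in addQueryResults above.
type docsTracker struct {
	recent int64
	max    int64
}

func (t *docsTracker) update(n int) error {
	if atomic.AddInt64(&t.recent, int64(n)) > t.max {
		return errTooManyDocs
	}
	return nil
}

func main() {
	t := &docsTracker{max: 256}
	fmt.Println(t.update(200)) // <nil>: under the cap
	fmt.Println(t.update(100)) // error: cap exceeded, query gives up early
}
```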
// Aggregate acquires a read lock on the block so that the segments
@@ -997,7 +602,8 @@ func (b *block) aggregateWithSpan(
aggOpts := results.AggregateResultsOptions()
iterateTerms := aggOpts.Type == AggregateTagNamesAndValues
iterateOpts := fieldsAndTermsIteratorOpts{
- iterateTerms: iterateTerms,
+ restrictByQuery: aggOpts.RestrictByQuery,
+ iterateTerms: iterateTerms,
allowFn: func(field []byte) bool {
// skip any field names that we shouldn't allow.
if bytes.Equal(field, doc.IDReservedFieldName) {
@@ -1005,7 +611,7 @@ func (b *block) aggregateWithSpan(
}
return aggOpts.FieldFilter.Allow(field)
},
- fieldIterFn: func(s segment.Segment) (segment.FieldsIterator, error) {
+ fieldIterFn: func(r segment.Reader) (segment.FieldsIterator, error) {
// NB(prateek): we default to using the regular (FST) fields iterator
// unless we have a predefined list of fields we know we need to restrict
// our search to, in which case we iterate that list and check if known values
@@ -1017,9 +623,9 @@ func (b *block) aggregateWithSpan(
// to this function is expected to have (FieldsFilter) pretty small. If that changes
// in the future, we can revisit this.
if len(aggOpts.FieldFilter) == 0 {
- return s.FieldsIterable().Fields()
+ return r.Fields()
}
- return newFilterFieldsIterator(s, aggOpts.FieldFilter)
+ return newFilterFieldsIterator(r, aggOpts.FieldFilter)
},
}
@@ -1030,6 +636,7 @@ func (b *block) aggregateWithSpan(
var (
size = results.Size()
+ docsCount = results.TotalDocsCount()
batch = b.opts.AggregateResultsEntryArrayPool().Get()
batchSize = cap(batch)
iterClosed = false // tracking whether we need to free the iterator at the end.
@@ -1046,20 +653,34 @@ func (b *block) aggregateWithSpan(
}
}()
- segs := b.segmentsWithRLock()
- for _, s := range segs {
- if opts.LimitExceeded(size) {
+ readers, err := b.segmentReadersWithRLock()
+ if err != nil {
+ return false, err
+ }
+
+ // Make sure to close readers at end of query since results can
+ // include references to the underlying bytes from the index segment
+ // read by the readers.
+ for _, reader := range readers {
+ reader := reader // Capture for inline function.
+ ctx.RegisterFinalizer(resource.FinalizerFn(func() {
+ b.closeAsync(reader)
+ }))
+ }
+
+ for _, reader := range readers {
+ if opts.SeriesLimitExceeded(size) || opts.DocsLimitExceeded(docsCount) {
break
}
- err = iter.Reset(s, iterateOpts)
+ err = iter.Reset(reader, iterateOpts)
if err != nil {
return false, err
}
iterClosed = false // only once the iterator has been successfully Reset().
for iter.Next() {
- if opts.LimitExceeded(size) {
+ if opts.SeriesLimitExceeded(size) || opts.DocsLimitExceeded(docsCount) {
break
}
@@ -1069,7 +690,7 @@ func (b *block) aggregateWithSpan(
continue
}
- batch, size, err = b.addAggregateResults(cancellable, results, batch)
+ batch, size, docsCount, err = b.addAggregateResults(cancellable, results, batch)
if err != nil {
return false, err
}
@@ -1087,13 +708,13 @@ func (b *block) aggregateWithSpan(
// Add last batch to results if remaining.
if len(batch) > 0 {
- batch, size, err = b.addAggregateResults(cancellable, results, batch)
+ batch, size, docsCount, err = b.addAggregateResults(cancellable, results, batch)
if err != nil {
return false, err
}
}
- exhaustive := !opts.LimitExceeded(size)
+ exhaustive := !opts.SeriesLimitExceeded(size) && !opts.DocsLimitExceeded(docsCount)
return exhaustive, nil
}
@@ -1136,11 +757,18 @@ func (b *block) appendFieldAndTermToBatch(
reuseLastEntry = true
entry = batch[len(batch)-1] // avoid alloc cause we already have the field
} else {
- entry.Field = b.pooledID(field) // allocate id because this is the first time we've seen it
+ // allocate id because this is the first time we've seen it
+ // NB(r): Iterating fields FST, this byte slice is only temporarily available
+ // since we are pushing/popping characters from the stack as we iterate
+ // the fields FST and reusing the same byte slice.
+ entry.Field = b.pooledID(field)
}
if includeTerms {
// terms are always new (as far we know without checking the map for duplicates), so we allocate
+ // NB(r): Iterating terms FST, this byte slice is only temporarily available
+ // since we are pushing/popping characters from the stack as we iterate
+ // the terms FST and reusing the same byte slice.
entry.Terms = append(entry.Terms, b.pooledID(term))
}
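
The NB(r) comments above exist because FST iteration reuses a single scratch byte slice, pushing and popping characters in place; retaining that slice without copying would alias memory that mutates on the next step. A small demonstration of the hazard and the copy that pooledID effectively performs:

```go
package main

import "fmt"

func main() {
	// buf plays the role of the iterator's reusable scratch slice.
	buf := []byte("foo")

	aliased := buf                        // retains the shared backing array.
	copied := append([]byte(nil), buf...) // what pooledID effectively does.

	// The iterator advances and rewrites its scratch buffer in place.
	copy(buf, "bar")

	fmt.Println(string(aliased)) // "bar": silently corrupted by the reuse.
	fmt.Println(string(copied))  // "foo": safe to retain in results.
}
```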
@@ -1164,16 +792,21 @@ func (b *block) addAggregateResults(
cancellable *resource.CancellableLifetime,
results AggregateResults,
batch []AggregateResultsEntry,
-) ([]AggregateResultsEntry, int, error) {
+) ([]AggregateResultsEntry, int, int, error) {
+ // update recently queried docs to monitor memory.
+ if err := b.queryStats.Update(len(batch)); err != nil {
+ return batch, 0, 0, err
+ }
+
// checkout the lifetime of the query before adding results.
queryValid := cancellable.TryCheckout()
if !queryValid {
// query not valid any longer, do not add results and return early.
- return batch, 0, errCancelledQuery
+ return batch, 0, 0, errCancelledQuery
}
// try to add the docs to the resource.
- size := results.AddFields(batch)
+ size, docsCount := results.AddFields(batch)
// immediately release the checkout on the lifetime of query.
cancellable.ReleaseCheckout()
@@ -1186,15 +819,27 @@ func (b *block) addAggregateResults(
batch = batch[:0]
// return results.
- return batch, size, nil
+ return batch, size, docsCount, nil
}
func (b *block) AddResults(
- results result.IndexBlock,
+ resultsByVolumeType result.IndexBlockByVolumeType,
) error {
b.Lock()
defer b.Unlock()
+ multiErr := xerrors.NewMultiError()
+ for volumeType, results := range resultsByVolumeType.Iter() {
+ multiErr = multiErr.Add(b.addResults(volumeType, results))
+ }
+
+ return multiErr.FinalError()
+}
+
+func (b *block) addResults(
+ volumeType persist.IndexVolumeType,
+ results result.IndexBlock,
+) error {
// NB(prateek): we have to allow bootstrap to succeed even if we're Sealed because
// of topology changes. i.e. if the current m3db process is assigned new shards,
// we need to include their data in the index.
@@ -1212,6 +857,12 @@ func (b *block) AddResults(
results.Fulfilled().SummaryString(), blockRange.String())
}
+ shardRangesSegments, ok := b.shardRangesSegmentsByVolumeType[volumeType]
+ if !ok {
+ shardRangesSegments = make([]blockShardRangesSegments, 0)
+ b.shardRangesSegmentsByVolumeType[volumeType] = shardRangesSegments
+ }
+
var (
plCache = b.opts.PostingsListCache()
readThroughOpts = b.opts.ReadThroughSegmentOptions()
@@ -1219,12 +870,12 @@ func (b *block) AddResults(
)
readThroughSegments := make([]segment.Segment, 0, len(segments))
for _, seg := range segments {
- readThroughSeg := seg
- if immSeg, ok := seg.(segment.ImmutableSegment); ok {
+ elem := seg.Segment()
+ if immSeg, ok := elem.(segment.ImmutableSegment); ok {
// only wrap the immutable segments with a read through cache.
- readThroughSeg = NewReadThroughSegment(immSeg, plCache, readThroughOpts)
+ elem = NewReadThroughSegment(immSeg, plCache, readThroughOpts)
}
- readThroughSegments = append(readThroughSegments, readThroughSeg)
+ readThroughSegments = append(readThroughSegments, elem)
}
entry := blockShardRangesSegments{
@@ -1234,8 +885,8 @@ func (b *block) AddResults(
// first see if this block can cover all our current blocks covering shard
// time ranges.
- currFulfilled := make(result.ShardTimeRanges)
- for _, existing := range b.shardRangesSegments {
+ currFulfilled := result.NewShardTimeRanges()
+ for _, existing := range shardRangesSegments {
currFulfilled.AddRanges(existing.shardTimeRanges)
}
@@ -1244,21 +895,21 @@ func (b *block) AddResults(
if !unfulfilledBySegments.IsEmpty() {
// This is the case where it cannot wholly replace the current set of blocks
// so simply append the segments in this case.
- b.shardRangesSegments = append(b.shardRangesSegments, entry)
+ b.shardRangesSegmentsByVolumeType[volumeType] = append(shardRangesSegments, entry)
return nil
}
// This is the case where the new segments can wholly replace the
// current set of blocks since unfulfilled by the new segments is zero.
multiErr := xerrors.NewMultiError()
- for i, group := range b.shardRangesSegments {
+ for i, group := range shardRangesSegments {
for _, seg := range group.segments {
// Make sure to close the existing segments.
multiErr = multiErr.Add(seg.Close())
}
- b.shardRangesSegments[i] = blockShardRangesSegments{}
+ shardRangesSegments[i] = blockShardRangesSegments{}
}
- b.shardRangesSegments = append(b.shardRangesSegments[:0], entry)
+ b.shardRangesSegmentsByVolumeType[volumeType] = append(shardRangesSegments[:0], entry)
return multiErr.FinalError()
}
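
To restate the merge rule per volume type: if the incoming entry's fulfilled ranges cover everything the existing groups covered, it wholly replaces them (closing the old segments); otherwise it is appended alongside them. A hedged sketch of the decision with plain range sets standing in for ShardTimeRanges:

```go
package main

import "fmt"

// covers reports whether every currently fulfilled range is also covered by
// the incoming entry, i.e. unfulfilledBySegments would be empty.
func covers(incoming, current map[string]bool) bool {
	for r := range current {
		if !incoming[r] {
			return false
		}
	}
	return true
}

func main() {
	current := map[string]bool{"shard0:t0-t2": true}
	incoming := map[string]bool{"shard0:t0-t2": true, "shard1:t0-t2": true}

	if covers(incoming, current) {
		fmt.Println("replace: close the old groups, keep only the new entry")
	} else {
		fmt.Println("append: the new entry joins the existing groups")
	}
}
```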
@@ -1272,38 +923,41 @@ func (b *block) Tick(c context.Cancellable) (BlockTickResult, error) {
}
// Add foreground/background segments.
- for _, seg := range b.foregroundSegments {
- result.NumSegments++
- result.NumDocs += seg.Segment().Size()
- }
- for _, seg := range b.backgroundSegments {
- result.NumSegments++
- result.NumDocs += seg.Segment().Size()
+ numSegments, numDocs := b.mutableSegments.NumSegmentsAndDocs()
+ for _, coldSeg := range b.coldMutableSegments {
+ coldNumSegments, coldNumDocs := coldSeg.NumSegmentsAndDocs()
+ numSegments += coldNumSegments
+ numDocs += coldNumDocs
}
+ result.NumSegments += numSegments
+ result.NumSegmentsMutable += numSegments
+ result.NumDocs += numDocs
multiErr := xerrors.NewMultiError()
// Any segments covering persisted shard ranges.
- for _, group := range b.shardRangesSegments {
- for _, seg := range group.segments {
- result.NumSegments++
- result.NumDocs += seg.Size()
+ b.shardRangesSegmentsByVolumeType.forEachSegment(func(seg segment.Segment) error {
+ result.NumSegments++
+ result.NumSegmentsBootstrapped++
+ result.NumDocs += seg.Size()
- immSeg, ok := seg.(segment.ImmutableSegment)
- if !ok {
- b.metrics.segmentFreeMmapSkipNotImmutable.Inc(1)
- continue
- }
+ immSeg, ok := seg.(segment.ImmutableSegment)
+ if !ok {
+ b.metrics.segmentFreeMmapSkipNotImmutable.Inc(1)
+ return nil
+ }
- // TODO(bodu): Revist this and implement a more sophisticated free strategy.
- if err := immSeg.FreeMmap(); err != nil {
- multiErr = multiErr.Add(err)
- b.metrics.segmentFreeMmapError.Inc(1)
- continue
- }
- b.metrics.segmentFreeMmapSuccess.Inc(1)
+ // TODO(bodu): Revisit this and implement a more sophisticated free strategy.
+ if err := immSeg.FreeMmap(); err != nil {
+ multiErr = multiErr.Add(err)
+ b.metrics.segmentFreeMmapError.Inc(1)
+ return nil
}
- }
+
+ result.FreeMmap++
+ b.metrics.segmentFreeMmapSuccess.Inc(1)
+ return nil
+ })
return result, multiErr.FinalError()
}
@@ -1332,36 +986,22 @@ func (b *block) Stats(reporter BlockStatsReporter) error {
return ErrUnableReportStatsBlockClosed
}
- for _, seg := range b.foregroundSegments {
- _, mutable := seg.Segment().(segment.MutableSegment)
- reporter.ReportSegmentStats(BlockSegmentStats{
- Type: ActiveForegroundSegment,
- Mutable: mutable,
- Age: seg.Age(),
- Size: seg.Segment().Size(),
- })
+ b.mutableSegments.Stats(reporter)
+ for _, coldSeg := range b.coldMutableSegments {
+ // TODO(bodu): Cold segment stats should probably be reported
+ // under a distinct segment type.
+ coldSeg.Stats(reporter)
}
- for _, seg := range b.backgroundSegments {
- _, mutable := seg.Segment().(segment.MutableSegment)
+
+ b.shardRangesSegmentsByVolumeType.forEachSegment(func(seg segment.Segment) error {
+ _, mutable := seg.(segment.MutableSegment)
reporter.ReportSegmentStats(BlockSegmentStats{
- Type: ActiveBackgroundSegment,
+ Type: FlushedSegment,
Mutable: mutable,
- Age: seg.Age(),
- Size: seg.Segment().Size(),
+ Size: seg.Size(),
})
- }
-
- for _, shardRangeSegments := range b.shardRangesSegments {
- for _, seg := range shardRangeSegments.segments {
- _, mutable := seg.(segment.MutableSegment)
- reporter.ReportSegmentStats(BlockSegmentStats{
- Type: FlushedSegment,
- Mutable: mutable,
- Size: seg.Size(),
- })
- }
- }
-
+ return nil
+ })
return nil
}
@@ -1379,23 +1019,16 @@ func (b *block) NeedsMutableSegmentsEvicted() bool {
b.RLock()
defer b.RUnlock()
- // Check any foreground/background segments that can be evicted after a flush.
- var anyMutableSegmentNeedsEviction bool
- for _, seg := range b.foregroundSegments {
- anyMutableSegmentNeedsEviction = anyMutableSegmentNeedsEviction || seg.Segment().Size() > 0
- }
- for _, seg := range b.backgroundSegments {
- anyMutableSegmentNeedsEviction = anyMutableSegmentNeedsEviction || seg.Segment().Size() > 0
- }
+ // Check any mutable segments that can be evicted after a flush.
+ anyMutableSegmentNeedsEviction := b.mutableSegments.NeedsEviction()
// Check bootstrapped segments to see if any of them need an eviction.
- for _, shardRangeSegments := range b.shardRangesSegments {
- for _, seg := range shardRangeSegments.segments {
- if mutableSeg, ok := seg.(segment.MutableSegment); ok {
- anyMutableSegmentNeedsEviction = anyMutableSegmentNeedsEviction || mutableSeg.Size() > 0
- }
+ b.shardRangesSegmentsByVolumeType.forEachSegment(func(seg segment.Segment) error {
+ if mutableSeg, ok := seg.(segment.MutableSegment); ok {
+ anyMutableSegmentNeedsEviction = anyMutableSegmentNeedsEviction || mutableSeg.Size() > 0
}
- }
+ return nil
+ })
return anyMutableSegmentNeedsEviction
}
@@ -1407,36 +1040,91 @@ func (b *block) EvictMutableSegments() error {
return fmt.Errorf("unable to evict mutable segments, block must be sealed, found: %v", b.state)
}
- b.hasEvictedMutableSegmentsAnyTimes = true
-
- // If not compacting, trigger a cleanup so that all frozen segments get
- // closed, otherwise after the current running compaction the compacted
- // segments will get closed.
- if !b.compact.compactingForeground {
- b.cleanupForegroundCompactWithLock()
- }
- if !b.compact.compactingBackground {
- b.cleanupBackgroundCompactWithLock()
- }
+ b.mutableSegments.Close()
// Close any other mutable segments that was added.
multiErr := xerrors.NewMultiError()
- for idx := range b.shardRangesSegments {
- segments := make([]segment.Segment, 0, len(b.shardRangesSegments[idx].segments))
- for _, seg := range b.shardRangesSegments[idx].segments {
- mutableSeg, ok := seg.(segment.MutableSegment)
- if !ok {
- segments = append(segments, seg)
- continue
+ for _, shardRangesSegments := range b.shardRangesSegmentsByVolumeType {
+ for idx := range shardRangesSegments {
+ segments := make([]segment.Segment, 0, len(shardRangesSegments[idx].segments))
+ for _, seg := range shardRangesSegments[idx].segments {
+ mutableSeg, ok := seg.(segment.MutableSegment)
+ if !ok {
+ segments = append(segments, seg)
+ continue
+ }
+ multiErr = multiErr.Add(mutableSeg.Close())
}
- multiErr = multiErr.Add(mutableSeg.Close())
+ shardRangesSegments[idx].segments = segments
}
- b.shardRangesSegments[idx].segments = segments
}
return multiErr.FinalError()
}
+func (b *block) NeedsColdMutableSegmentsEvicted() bool {
+ b.RLock()
+ defer b.RUnlock()
+ var anyColdMutableSegmentNeedsEviction bool
+ for _, coldSeg := range b.coldMutableSegments {
+ anyColdMutableSegmentNeedsEviction = anyColdMutableSegmentNeedsEviction || coldSeg.NeedsEviction()
+ }
+ return b.state == blockStateSealed && anyColdMutableSegmentNeedsEviction
+}
+
+func (b *block) EvictColdMutableSegments() error {
+ b.Lock()
+ defer b.Unlock()
+ if b.state != blockStateSealed {
+ return fmt.Errorf("unable to evict cold mutable segments, block must be sealed, found: %v", b.state)
+ }
+
+ // Evict/remove all but the most recent cold mutable segment (That is the one we are actively writing to).
+ for i, coldSeg := range b.coldMutableSegments {
+ if i < len(b.coldMutableSegments)-1 {
+ coldSeg.Close()
+ b.coldMutableSegments[i] = nil
+ }
+ }
+ // Swap last with first and truncate the slice.
+ lastIdx := len(b.coldMutableSegments) - 1
+ b.coldMutableSegments[0], b.coldMutableSegments[lastIdx] = b.coldMutableSegments[lastIdx], b.coldMutableSegments[0]
+ b.coldMutableSegments = b.coldMutableSegments[:1]
+ return nil
+}
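
Since the actively written cold segment is always the last element, eviction closes everything before it, swaps last into slot zero, and truncates to length one. The same idiom on a plain slice:

```go
package main

import "fmt"

func main() {
	segs := []string{"cold-0", "cold-1", "cold-2 (active)"}

	// Close everything but the most recent segment (the one being written).
	for i := range segs[:len(segs)-1] {
		segs[i] = "" // stand-in for coldSeg.Close(); b.coldMutableSegments[i] = nil
	}

	// Swap last with first, then truncate: only the active segment survives.
	last := len(segs) - 1
	segs[0], segs[last] = segs[last], segs[0]
	segs = segs[:1]

	fmt.Println(segs) // [cold-2 (active)]
}
```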
+
+func (b *block) RotateColdMutableSegments() {
+ b.Lock()
+ defer b.Unlock()
+ b.coldMutableSegments = append(b.coldMutableSegments, newMutableSegments(
+ b.blockStart,
+ b.opts,
+ b.blockOpts,
+ b.namespaceRuntimeOptsMgr,
+ b.iopts,
+ ))
+}
+
+func (b *block) MemorySegmentsData(ctx context.Context) ([]fst.SegmentData, error) {
+ b.RLock()
+ defer b.RUnlock()
+ if b.state == blockStateClosed {
+ return nil, errBlockAlreadyClosed
+ }
+ data, err := b.mutableSegments.MemorySegmentsData(ctx)
+ if err != nil {
+ return nil, err
+ }
+ for _, coldSeg := range b.coldMutableSegments {
+ coldData, err := coldSeg.MemorySegmentsData(ctx)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, coldData...)
+ }
+ return data, nil
+}
+
func (b *block) Close() error {
b.Lock()
defer b.Unlock()
@@ -1445,24 +1133,21 @@ func (b *block) Close() error {
}
b.state = blockStateClosed
- // If not compacting, trigger a cleanup so that all frozen segments get
- // closed, otherwise after the current running compaction the compacted
- // segments will get closed.
- if !b.compact.compactingForeground {
- b.cleanupForegroundCompactWithLock()
- }
- if !b.compact.compactingBackground {
- b.cleanupBackgroundCompactWithLock()
+ b.mutableSegments.Close()
+ for _, coldSeg := range b.coldMutableSegments {
+ coldSeg.Close()
}
// Close any other added segments too.
var multiErr xerrors.MultiError
- for _, group := range b.shardRangesSegments {
- for _, seg := range group.segments {
- multiErr = multiErr.Add(seg.Close())
- }
+ b.shardRangesSegmentsByVolumeType.forEachSegment(func(seg segment.Segment) error {
+ multiErr = multiErr.Add(seg.Close())
+ return nil
+ })
+
+ for volumeType := range b.shardRangesSegmentsByVolumeType {
+ b.shardRangesSegmentsByVolumeType[volumeType] = nil
}
- b.shardRangesSegments = nil
return multiErr.FinalError()
}
@@ -1482,67 +1167,6 @@ func (b *block) writeBatchErrorInvalidState(state blockState) error {
}
}
-// blockCompact has several lazily allocated compaction components.
-type blockCompact struct {
- segmentBuilder segment.DocumentsBuilder
- foregroundCompactor *compaction.Compactor
- backgroundCompactor *compaction.Compactor
- compactingForeground bool
- compactingBackground bool
- numForeground int
- numBackground int
-}
-
-func (b *blockCompact) allocLazyBuilderAndCompactors(
- blockOpts BlockOptions,
- opts Options,
-) error {
- var (
- err error
- docsPool = opts.DocumentArrayPool()
- )
- if b.segmentBuilder == nil {
- b.segmentBuilder, err = builder.NewBuilderFromDocuments(opts.SegmentBuilderOptions())
- if err != nil {
- return err
- }
- }
-
- if b.foregroundCompactor == nil {
- b.foregroundCompactor, err = compaction.NewCompactor(docsPool,
- DocumentArrayPoolCapacity,
- opts.SegmentBuilderOptions(),
- opts.FSTSegmentOptions(),
- compaction.CompactorOptions{
- FSTWriterOptions: &fst.WriterOptions{
- // DisableRegistry is set to true to trade a larger FST size
- // for a faster FST compaction since we want to reduce the end
- // to end latency for time to first index a metric.
- DisableRegistry: true,
- },
- MmapDocsData: blockOpts.ForegroundCompactorMmapDocsData,
- })
- if err != nil {
- return err
- }
- }
-
- if b.backgroundCompactor == nil {
- b.backgroundCompactor, err = compaction.NewCompactor(docsPool,
- DocumentArrayPoolCapacity,
- opts.SegmentBuilderOptions(),
- opts.FSTSegmentOptions(),
- compaction.CompactorOptions{
- MmapDocsData: blockOpts.BackgroundCompactorMmapDocsData,
- })
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
type closable interface {
Close() error
}
@@ -1559,28 +1183,3 @@ func (c *safeCloser) Close() error {
c.closed = true
return c.closable.Close()
}
-
-func taskNumBuilders(task compaction.Task) int {
- builders := 0
- for _, seg := range task.Segments {
- if seg.Builder != nil {
- builders++
- continue
- }
- }
- return builders
-}
-
-func addReadersFromReadableSegments(
- readers []m3ninxindex.Reader,
- segments []*readableSeg,
-) ([]m3ninxindex.Reader, error) {
- for _, seg := range segments {
- reader, err := seg.Segment().Reader()
- if err != nil {
- return nil, err
- }
- readers = append(readers, reader)
- }
- return readers, nil
-}
diff --git a/src/dbnode/storage/index/block_bench_test.go b/src/dbnode/storage/index/block_bench_test.go
index c3d8eb20bf..180b5048d3 100644
--- a/src/dbnode/storage/index/block_bench_test.go
+++ b/src/dbnode/storage/index/block_bench_test.go
@@ -28,6 +28,7 @@ import (
"testing"
"time"
+ "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/m3ninx/doc"
xtime "github.com/m3db/m3/src/x/time"
@@ -46,8 +47,8 @@ func BenchmarkBlockWrite(b *testing.B) {
now := time.Now()
blockStart := now.Truncate(blockSize)
- bl, err := NewBlock(blockStart, testMD,
- BlockOptions{}, testOpts)
+ bl, err := NewBlock(blockStart, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(b, err)
defer func() {
require.NoError(b, bl.Close())
@@ -103,7 +104,7 @@ func BenchmarkBlockWrite(b *testing.B) {
// Reset state
bl.(*block).Lock()
- bl.(*block).foregroundSegments = nil
+ bl.(*block).mutableSegments.foregroundSegments = nil
bl.(*block).Unlock()
}
b.StopTimer()
diff --git a/src/dbnode/storage/index/block_prop_test.go b/src/dbnode/storage/index/block_prop_test.go
index 8bf9974796..447ca2f12e 100644
--- a/src/dbnode/storage/index/block_prop_test.go
+++ b/src/dbnode/storage/index/block_prop_test.go
@@ -35,6 +35,7 @@ import (
"github.com/m3db/m3/src/m3ninx/idx"
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/m3ninx/search"
"github.com/m3db/m3/src/m3ninx/search/proptest"
"github.com/m3db/m3/src/x/context"
@@ -177,19 +178,22 @@ func TestPostingsListCacheDoesNotAffectBlockQueryResults(t *testing.T) {
}
func newPropTestBlock(t *testing.T, blockStart time.Time, nsMeta namespace.Metadata, opts Options) (Block, error) {
- blk, err := NewBlock(blockStart, nsMeta, BlockOptions{}, opts)
+ blk, err := NewBlock(blockStart, nsMeta, BlockOptions{},
+ namespace.NewRuntimeOptionsManager(nsMeta.ID().String()), opts)
require.NoError(t, err)
var (
memSeg = testSegment(t, lotsTestDocuments...).(segment.MutableSegment)
fstSeg = fst.ToTestSegment(t, memSeg, testFstOptions)
// Need at least one shard to look fulfilled.
- fulfilled = result.NewShardTimeRanges(blockStart, blockStart.Add(testBlockSize), uint32(1))
- indexBlock = result.NewIndexBlock(blockStart, []segment.Segment{fstSeg}, fulfilled)
+ fulfilled = result.NewShardTimeRangesFromRange(blockStart, blockStart.Add(testBlockSize), uint32(1))
+ indexBlockByVolumeType = result.NewIndexBlockByVolumeType(blockStart)
)
+ indexBlockByVolumeType.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(fstSeg, false)}, fulfilled))
+
// Use the AddResults API because that's the only scenario in which we'll wrap a segment
// in a ReadThroughSegment to use the postings list cache.
- err = blk.AddResults(indexBlock)
+ err = blk.AddResults(indexBlockByVolumeType)
require.NoError(t, err)
return blk, nil
}
diff --git a/src/dbnode/storage/index/block_test.go b/src/dbnode/storage/index/block_test.go
index db82cfb94e..5bb078b676 100644
--- a/src/dbnode/storage/index/block_test.go
+++ b/src/dbnode/storage/index/block_test.go
@@ -33,9 +33,9 @@ import (
"github.com/m3db/m3/src/dbnode/tracepoint"
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/idx"
- "github.com/m3db/m3/src/m3ninx/index"
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/mem"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/m3ninx/search"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
@@ -75,7 +75,8 @@ func newTestNSMetadata(t require.TestingT) namespace.Metadata {
func TestBlockCtor(t *testing.T) {
md := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- b, err := NewBlock(start, md, BlockOptions{}, testOpts)
+ b, err := NewBlock(start, md, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
require.Equal(t, start, b.StartTime())
@@ -98,7 +99,8 @@ func TestBlockWriteAfterClose(t *testing.T) {
Truncate(blockSize).
Add(time.Minute)
- b, err := NewBlock(blockStart, testMD, BlockOptions{}, testOpts)
+ b, err := NewBlock(blockStart, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
require.NoError(t, b.Close())
@@ -146,7 +148,8 @@ func TestBlockWriteAfterSeal(t *testing.T) {
Truncate(blockSize).
Add(time.Minute)
- b, err := NewBlock(blockStart, testMD, BlockOptions{}, testOpts)
+ b, err := NewBlock(blockStart, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
require.NoError(t, b.Seal())
@@ -194,7 +197,8 @@ func TestBlockWrite(t *testing.T) {
Truncate(blockSize).
Add(time.Minute)
- blk, err := NewBlock(blockStart, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(blockStart, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
defer func() {
require.NoError(t, blk.Close())
@@ -243,7 +247,8 @@ func TestBlockWriteActualSegmentPartialFailure(t *testing.T) {
Truncate(blockSize).
Add(time.Minute)
- blk, err := NewBlock(blockStart, md, BlockOptions{}, testOpts)
+ blk, err := NewBlock(blockStart, md, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
@@ -265,7 +270,7 @@ func TestBlockWriteActualSegmentPartialFailure(t *testing.T) {
batch.Append(WriteBatchEntry{
Timestamp: nowNotBlockStartAligned,
OnIndexSeries: h2,
- }, testDoc1DupeID())
+ }, doc.Document{})
res, err := b.WriteBatch(batch)
require.Error(t, err)
require.Equal(t, int64(1), res.NumSuccess)
@@ -275,7 +280,7 @@ func TestBlockWriteActualSegmentPartialFailure(t *testing.T) {
batch.ForEach(func(
idx int,
entry WriteBatchEntry,
- doc doc.Document,
+ _ doc.Document,
result WriteBatchEntryResult,
) {
verified++
@@ -283,7 +288,7 @@ func TestBlockWriteActualSegmentPartialFailure(t *testing.T) {
require.NoError(t, result.Err)
} else {
require.Error(t, result.Err)
- require.Equal(t, index.ErrDuplicateID, result.Err)
+ require.Equal(t, doc.ErrEmptyDocument, result.Err)
}
})
require.Equal(t, 2, verified)
@@ -303,7 +308,8 @@ func TestBlockWritePartialFailure(t *testing.T) {
Truncate(blockSize).
Add(time.Minute)
- blk, err := NewBlock(blockStart, md, BlockOptions{}, testOpts)
+ blk, err := NewBlock(blockStart, md, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
@@ -325,7 +331,7 @@ func TestBlockWritePartialFailure(t *testing.T) {
batch.Append(WriteBatchEntry{
Timestamp: nowNotBlockStartAligned,
OnIndexSeries: h2,
- }, testDoc1DupeID())
+ }, doc.Document{})
res, err := b.WriteBatch(batch)
require.Error(t, err)
@@ -352,7 +358,8 @@ func TestBlockWritePartialFailure(t *testing.T) {
func TestBlockQueryAfterClose(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- b, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ b, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
require.Equal(t, start, b.StartTime())
@@ -367,7 +374,8 @@ func TestBlockQueryAfterClose(t *testing.T) {
func TestBlockQueryWithCancelledQuery(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- b, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ b, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
require.Equal(t, start, b.StartTime())
@@ -386,15 +394,14 @@ func TestBlockQueryWithCancelledQuery(t *testing.T) {
func TestBlockQueryExecutorError(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
- b.newExecutorFn = func() (search.Executor, error) {
- b.RLock() // ensures we call newExecutorFn with RLock, or this would deadlock
- defer b.RUnlock()
+ b.newExecutorWithRLockFn = func() (search.Executor, error) {
return nil, fmt.Errorf("random-err")
}
@@ -409,14 +416,15 @@ func TestBlockQuerySegmentReaderError(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
seg := segment.NewMockSegment(ctrl)
- b.foregroundSegments = []*readableSeg{newReadableSeg(seg, testOpts)}
+ b.mutableSegments.foregroundSegments = []*readableSeg{newReadableSeg(seg, testOpts)}
randErr := fmt.Errorf("random-err")
seg.EXPECT().Reader().Return(nil, randErr)
@@ -431,7 +439,8 @@ func TestBlockQueryAddResultsSegmentsError(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
@@ -441,15 +450,18 @@ func TestBlockQueryAddResultsSegmentsError(t *testing.T) {
seg2 := segment.NewMockMutableSegment(ctrl)
seg3 := segment.NewMockMutableSegment(ctrl)
- b.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
- b.shardRangesSegments = []blockShardRangesSegments{
- blockShardRangesSegments{segments: []segment.Segment{seg2, seg3}}}
+ b.mutableSegments.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
+ b.shardRangesSegmentsByVolumeType = map[idxpersist.IndexVolumeType][]blockShardRangesSegments{
+ idxpersist.DefaultIndexVolumeType: []blockShardRangesSegments{
+ blockShardRangesSegments{segments: []segment.Segment{seg2, seg3}},
+ },
+ }
- r1 := index.NewMockReader(ctrl)
+ r1 := segment.NewMockReader(ctrl)
seg1.EXPECT().Reader().Return(r1, nil)
r1.EXPECT().Close().Return(nil)
- r2 := index.NewMockReader(ctrl)
+ r2 := segment.NewMockReader(ctrl)
seg2.EXPECT().Reader().Return(r2, nil)
r2.EXPECT().Close().Return(nil)
@@ -467,7 +479,8 @@ func TestBlockMockQueryExecutorExecError(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
@@ -475,7 +488,7 @@ func TestBlockMockQueryExecutorExecError(t *testing.T) {
// dIter:= doc.NewMockIterator(ctrl)
exec := search.NewMockExecutor(ctrl)
- b.newExecutorFn = func() (search.Executor, error) {
+ b.newExecutorWithRLockFn = func() (search.Executor, error) {
return exec, nil
}
gomock.InOrder(
@@ -493,14 +506,15 @@ func TestBlockMockQueryExecutorExecIterErr(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
exec := search.NewMockExecutor(ctrl)
- b.newExecutorFn = func() (search.Executor, error) {
+ b.newExecutorWithRLockFn = func() (search.Executor, error) {
return exec, nil
}
@@ -533,14 +547,15 @@ func TestBlockMockQueryExecutorExecLimit(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
exec := search.NewMockExecutor(ctrl)
- b.newExecutorFn = func() (search.Executor, error) {
+ b.newExecutorWithRLockFn = func() (search.Executor, error) {
return exec, nil
}
@@ -561,7 +576,7 @@ func TestBlockMockQueryExecutorExecLimit(t *testing.T) {
ctx := context.NewContext()
exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
- defaultQuery, QueryOptions{Limit: limit}, results, emptyLogFields)
+ defaultQuery, QueryOptions{SeriesLimit: limit}, results, emptyLogFields)
require.NoError(t, err)
require.False(t, exhaustive)
@@ -583,14 +598,15 @@ func TestBlockMockQueryExecutorExecIterCloseErr(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
exec := search.NewMockExecutor(ctrl)
- b.newExecutorFn = func() (search.Executor, error) {
+ b.newExecutorWithRLockFn = func() (search.Executor, error) {
return exec, nil
}
@@ -615,20 +631,21 @@ func TestBlockMockQueryExecutorExecIterCloseErr(t *testing.T) {
ctx.BlockingClose()
}
-func TestBlockMockQueryLimit(t *testing.T) {
+func TestBlockMockQuerySeriesLimitNonExhaustive(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
exec := search.NewMockExecutor(ctrl)
- b.newExecutorFn = func() (search.Executor, error) {
+ b.newExecutorWithRLockFn = func() (search.Executor, error) {
return exec, nil
}
@@ -648,7 +665,7 @@ func TestBlockMockQueryLimit(t *testing.T) {
ctx := context.NewContext()
exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
- defaultQuery, QueryOptions{Limit: limit}, results, emptyLogFields)
+ defaultQuery, QueryOptions{SeriesLimit: limit}, results, emptyLogFields)
require.NoError(t, err)
require.False(t, exhaustive)
@@ -664,20 +681,21 @@ func TestBlockMockQueryLimit(t *testing.T) {
ctx.BlockingClose()
}
-func TestBlockMockQueryLimitExhaustive(t *testing.T) {
+func TestBlockMockQuerySeriesLimitExhaustive(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
exec := search.NewMockExecutor(ctrl)
- b.newExecutorFn = func() (search.Executor, error) {
+ b.newExecutorWithRLockFn = func() (search.Executor, error) {
return exec, nil
}
@@ -698,7 +716,109 @@ func TestBlockMockQueryLimitExhaustive(t *testing.T) {
ctx := context.NewContext()
exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
- defaultQuery, QueryOptions{Limit: limit}, results, emptyLogFields)
+ defaultQuery, QueryOptions{SeriesLimit: limit}, results, emptyLogFields)
+ require.NoError(t, err)
+ require.True(t, exhaustive)
+
+ rMap := results.Map()
+ require.Equal(t, 1, rMap.Len())
+ t1, ok := rMap.Get(ident.StringID(string(testDoc1().ID)))
+ require.True(t, ok)
+ require.True(t, ident.NewTagIterMatcher(
+ ident.MustNewTagStringsIterator("bar", "baz")).Matches(
+ t1))
+
+ // NB(r): Make sure to call finalizers blockingly (to finish
+ // the expected close calls)
+ ctx.BlockingClose()
+}
+
+func TestBlockMockQueryDocsLimitNonExhaustive(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ testMD := newTestNSMetadata(t)
+ start := time.Now().Truncate(time.Hour)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
+ require.NoError(t, err)
+
+ b, ok := blk.(*block)
+ require.True(t, ok)
+
+ exec := search.NewMockExecutor(ctrl)
+ b.newExecutorWithRLockFn = func() (search.Executor, error) {
+ return exec, nil
+ }
+
+ dIter := doc.NewMockIterator(ctrl)
+ gomock.InOrder(
+ exec.EXPECT().Execute(gomock.Any()).Return(dIter, nil),
+ dIter.EXPECT().Next().Return(true),
+ dIter.EXPECT().Current().Return(testDoc1()),
+ dIter.EXPECT().Next().Return(true),
+ dIter.EXPECT().Err().Return(nil),
+ dIter.EXPECT().Close().Return(nil),
+ exec.EXPECT().Close().Return(nil),
+ )
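+ // The docs limit of 1 is reached while the iterator still has results
+ // (Next returns true), so the query must report non-exhaustive.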
+ docsLimit := 1
+ results := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
+
+ ctx := context.NewContext()
+
+ exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
+ defaultQuery, QueryOptions{DocsLimit: docsLimit}, results, emptyLogFields)
+ require.NoError(t, err)
+ require.False(t, exhaustive)
+
+ require.Equal(t, 1, results.Map().Len())
+ t1, ok := results.Map().Get(ident.StringID(string(testDoc1().ID)))
+ require.True(t, ok)
+ require.True(t, ident.NewTagIterMatcher(
+ ident.MustNewTagStringsIterator("bar", "baz")).Matches(
+ t1))
+
+ // NB(r): Make sure to call finalizers blockingly (to finish
+ // the expected close calls)
+ ctx.BlockingClose()
+}
+
+func TestBlockMockQueryDocsLimitExhaustive(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ testMD := newTestNSMetadata(t)
+ start := time.Now().Truncate(time.Hour)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
+ require.NoError(t, err)
+
+ b, ok := blk.(*block)
+ require.True(t, ok)
+
+ exec := search.NewMockExecutor(ctrl)
+ b.newExecutorWithRLockFn = func() (search.Executor, error) {
+ return exec, nil
+ }
+
+ dIter := doc.NewMockIterator(ctrl)
+ gomock.InOrder(
+ exec.EXPECT().Execute(gomock.Any()).Return(dIter, nil),
+ dIter.EXPECT().Next().Return(true),
+ dIter.EXPECT().Current().Return(testDoc1()),
+ dIter.EXPECT().Next().Return(false),
+ dIter.EXPECT().Err().Return(nil),
+ dIter.EXPECT().Close().Return(nil),
+ exec.EXPECT().Close().Return(nil),
+ )
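+ // Only one document is returned before the iterator is drained, so the
+ // docs limit of 2 is never reached and the query is exhaustive.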
+ docsLimit := 2
+ results := NewQueryResults(nil,
+ QueryResultsOptions{}, testOpts)
+
+ ctx := context.NewContext()
+
+ exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
+ defaultQuery, QueryOptions{DocsLimit: docsLimit}, results, emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
@@ -721,7 +841,8 @@ func TestBlockMockQueryMergeResultsMapLimit(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
@@ -729,14 +850,14 @@ func TestBlockMockQueryMergeResultsMapLimit(t *testing.T) {
require.NoError(t, b.Seal())
exec := search.NewMockExecutor(ctrl)
- b.newExecutorFn = func() (search.Executor, error) {
+ b.newExecutorWithRLockFn = func() (search.Executor, error) {
return exec, nil
}
limit := 1
results := NewQueryResults(nil,
QueryResultsOptions{SizeLimit: limit}, testOpts)
- _, err = results.AddDocuments([]doc.Document{testDoc1()})
+ _, _, err = results.AddDocuments([]doc.Document{testDoc1()})
require.NoError(t, err)
dIter := doc.NewMockIterator(ctrl)
@@ -751,7 +872,7 @@ func TestBlockMockQueryMergeResultsMapLimit(t *testing.T) {
ctx := context.NewContext()
exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
- defaultQuery, QueryOptions{Limit: limit}, results, emptyLogFields)
+ defaultQuery, QueryOptions{SeriesLimit: limit}, results, emptyLogFields)
require.NoError(t, err)
require.False(t, exhaustive)
@@ -774,19 +895,20 @@ func TestBlockMockQueryMergeResultsDupeID(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
exec := search.NewMockExecutor(ctrl)
- b.newExecutorFn = func() (search.Executor, error) {
+ b.newExecutorWithRLockFn = func() (search.Executor, error) {
return exec, nil
}
results := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
- _, err = results.AddDocuments([]doc.Document{testDoc1()})
+ _, _, err = results.AddDocuments([]doc.Document{testDoc1()})
require.NoError(t, err)
dIter := doc.NewMockIterator(ctrl)
@@ -834,19 +956,23 @@ func TestBlockAddResultsAddsSegment(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
seg1 := segment.NewMockMutableSegment(ctrl)
- require.NoError(t, b.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg1},
- result.NewShardTimeRanges(start, start.Add(time.Hour), 1, 2, 3))))
- require.Equal(t, 1, len(b.shardRangesSegments))
-
- require.Equal(t, seg1, b.shardRangesSegments[0].segments[0])
+ results := result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(time.Hour), 1, 2, 3)))
+ require.NoError(t, b.AddResults(results))
+ shardRangesSegments := b.shardRangesSegmentsByVolumeType[idxpersist.DefaultIndexVolumeType]
+ require.Equal(t, 1, len(shardRangesSegments))
+
+ require.Equal(t, seg1, shardRangesSegments[0].segments[0])
}
func TestBlockAddResultsAfterCloseFails(t *testing.T) {
@@ -855,14 +981,17 @@ func TestBlockAddResultsAfterCloseFails(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
require.NoError(t, blk.Close())
seg1 := segment.NewMockMutableSegment(ctrl)
- require.Error(t, blk.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg1},
- result.NewShardTimeRanges(start, start.Add(time.Hour), 1, 2, 3))))
+ results := result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(time.Hour), 1, 2, 3)))
+ require.Error(t, blk.AddResults(results))
}
func TestBlockAddResultsAfterSealWorks(t *testing.T) {
@@ -871,7 +1000,8 @@ func TestBlockAddResultsAfterSealWorks(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
require.NoError(t, blk.Seal())
@@ -879,12 +1009,15 @@ func TestBlockAddResultsAfterSealWorks(t *testing.T) {
require.True(t, ok)
seg1 := segment.NewMockMutableSegment(ctrl)
- require.NoError(t, blk.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg1},
- result.NewShardTimeRanges(start, start.Add(time.Hour), 1, 2, 3))))
- require.Equal(t, 1, len(b.shardRangesSegments))
-
- require.Equal(t, seg1, b.shardRangesSegments[0].segments[0])
+ results := result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(time.Hour), 1, 2, 3)))
+ require.NoError(t, b.AddResults(results))
+ shardRangesSegments := b.shardRangesSegmentsByVolumeType[idxpersist.DefaultIndexVolumeType]
+ require.Equal(t, 1, len(shardRangesSegments))
+
+ require.Equal(t, seg1, shardRangesSegments[0].segments[0])
}
func TestBlockTickSingleSegment(t *testing.T) {
@@ -893,14 +1026,15 @@ func TestBlockTickSingleSegment(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
seg1 := segment.NewMockSegment(ctrl)
- b.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
+ b.mutableSegments.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
seg1.EXPECT().Size().Return(int64(10))
result, err := blk.Tick(nil)
@@ -915,21 +1049,24 @@ func TestBlockTickMultipleSegment(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
seg1 := segment.NewMockSegment(ctrl)
- b.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
+ b.mutableSegments.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
seg1.EXPECT().Size().Return(int64(10))
seg2 := segment.NewMockMutableSegment(ctrl)
seg2.EXPECT().Size().Return(int64(20))
- require.NoError(t, blk.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg2},
- result.NewShardTimeRanges(start, start.Add(time.Hour), 1, 2, 3))))
+ results := result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg2, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(time.Hour), 1, 2, 3)))
+ require.NoError(t, b.AddResults(results))
result, err := blk.Tick(nil)
require.NoError(t, err)
@@ -943,7 +1080,8 @@ func TestBlockTickAfterSeal(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
require.NoError(t, blk.Seal())
@@ -951,7 +1089,7 @@ func TestBlockTickAfterSeal(t *testing.T) {
require.True(t, ok)
seg1 := segment.NewMockSegment(ctrl)
- b.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
+ b.mutableSegments.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
seg1.EXPECT().Size().Return(int64(10))
result, err := blk.Tick(nil)
@@ -966,7 +1104,8 @@ func TestBlockTickAfterClose(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
require.NoError(t, blk.Close())
@@ -980,19 +1119,25 @@ func TestBlockAddResultsRangeCheck(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
seg1 := segment.NewMockMutableSegment(ctrl)
- require.Error(t, b.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg1},
- result.NewShardTimeRanges(start.Add(-1*time.Minute), start.Add(time.Hour), 1, 2, 3))))
- require.Error(t, b.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg1},
- result.NewShardTimeRanges(start, start.Add(2*time.Hour), 1, 2, 3))))
+ results := result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, true)},
+ result.NewShardTimeRangesFromRange(start.Add(-1*time.Minute), start, 1, 2, 3)))
+ require.Error(t, b.AddResults(results))
+
+ results = result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(2*time.Hour), 1, 2, 3)))
+ require.Error(t, b.AddResults(results))
}
func TestBlockAddResultsCoversCurrentData(t *testing.T) {
@@ -1001,22 +1146,27 @@ func TestBlockAddResultsCoversCurrentData(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
seg1 := segment.NewMockMutableSegment(ctrl)
- require.NoError(t, b.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg1},
- result.NewShardTimeRanges(start, start.Add(time.Hour), 1, 2, 3))))
+ results := result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(time.Hour), 1, 2, 3)))
+ require.NoError(t, b.AddResults(results))
seg2 := segment.NewMockMutableSegment(ctrl)
seg1.EXPECT().Close().Return(nil)
- require.NoError(t, b.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg2},
- result.NewShardTimeRanges(start, start.Add(time.Hour), 1, 2, 3, 4))))
+ results = result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg2, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(time.Hour), 1, 2, 3, 4)))
+ require.NoError(t, b.AddResults(results))
require.NoError(t, b.Seal())
seg2.EXPECT().Close().Return(nil)
@@ -1029,21 +1179,26 @@ func TestBlockAddResultsDoesNotCoverCurrentData(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
seg1 := segment.NewMockMutableSegment(ctrl)
- require.NoError(t, b.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg1},
- result.NewShardTimeRanges(start, start.Add(time.Hour), 1, 2, 3))))
+ results := result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(time.Hour), 1, 2, 3)))
+ require.NoError(t, b.AddResults(results))
seg2 := segment.NewMockMutableSegment(ctrl)
- require.NoError(t, b.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg2},
- result.NewShardTimeRanges(start, start.Add(time.Hour), 1, 2, 5))))
+ results = result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg2, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(time.Hour), 1, 2, 5)))
+ require.NoError(t, b.AddResults(results))
require.NoError(t, b.Seal())
@@ -1058,7 +1213,8 @@ func TestBlockNeedsMutableSegmentsEvicted(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
@@ -1092,7 +1248,8 @@ func TestBlockNeedsMutableSegmentsEvictedMutableSegments(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
@@ -1102,17 +1259,21 @@ func TestBlockNeedsMutableSegmentsEvictedMutableSegments(t *testing.T) {
require.False(t, b.NeedsMutableSegmentsEvicted())
seg1 := segment.NewMockMutableSegment(ctrl)
seg1.EXPECT().Size().Return(int64(0)).AnyTimes()
- require.NoError(t, b.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg1},
- result.NewShardTimeRanges(start, start.Add(time.Hour), 1, 2, 3))))
+ results := result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(time.Hour), 1, 2, 3)))
+ require.NoError(t, b.AddResults(results))
require.False(t, b.NeedsMutableSegmentsEvicted())
seg2 := segment.NewMockMutableSegment(ctrl)
seg2.EXPECT().Size().Return(int64(1)).AnyTimes()
seg3 := segment.NewMockSegment(ctrl)
- require.NoError(t, b.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg2, seg3},
- result.NewShardTimeRanges(start, start.Add(time.Hour), 1, 2, 4))))
+ results = result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg2, true), result.NewSegment(seg3, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(time.Hour), 1, 2, 4)))
+ require.NoError(t, b.AddResults(results))
require.True(t, b.NeedsMutableSegmentsEvicted())
}
@@ -1122,7 +1283,8 @@ func TestBlockEvictMutableSegmentsSimple(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
err = blk.EvictMutableSegments()
require.Error(t, err)
@@ -1138,7 +1300,8 @@ func TestBlockEvictMutableSegmentsAddResults(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
@@ -1146,18 +1309,22 @@ func TestBlockEvictMutableSegmentsAddResults(t *testing.T) {
require.NoError(t, b.Seal())
seg1 := segment.NewMockMutableSegment(ctrl)
- require.NoError(t, b.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg1},
- result.NewShardTimeRanges(start, start.Add(time.Hour), 1, 2, 3))))
+ results := result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(time.Hour), 1, 2, 3)))
+ require.NoError(t, b.AddResults(results))
seg1.EXPECT().Close().Return(nil)
err = b.EvictMutableSegments()
require.NoError(t, err)
seg2 := segment.NewMockMutableSegment(ctrl)
seg3 := segment.NewMockSegment(ctrl)
- require.NoError(t, b.AddResults(
- result.NewIndexBlock(start, []segment.Segment{seg2, seg3},
- result.NewShardTimeRanges(start, start.Add(time.Hour), 1, 2, 4))))
+ results = result.NewIndexBlockByVolumeType(start)
+ results.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg2, true), result.NewSegment(seg3, true)},
+ result.NewShardTimeRangesFromRange(start, start.Add(time.Hour), 1, 2, 4)))
+ require.NoError(t, b.AddResults(results))
seg2.EXPECT().Close().Return(nil)
err = b.EvictMutableSegments()
require.NoError(t, err)
@@ -1186,7 +1353,9 @@ func TestBlockE2EInsertQuery(t *testing.T) {
BlockOptions{
ForegroundCompactorMmapDocsData: true,
BackgroundCompactorMmapDocsData: true,
- }, testOpts)
+ },
+ namespace.NewRuntimeOptionsManager("foo"),
+ testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
@@ -1265,7 +1434,8 @@ func TestBlockE2EInsertQueryLimit(t *testing.T) {
Truncate(blockSize).
Add(time.Minute)
- blk, err := NewBlock(blockStart, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(blockStart, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
@@ -1302,7 +1472,7 @@ func TestBlockE2EInsertQueryLimit(t *testing.T) {
results := NewQueryResults(nil,
QueryResultsOptions{SizeLimit: limit}, testOpts)
exhaustive, err := b.Query(context.NewContext(), resource.NewCancellableLifetime(),
- Query{q}, QueryOptions{Limit: limit}, results, emptyLogFields)
+ Query{q}, QueryOptions{SeriesLimit: limit}, results, emptyLogFields)
require.NoError(t, err)
require.False(t, exhaustive)
require.Equal(t, 1, results.Size())
@@ -1342,7 +1512,8 @@ func TestBlockE2EInsertAddResultsQuery(t *testing.T) {
Truncate(blockSize).
Add(time.Minute)
- blk, err := NewBlock(blockStart, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(blockStart, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
@@ -1373,9 +1544,11 @@ func TestBlockE2EInsertAddResultsQuery(t *testing.T) {
require.Equal(t, int64(0), res.NumError)
seg := testSegment(t, testDoc1DupeID())
- require.NoError(t, blk.AddResults(
- result.NewIndexBlock(blockStart, []segment.Segment{seg},
- result.NewShardTimeRanges(blockStart, blockStart.Add(blockSize), 1, 2, 3))))
+ idxResults := result.NewIndexBlockByVolumeType(blockStart)
+ idxResults.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg, true)},
+ result.NewShardTimeRangesFromRange(blockStart, blockStart.Add(blockSize), 1, 2, 3)))
+ require.NoError(t, blk.AddResults(idxResults))
q, err := idx.NewRegexpQuery([]byte("bar"), []byte("b.*"))
require.NoError(t, err)
@@ -1426,7 +1599,8 @@ func TestBlockE2EInsertAddResultsMergeQuery(t *testing.T) {
Truncate(blockSize).
Add(time.Minute)
- blk, err := NewBlock(blockStart, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(blockStart, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
@@ -1449,9 +1623,11 @@ func TestBlockE2EInsertAddResultsMergeQuery(t *testing.T) {
require.Equal(t, int64(0), res.NumError)
seg := testSegment(t, testDoc2())
- require.NoError(t, blk.AddResults(
- result.NewIndexBlock(blockStart, []segment.Segment{seg},
- result.NewShardTimeRanges(blockStart, blockStart.Add(blockSize), 1, 2, 3))))
+ idxResults := result.NewIndexBlockByVolumeType(blockStart)
+ idxResults.SetBlock(idxpersist.DefaultIndexVolumeType,
+ result.NewIndexBlock([]result.Segment{result.NewSegment(seg, true)},
+ result.NewShardTimeRangesFromRange(blockStart, blockStart.Add(blockSize), 1, 2, 3)))
+ require.NoError(t, blk.AddResults(idxResults))
q, err := idx.NewRegexpQuery([]byte("bar"), []byte("b.*"))
require.NoError(t, err)
@@ -1507,7 +1683,8 @@ func TestBlockWriteBackgroundCompact(t *testing.T) {
testOpts = testOpts.SetInstrumentOptions(
testOpts.InstrumentOptions().SetLogger(logger))
- blk, err := NewBlock(blockStart, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(blockStart, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
defer func() {
require.NoError(t, blk.Close())
@@ -1544,8 +1721,8 @@ func TestBlockWriteBackgroundCompact(t *testing.T) {
// Move the segment to background
b.Lock()
- b.maybeMoveForegroundSegmentsToBackgroundWithLock([]compaction.Segment{
- {Segment: b.foregroundSegments[0].Segment()},
+ b.mutableSegments.maybeMoveForegroundSegmentsToBackgroundWithLock([]compaction.Segment{
+ {Segment: b.mutableSegments.foregroundSegments[0].Segment()},
})
b.Unlock()
@@ -1568,19 +1745,19 @@ func TestBlockWriteBackgroundCompact(t *testing.T) {
require.Equal(t, int64(0), res.NumError)
// Move the last segment to the background; this should kick off a background compaction
- b.Lock()
- b.maybeMoveForegroundSegmentsToBackgroundWithLock([]compaction.Segment{
- {Segment: b.foregroundSegments[0].Segment()},
+ b.mutableSegments.Lock()
+ b.mutableSegments.maybeMoveForegroundSegmentsToBackgroundWithLock([]compaction.Segment{
+ {Segment: b.mutableSegments.foregroundSegments[0].Segment()},
})
- require.Equal(t, 2, len(b.backgroundSegments))
- require.True(t, b.compact.compactingBackground)
- b.Unlock()
+ require.Equal(t, 2, len(b.mutableSegments.backgroundSegments))
+ require.True(t, b.mutableSegments.compact.compactingBackground)
+ b.mutableSegments.Unlock()
// Wait for compaction to finish
for {
- b.RLock()
- compacting := b.compact.compactingBackground
- b.RUnlock()
+ b.mutableSegments.RLock()
+ compacting := b.mutableSegments.compact.compactingBackground
+ b.mutableSegments.RUnlock()
if !compacting {
break
}
@@ -1588,16 +1765,17 @@ func TestBlockWriteBackgroundCompact(t *testing.T) {
}
// Make sure compacted into a single segment
- b.RLock()
- require.Equal(t, 1, len(b.backgroundSegments))
- require.Equal(t, 3, int(b.backgroundSegments[0].Segment().Size()))
- b.RUnlock()
+ b.mutableSegments.RLock()
+ require.Equal(t, 1, len(b.mutableSegments.backgroundSegments))
+ require.Equal(t, 3, int(b.mutableSegments.backgroundSegments[0].Segment().Size()))
+ b.mutableSegments.RUnlock()
}
func TestBlockAggregateAfterClose(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- b, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ b, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
require.Equal(t, start, b.StartTime())
@@ -1615,18 +1793,22 @@ func TestBlockAggregateIterationErr(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
seg1 := segment.NewMockMutableSegment(ctrl)
+ reader := segment.NewMockReader(ctrl)
+ reader.EXPECT().Close().Return(nil)
+ seg1.EXPECT().Reader().Return(reader, nil)
- b.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
+ b.mutableSegments.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
iter := NewMockfieldsAndTermsIterator(ctrl)
b.newFieldsAndTermsIteratorFn = func(
- s segment.Segment, opts fieldsAndTermsIteratorOpts) (fieldsAndTermsIterator, error) {
+ _ segment.Reader, opts fieldsAndTermsIteratorOpts) (fieldsAndTermsIterator, error) {
return iter, nil
}
@@ -1636,14 +1818,18 @@ func TestBlockAggregateIterationErr(t *testing.T) {
}, testOpts)
gomock.InOrder(
- iter.EXPECT().Reset(seg1, gomock.Any()).Return(nil),
+ iter.EXPECT().Reset(reader, gomock.Any()).Return(nil),
iter.EXPECT().Next().Return(true),
iter.EXPECT().Current().Return([]byte("f1"), []byte("t1")),
iter.EXPECT().Next().Return(false),
iter.EXPECT().Err().Return(fmt.Errorf("unknown error")),
iter.EXPECT().Close().Return(nil),
)
- _, err = b.Aggregate(context.NewContext(), resource.NewCancellableLifetime(), QueryOptions{Limit: 3}, results, emptyLogFields)
+
+ ctx := context.NewContext()
+ defer ctx.BlockingClose()
+
+ _, err = b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{SeriesLimit: 3}, results, emptyLogFields)
require.Error(t, err)
}
@@ -1653,18 +1839,22 @@ func TestBlockAggregate(t *testing.T) {
testMD := newTestNSMetadata(t)
start := time.Now().Truncate(time.Hour)
- blk, err := NewBlock(start, testMD, BlockOptions{}, testOpts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
seg1 := segment.NewMockMutableSegment(ctrl)
+ reader := segment.NewMockReader(ctrl)
+ reader.EXPECT().Close().Return(nil)
+ seg1.EXPECT().Reader().Return(reader, nil)
- b.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
+ b.mutableSegments.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
iter := NewMockfieldsAndTermsIterator(ctrl)
b.newFieldsAndTermsIteratorFn = func(
- s segment.Segment, opts fieldsAndTermsIteratorOpts) (fieldsAndTermsIterator, error) {
+ _ segment.Reader, opts fieldsAndTermsIteratorOpts) (fieldsAndTermsIterator, error) {
return iter, nil
}
@@ -1674,13 +1864,15 @@ func TestBlockAggregate(t *testing.T) {
}, testOpts)
ctx := context.NewContext()
+ defer ctx.BlockingClose()
+
// create initial span from a mock tracer and get ctx
mtr := mocktracer.New()
sp := mtr.StartSpan("root")
ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
gomock.InOrder(
- iter.EXPECT().Reset(seg1, gomock.Any()).Return(nil),
+ iter.EXPECT().Reset(reader, gomock.Any()).Return(nil),
iter.EXPECT().Next().Return(true),
iter.EXPECT().Current().Return([]byte("f1"), []byte("t1")),
iter.EXPECT().Next().Return(true),
@@ -1693,7 +1885,7 @@ func TestBlockAggregate(t *testing.T) {
iter.EXPECT().Err().Return(nil),
iter.EXPECT().Close().Return(nil),
)
- exhaustive, err := b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{Limit: 3}, results, emptyLogFields)
+ exhaustive, err := b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{SeriesLimit: 3}, results, emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
@@ -1724,18 +1916,22 @@ func TestBlockAggregateNotExhaustive(t *testing.T) {
aggResultsEntryArrayPool.Init()
opts := testOpts.SetAggregateResultsEntryArrayPool(aggResultsEntryArrayPool)
- blk, err := NewBlock(start, testMD, BlockOptions{}, opts)
+ blk, err := NewBlock(start, testMD, BlockOptions{},
+ namespace.NewRuntimeOptionsManager("foo"), opts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
seg1 := segment.NewMockMutableSegment(ctrl)
+ reader := segment.NewMockReader(ctrl)
+ reader.EXPECT().Close().Return(nil)
+ seg1.EXPECT().Reader().Return(reader, nil)
- b.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
+ b.mutableSegments.foregroundSegments = []*readableSeg{newReadableSeg(seg1, testOpts)}
iter := NewMockfieldsAndTermsIterator(ctrl)
b.newFieldsAndTermsIteratorFn = func(
- s segment.Segment, opts fieldsAndTermsIteratorOpts) (fieldsAndTermsIterator, error) {
+ _ segment.Reader, opts fieldsAndTermsIteratorOpts) (fieldsAndTermsIterator, error) {
return iter, nil
}
@@ -1745,20 +1941,22 @@ func TestBlockAggregateNotExhaustive(t *testing.T) {
}, testOpts)
ctx := context.NewContext()
+ defer ctx.BlockingClose()
+
// create initial span from a mock tracer and get ctx
mtr := mocktracer.New()
sp := mtr.StartSpan("root")
ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
gomock.InOrder(
- iter.EXPECT().Reset(seg1, gomock.Any()).Return(nil),
+ iter.EXPECT().Reset(reader, gomock.Any()).Return(nil),
iter.EXPECT().Next().Return(true),
iter.EXPECT().Current().Return([]byte("f1"), []byte("t1")),
iter.EXPECT().Next().Return(true),
iter.EXPECT().Err().Return(nil),
iter.EXPECT().Close().Return(nil),
)
- exhaustive, err := b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{Limit: 1}, results, emptyLogFields)
+ exhaustive, err := b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{SeriesLimit: 1}, results, emptyLogFields)
require.NoError(t, err)
require.False(t, exhaustive)
@@ -1795,7 +1993,9 @@ func TestBlockE2EInsertAggregate(t *testing.T) {
BlockOptions{
ForegroundCompactorMmapDocsData: true,
BackgroundCompactorMmapDocsData: true,
- }, testOpts)
+ },
+ namespace.NewRuntimeOptionsManager("foo"),
+ testOpts)
require.NoError(t, err)
b, ok := blk.(*block)
require.True(t, ok)
@@ -1843,7 +2043,7 @@ func TestBlockE2EInsertAggregate(t *testing.T) {
sp := mtr.StartSpan("root")
ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
- exhaustive, err := b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{Limit: 10}, results, emptyLogFields)
+ exhaustive, err := b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{SeriesLimit: 10}, results, emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
assertAggregateResultsMapEquals(t, map[string][]string{
@@ -1856,7 +2056,7 @@ func TestBlockE2EInsertAggregate(t *testing.T) {
Type: AggregateTagNamesAndValues,
FieldFilter: AggregateFieldFilter{[]byte("bar")},
}, testOpts)
- exhaustive, err = b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{Limit: 10}, results, emptyLogFields)
+ exhaustive, err = b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{SeriesLimit: 10}, results, emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
assertAggregateResultsMapEquals(t, map[string][]string{
@@ -1868,7 +2068,7 @@ func TestBlockE2EInsertAggregate(t *testing.T) {
Type: AggregateTagNamesAndValues,
FieldFilter: AggregateFieldFilter{[]byte("random")},
}, testOpts)
- exhaustive, err = b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{Limit: 10}, results, emptyLogFields)
+ exhaustive, err = b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{SeriesLimit: 10}, results, emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
assertAggregateResultsMapEquals(t, map[string][]string{}, results)
@@ -1886,11 +2086,13 @@ func assertAggregateResultsMapEquals(t *testing.T, expected map[string][]string,
// ensure `expected` contained in `observed`
for field, terms := range expected {
entry, ok := aggResultsMap.Get(ident.StringID(field))
- require.True(t, ok, "field from expected map missing in observed", field)
+ require.True(t, ok,
+ fmt.Sprintf("field from expected map missing in observed: field=%s", field))
valuesMap := entry.valuesMap
for _, term := range terms {
_, ok = valuesMap.Get(ident.StringID(term))
- require.True(t, ok, "term from expected map missing in observed", field, term)
+ require.True(t, ok,
+ fmt.Sprintf("term from expected map missing in observed: field=%s, term=%s", field, term))
}
}
// ensure `observed` contained in `expected`
@@ -1900,20 +2102,22 @@ func assertAggregateResultsMapEquals(t *testing.T, expected map[string][]string,
for _, entry := range valuesMap.Iter() {
term := entry.Key()
slice, ok := expected[field.String()]
- require.True(t, ok, "field from observed map missing in expected", field.String())
+ require.True(t, ok,
+ fmt.Sprintf("field from observed map missing in expected: field=%s", field.String()))
found := false
for _, expTerm := range slice {
if expTerm == term.String() {
found = true
}
}
- require.True(t, found, "term from observed map missing in expected", field.String(), term.String())
+ require.True(t, found,
+ fmt.Sprintf("term from observed map missing in expected: field=%s, term=%s", field.String(), term.String()))
}
}
}
func testSegment(t *testing.T, docs ...doc.Document) segment.Segment {
- seg, err := mem.NewSegment(0, testOpts.MemSegmentOptions())
+ seg, err := mem.NewSegment(testOpts.MemSegmentOptions())
require.NoError(t, err)
for _, d := range docs {
diff --git a/src/dbnode/storage/index/compaction/compactor.go b/src/dbnode/storage/index/compaction/compactor.go
index 8fc53a7f6c..288d63ef35 100644
--- a/src/dbnode/storage/index/compaction/compactor.go
+++ b/src/dbnode/storage/index/compaction/compactor.go
@@ -114,7 +114,7 @@ func (c *Compactor) Compact(
return nil, errCompactorClosed
}
- c.builder.Reset(0)
+ c.builder.Reset()
if err := c.builder.AddSegments(segs); err != nil {
return nil, err
}
@@ -236,7 +236,7 @@ func (c *Compactor) compactFromBuilderWithLock(
// Release resources regardless of result,
// otherwise old compacted segments are held onto
// strongly
- builder.Reset(0)
+ builder.Reset()
}()
// Since this builder is likely reused between compaction
@@ -275,7 +275,7 @@ func (c *Compactor) compactFromBuilderWithLock(
// rather than encoding them and mmap'ing the encoded documents.
allDocsCopy := make([]doc.Document, len(allDocs))
copy(allDocsCopy, allDocs)
- fstData.DocsReader = docs.NewSliceReader(0, allDocsCopy)
+ fstData.DocsReader = docs.NewSliceReader(allDocsCopy)
} else {
// Otherwise encode and reference the encoded bytes as mmap'd bytes.
c.buff.Reset()
diff --git a/src/dbnode/storage/index/compaction/compactor_test.go b/src/dbnode/storage/index/compaction/compactor_test.go
index 405bcad4b3..7a40e12b7b 100644
--- a/src/dbnode/storage/index/compaction/compactor_test.go
+++ b/src/dbnode/storage/index/compaction/compactor_test.go
@@ -81,7 +81,7 @@ func init() {
}
func TestCompactorSingleMutableSegment(t *testing.T) {
- seg, err := mem.NewSegment(0, testMemSegmentOptions)
+ seg, err := mem.NewSegment(testMemSegmentOptions)
require.NoError(t, err)
_, err = seg.Insert(testDocuments[0])
@@ -105,7 +105,7 @@ func TestCompactorSingleMutableSegment(t *testing.T) {
}
func TestCompactorSingleMutableSegmentWithMmapDocsData(t *testing.T) {
- seg, err := mem.NewSegment(0, testMemSegmentOptions)
+ seg, err := mem.NewSegment(testMemSegmentOptions)
require.NoError(t, err)
_, err = seg.Insert(testDocuments[0])
@@ -131,13 +131,13 @@ func TestCompactorSingleMutableSegmentWithMmapDocsData(t *testing.T) {
}
func TestCompactorManySegments(t *testing.T) {
- seg1, err := mem.NewSegment(0, testMemSegmentOptions)
+ seg1, err := mem.NewSegment(testMemSegmentOptions)
require.NoError(t, err)
_, err = seg1.Insert(testDocuments[0])
require.NoError(t, err)
- seg2, err := mem.NewSegment(0, testMemSegmentOptions)
+ seg2, err := mem.NewSegment(testMemSegmentOptions)
require.NoError(t, err)
_, err = seg2.Insert(testDocuments[1])
@@ -159,13 +159,13 @@ func TestCompactorManySegments(t *testing.T) {
}
func TestCompactorCompactDuplicateIDsNoError(t *testing.T) {
- seg1, err := mem.NewSegment(0, testMemSegmentOptions)
+ seg1, err := mem.NewSegment(testMemSegmentOptions)
require.NoError(t, err)
_, err = seg1.Insert(testDocuments[0])
require.NoError(t, err)
- seg2, err := mem.NewSegment(0, testMemSegmentOptions)
+ seg2, err := mem.NewSegment(testMemSegmentOptions)
require.NoError(t, err)
_, err = seg2.Insert(testDocuments[0])
diff --git a/src/dbnode/storage/index/convert/convert.go b/src/dbnode/storage/index/convert/convert.go
index a3883e4105..effb5aad1c 100644
--- a/src/dbnode/storage/index/convert/convert.go
+++ b/src/dbnode/storage/index/convert/convert.go
@@ -27,6 +27,7 @@ import (
"unicode/utf8"
"github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/query/graphite/graphite"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
)
@@ -46,10 +47,36 @@ var (
"corrupt data, unable to extract id")
)
-// ValidateSeries will validate a metric for use with m3ninx.
+// Validate returns an error if the given document is invalid.
+func Validate(d doc.Document) error {
+ if !utf8.Valid(d.ID) {
+ return fmt.Errorf("document has invalid non-UTF8 ID: id=%v, id_hex=%x",
+ d.ID, d.ID)
+ }
+
+ for _, f := range d.Fields {
+ if !utf8.Valid(f.Name) {
+ return fmt.Errorf("document has invalid non-UTF8 field name: name=%v, name_hex=%x",
+ f.Name, f.Name)
+ }
+
+ if bytes.Equal(f.Name, ReservedFieldNameID) {
+ return ErrUsingReservedFieldName
+ }
+
+ if !utf8.Valid(f.Value) {
+ return fmt.Errorf("document has invalid non-UTF8 field value: value=%v, value_hex=%x",
+ f.Value, f.Value)
+ }
+ }
+
+ return nil
+}
+
+// ValidateSeries will validate a series for use with m3ninx.
func ValidateSeries(id ident.ID, tags ident.Tags) error {
if idBytes := id.Bytes(); !utf8.Valid(idBytes) {
- return fmt.Errorf("series has invalid ID: id=%s, id_hex=%x",
+ return fmt.Errorf("series has invalid non-UTF8 ID: id=%s, id_hex=%x",
idBytes, idBytes)
}
for _, tag := range tags.Values() {
@@ -68,28 +95,22 @@ func ValidateSeriesTag(tag ident.Tag) error {
return ErrUsingReservedFieldName
}
if !utf8.Valid(tagName) {
- return fmt.Errorf("series contains invalid field name: "+
+ return fmt.Errorf("series contains invalid non-UTF8 field name: "+
"field=%s, field_hex=%v", tagName, tagName)
}
if !utf8.Valid(tagValue) {
- return fmt.Errorf("series contains invalid field value: "+
+ return fmt.Errorf("series contains invalid non-UTF8 field value: "+
"field=%s, field_value=%s, field_value_hex=%x",
tagName, tagValue, tagValue)
}
return nil
}
-// FromMetric converts the provided metric id+tags into a document.
-// FOLLOWUP(r): Rename FromMetric to FromSeries (metric terminiology
-// is not common in the codebase)
-func FromMetric(id ident.ID, tags ident.Tags) (doc.Document, error) {
+// FromSeriesIDAndTags converts the provided series id+tags into a document.
+func FromSeriesIDAndTags(id ident.ID, tags ident.Tags) (doc.Document, error) {
clonedID := clone(id)
fields := make([]doc.Field, 0, len(tags.Values()))
for _, tag := range tags.Values() {
- if bytes.Equal(ReservedFieldNameID, tag.Name.Bytes()) {
- return doc.Document{}, ErrUsingReservedFieldName
- }
-
nameBytes, valueBytes := tag.Name.Bytes(), tag.Value.Bytes()
var clonedName, clonedValue []byte
@@ -109,42 +130,23 @@ func FromMetric(id ident.ID, tags ident.Tags) (doc.Document, error) {
Value: clonedValue,
})
}
- return doc.Document{
+
+ d := doc.Document{
ID: clonedID,
Fields: fields,
- }, nil
-}
-
-// FromMetricNoClone converts the provided metric id+tags into a document without cloning.
-func FromMetricNoClone(id ident.ID, tags ident.Tags) (doc.Document, error) {
- fields := make([]doc.Field, 0, len(tags.Values()))
- for _, tag := range tags.Values() {
- if bytes.Equal(ReservedFieldNameID, tag.Name.Bytes()) {
- return doc.Document{}, ErrUsingReservedFieldName
- }
- fields = append(fields, doc.Field{
- Name: tag.Name.Bytes(),
- Value: tag.Value.Bytes(),
- })
}
- return doc.Document{
- ID: id.Bytes(),
- Fields: fields,
- }, nil
+ if err := Validate(d); err != nil {
+ return doc.Document{}, err
+ }
+ return d, nil
}
-// FromMetricIter converts the provided metric id+tags into a document.
-// FOLLOWUP(r): Rename FromMetric to FromSeries (metric terminiology
-// is not common in the codebase)
-func FromMetricIter(id ident.ID, tags ident.TagIterator) (doc.Document, error) {
+// FromSeriesIDAndTagIter converts the provided series id+tags into a document.
+func FromSeriesIDAndTagIter(id ident.ID, tags ident.TagIterator) (doc.Document, error) {
clonedID := clone(id)
fields := make([]doc.Field, 0, tags.Remaining())
for tags.Next() {
tag := tags.Current()
- if bytes.Equal(ReservedFieldNameID, tag.Name.Bytes()) {
- return doc.Document{}, ErrUsingReservedFieldName
- }
-
nameBytes, valueBytes := tag.Name.Bytes(), tag.Value.Bytes()
var clonedName, clonedValue []byte
@@ -167,33 +169,15 @@ func FromMetricIter(id ident.ID, tags ident.TagIterator) (doc.Document, error) {
if err := tags.Err(); err != nil {
return doc.Document{}, err
}
- return doc.Document{
+
+ d := doc.Document{
ID: clonedID,
Fields: fields,
- }, nil
-}
-
-// FromMetricIterNoClone converts the provided metric id+tags iterator into a
-// document without cloning.
-func FromMetricIterNoClone(id ident.ID, tags ident.TagIterator) (doc.Document, error) {
- fields := make([]doc.Field, 0, tags.Remaining())
- for tags.Next() {
- tag := tags.Current()
- if bytes.Equal(ReservedFieldNameID, tag.Name.Bytes()) {
- return doc.Document{}, ErrUsingReservedFieldName
- }
- fields = append(fields, doc.Field{
- Name: tag.Name.Bytes(),
- Value: tag.Value.Bytes(),
- })
}
- if err := tags.Err(); err != nil {
+ if err := Validate(d); err != nil {
return doc.Document{}, err
}
- return doc.Document{
- ID: id.Bytes(),
- Fields: fields,
- }, nil
+ return d, nil
}
// TagsFromTagsIter returns an ident.Tags from a TagIterator. It also tries
@@ -228,7 +212,13 @@ func TagsFromTagsIter(
idRef = true
} else {
if idPool != nil {
- tag.Name = idPool.Clone(curr.Name)
+ // NB(r): Fast path for graphite tag names to save
+ // a lot of space: reuse a preallocated tag name.
+ if idx, ok := graphite.TagIndex(nameBytes); ok {
+ tag.Name = graphite.TagNameID(idx)
+ } else {
+ tag.Name = idPool.Clone(curr.Name)
+ }
} else {
copiedBytes := append([]byte(nil), curr.Name.Bytes()...)
tag.Name = ident.BytesID(copiedBytes)
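A short illustrative sketch of the interning fast path above, assuming graphite tag names follow the generated `__g0__`, `__g1__`, ... scheme used by the query/graphite package (the `__g12__` example name is an assumption); the `ident.BytesID` fallback mirrors the non-pooled clone branch:

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/query/graphite/graphite"
	"github.com/m3db/m3/src/x/ident"
)

func main() {
	name := []byte("__g12__") // assumed generated graphite tag name
	var id ident.ID
	if idx, ok := graphite.TagIndex(name); ok {
		// Shared preallocated ID: no per-series clone of the name bytes.
		id = graphite.TagNameID(idx)
	} else {
		// Non-graphite names still pay for a copy.
		id = ident.BytesID(append([]byte(nil), name...))
	}
	fmt.Println(id.String())
}
```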
@@ -292,16 +282,16 @@ func (o Opts) wrapBytes(b []byte) ident.ID {
return id
}
-// ToMetric converts the provided doc to metric id+tags.
-func ToMetric(d doc.Document, opts Opts) (ident.ID, ident.TagIterator, error) {
+// ToSeries converts the provided doc to series id+tags.
+func ToSeries(d doc.Document, opts Opts) (ident.ID, ident.TagIterator, error) {
if len(d.ID) == 0 {
return nil, nil, errInvalidResultMissingID
}
- return opts.wrapBytes(d.ID), ToMetricTags(d, opts), nil
+ return opts.wrapBytes(d.ID), ToSeriesTags(d, opts), nil
}
-// ToMetricTags converts the provided doc to metric tags.
-func ToMetricTags(d doc.Document, opts Opts) ident.TagIterator {
+// ToSeriesTags converts the provided doc to series tags.
+func ToSeriesTags(d doc.Document, opts Opts) ident.TagIterator {
return newTagIter(d, opts)
}
@@ -408,3 +398,9 @@ func (t *tagIter) Duplicate() ident.TagIterator {
}
return &dupe
}
+
+func (t *tagIter) Rewind() {
+ t.releaseCurrent()
+ t.currentIdx = -1
+ t.done = false
+}
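To make the consolidated validation concrete, a small hypothetical caller that mirrors the reserved-field test below: FromSeriesIDAndTags now funnels reserved-name and non-UTF8 checks through Validate at document-build time, rather than each converter checking separately:

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/dbnode/storage/index/convert"
	"github.com/m3db/m3/src/x/ident"
)

func main() {
	id := ident.StringID("foo")
	tags := ident.NewTags(
		ident.StringTag(string(convert.ReservedFieldNameID), "value"),
	)
	// The reserved field name is caught by Validate inside
	// FromSeriesIDAndTags and surfaces as an error here.
	if _, err := convert.FromSeriesIDAndTags(id, tags); err != nil {
		fmt.Println("rejected:", err)
	}
}
```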
diff --git a/src/dbnode/storage/index/convert/convert_test.go b/src/dbnode/storage/index/convert/convert_test.go
index afb848cd4b..932dc3c493 100644
--- a/src/dbnode/storage/index/convert/convert_test.go
+++ b/src/dbnode/storage/index/convert/convert_test.go
@@ -49,52 +49,30 @@ func init() {
testOpts.IdentPool = idPool
}
-func TestFromMetricInvalid(t *testing.T) {
+func TestFromSeriesIDAndTagsInvalid(t *testing.T) {
id := ident.StringID("foo")
tags := ident.NewTags(
ident.StringTag(string(convert.ReservedFieldNameID), "value"),
)
- _, err := convert.FromMetric(id, tags)
+ _, err := convert.FromSeriesIDAndTags(id, tags)
assert.Error(t, err)
}
-func TestFromMetricNoCloneInvalid(t *testing.T) {
+func TestFromSeriesIDAndTagIteratorInvalid(t *testing.T) {
id := ident.StringID("foo")
tags := ident.NewTags(
ident.StringTag(string(convert.ReservedFieldNameID), "value"),
)
- _, err := convert.FromMetricNoClone(id, tags)
+ _, err := convert.FromSeriesIDAndTagIter(id, ident.NewTagsIterator(tags))
assert.Error(t, err)
}
-func TestFromMetricIteratorInvalid(t *testing.T) {
- id := ident.StringID("foo")
- tags := ident.NewTags(
- ident.StringTag(string(convert.ReservedFieldNameID), "value"),
- )
- _, err := convert.FromMetricIter(id, ident.NewTagsIterator(tags))
- assert.Error(t, err)
-}
-
-func TestFromMetricValid(t *testing.T) {
- id := ident.StringID("foo")
- tags := ident.NewTags(
- ident.StringTag("bar", "baz"),
- )
- d, err := convert.FromMetric(id, tags)
- assert.NoError(t, err)
- assert.Equal(t, "foo", string(d.ID))
- assert.Len(t, d.Fields, 1)
- assert.Equal(t, "bar", string(d.Fields[0].Name))
- assert.Equal(t, "baz", string(d.Fields[0].Value))
-}
-
-func TestFromMetricNoCloneValid(t *testing.T) {
+func TestFromSeriesIDAndTagsValid(t *testing.T) {
id := ident.StringID("foo")
tags := ident.NewTags(
ident.StringTag("bar", "baz"),
)
- d, err := convert.FromMetricNoClone(id, tags)
+ d, err := convert.FromSeriesIDAndTags(id, tags)
assert.NoError(t, err)
assert.Equal(t, "foo", string(d.ID))
assert.Len(t, d.Fields, 1)
@@ -102,12 +80,12 @@ func TestFromMetricNoCloneValid(t *testing.T) {
assert.Equal(t, "baz", string(d.Fields[0].Value))
}
-func TestFromMetricIterValid(t *testing.T) {
+func TestFromSeriesIDAndTagIterValid(t *testing.T) {
id := ident.StringID("foo")
tags := ident.NewTags(
ident.StringTag("bar", "baz"),
)
- d, err := convert.FromMetricIter(id, ident.NewTagsIterator(tags))
+ d, err := convert.FromSeriesIDAndTagIter(id, ident.NewTagsIterator(tags))
assert.NoError(t, err)
assert.Equal(t, "foo", string(d.ID))
assert.Len(t, d.Fields, 1)
@@ -115,7 +93,7 @@ func TestFromMetricIterValid(t *testing.T) {
assert.Equal(t, "baz", string(d.Fields[0].Value))
}
-func TestToMetricValid(t *testing.T) {
+func TestToSeriesValid(t *testing.T) {
d := doc.Document{
ID: []byte("foo"),
Fields: []doc.Field{
@@ -123,7 +101,7 @@ func TestToMetricValid(t *testing.T) {
doc.Field{Name: []byte("some"), Value: []byte("others")},
},
}
- id, tags, err := convert.ToMetric(d, testOpts)
+ id, tags, err := convert.ToSeries(d, testOpts)
assert.NoError(t, err)
assert.Equal(t, 2, tags.Remaining())
assert.Equal(t, "foo", id.String())
@@ -161,24 +139,24 @@ func TestTagsFromTagsIterNoPool(t *testing.T) {
require.True(t, true, expectedTags.Equal(tags))
}
-func TestToMetricInvalidID(t *testing.T) {
+func TestToSeriesInvalidID(t *testing.T) {
d := doc.Document{
Fields: []doc.Field{
doc.Field{Name: []byte("bar"), Value: []byte("baz")},
},
}
- _, _, err := convert.ToMetric(d, testOpts)
+ _, _, err := convert.ToSeries(d, testOpts)
assert.Error(t, err)
}
-func TestToMetricInvalidTag(t *testing.T) {
+func TestToSeriesInvalidTag(t *testing.T) {
d := doc.Document{
ID: []byte("foo"),
Fields: []doc.Field{
doc.Field{Name: convert.ReservedFieldNameID, Value: []byte("baz")},
},
}
- _, tags, err := convert.ToMetric(d, testOpts)
+ _, tags, err := convert.ToSeries(d, testOpts)
assert.NoError(t, err)
assert.False(t, tags.Next())
assert.Error(t, tags.Err())
@@ -201,7 +179,7 @@ func TestValidateSeries(t *testing.T) {
Value: ident.StringID("baz"),
}))
require.Error(t, err)
- assert.Contains(t, err.Error(), "invalid ID")
+ assert.Contains(t, err.Error(), "invalid non-UTF8 ID")
})
t.Run("tag name reserved", func(t *testing.T) {
@@ -222,7 +200,7 @@ func TestValidateSeries(t *testing.T) {
Value: ident.StringID("bar"),
}))
require.Error(t, err)
- assert.Contains(t, err.Error(), "invalid field name")
+ assert.Contains(t, err.Error(), "invalid non-UTF8 field name")
})
t.Run("tag value non-utf8", func(t *testing.T) {
@@ -232,7 +210,7 @@ func TestValidateSeries(t *testing.T) {
Value: ident.BinaryID(invalidBytes),
}))
require.Error(t, err)
- assert.Contains(t, err.Error(), "invalid field value")
+ assert.Contains(t, err.Error(), "invalid non-UTF8 field value")
})
}
diff --git a/src/dbnode/storage/index/fields_terms_iterator.go b/src/dbnode/storage/index/fields_terms_iterator.go
index 517e0f2c2b..186c32d47b 100644
--- a/src/dbnode/storage/index/fields_terms_iterator.go
+++ b/src/dbnode/storage/index/fields_terms_iterator.go
@@ -21,15 +21,25 @@
package index
import (
+ "errors"
+
"github.com/m3db/m3/src/m3ninx/index/segment"
+ "github.com/m3db/m3/src/m3ninx/postings"
+ "github.com/m3db/m3/src/m3ninx/postings/roaring"
xerrors "github.com/m3db/m3/src/x/errors"
+ pilosaroaring "github.com/m3dbx/pilosa/roaring"
+)
+
+var (
+ errUnpackBitmapFromPostingsList = errors.New("unable to unpack bitmap from postings list")
)
// fieldsAndTermsIteratorOpts configures the fieldsAndTermsIterator.
type fieldsAndTermsIteratorOpts struct {
- iterateTerms bool
- allowFn allowFn
- fieldIterFn newFieldIterFn
+ restrictByQuery *Query
+ iterateTerms bool
+ allowFn allowFn
+ fieldIterFn newFieldIterFn
}
func (o fieldsAndTermsIteratorOpts) allow(f []byte) bool {
@@ -39,29 +49,32 @@ func (o fieldsAndTermsIteratorOpts) allow(f []byte) bool {
return o.allowFn(f)
}
-func (o fieldsAndTermsIteratorOpts) newFieldIter(s segment.Segment) (segment.FieldsIterator, error) {
+func (o fieldsAndTermsIteratorOpts) newFieldIter(r segment.Reader) (segment.FieldsIterator, error) {
if o.fieldIterFn == nil {
- return s.FieldsIterable().Fields()
+ return r.Fields()
}
- return o.fieldIterFn(s)
+ return o.fieldIterFn(r)
}
type allowFn func(field []byte) bool
-type newFieldIterFn func(s segment.Segment) (segment.FieldsIterator, error)
+type newFieldIterFn func(r segment.Reader) (segment.FieldsIterator, error)
type fieldsAndTermsIter struct {
- seg segment.Segment
- opts fieldsAndTermsIteratorOpts
+ reader segment.Reader
+ opts fieldsAndTermsIteratorOpts
err error
fieldIter segment.FieldsIterator
termIter segment.TermsIterator
current struct {
- field []byte
- term []byte
+ field []byte
+ term []byte
+ postings postings.List
}
+
+ restrictByPostings *pilosaroaring.Bitmap
}
var (
@@ -72,30 +85,56 @@ var _ fieldsAndTermsIterator = &fieldsAndTermsIter{}
// newFieldsAndTermsIteratorFn is the lambda definition of the ctor for fieldsAndTermsIterator.
type newFieldsAndTermsIteratorFn func(
- s segment.Segment, opts fieldsAndTermsIteratorOpts,
+ r segment.Reader, opts fieldsAndTermsIteratorOpts,
) (fieldsAndTermsIterator, error)
-func newFieldsAndTermsIterator(s segment.Segment, opts fieldsAndTermsIteratorOpts) (fieldsAndTermsIterator, error) {
+func newFieldsAndTermsIterator(reader segment.Reader, opts fieldsAndTermsIteratorOpts) (fieldsAndTermsIterator, error) {
iter := &fieldsAndTermsIter{}
- err := iter.Reset(s, opts)
+ err := iter.Reset(reader, opts)
if err != nil {
return nil, err
}
return iter, nil
}
-func (fti *fieldsAndTermsIter) Reset(s segment.Segment, opts fieldsAndTermsIteratorOpts) error {
+func (fti *fieldsAndTermsIter) Reset(reader segment.Reader, opts fieldsAndTermsIteratorOpts) error {
*fti = fieldsAndTermsIterZeroed
- fti.seg = s
+ fti.reader = reader
fti.opts = opts
- if s == nil {
+ if reader == nil {
return nil
}
- fiter, err := fti.opts.newFieldIter(s)
+
+ fiter, err := fti.opts.newFieldIter(reader)
if err != nil {
return err
}
fti.fieldIter = fiter
+
+ if opts.restrictByQuery == nil {
+ // No need to restrict results by query.
+ return nil
+ }
+
+ // If we need to restrict by query, run the query against this segment first.
+ searcher, err := opts.restrictByQuery.SearchQuery().Searcher()
+ if err != nil {
+ return err
+ }
+
+ pl, err := searcher.Search(fti.reader)
+ if err != nil {
+ return err
+ }
+
+ // Hold onto the postings bitmap to intersect against on a per-term basis.
+ bitmap, ok := roaring.BitmapFromPostingsList(pl)
+ if !ok {
+ return errUnpackBitmapFromPostingsList
+ }
+
+ fti.restrictByPostings = bitmap
+
return nil
}
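The unwrap at the end of Reset relies on the default postings list being roaring-backed. A hypothetical helper sketching that contract (`bitmapOf` is illustrative and not part of this change; it assumes m3ninx's `roaring.NewPostingsList` constructor):

```go
package index

import (
	"github.com/m3db/m3/src/m3ninx/postings"
	"github.com/m3db/m3/src/m3ninx/postings/roaring"
)

// bitmapOf builds a roaring-backed postings list and recovers the
// underlying bitmap without copying, the same unwrap Reset performs.
func bitmapOf(ids ...postings.ID) (uint64, error) {
	pl := roaring.NewPostingsList()
	for _, id := range ids {
		if err := pl.Insert(id); err != nil {
			return 0, err
		}
	}
	bitmap, ok := roaring.BitmapFromPostingsList(pl)
	if !ok {
		// Only a non-roaring postings.List implementation lands here.
		return 0, errUnpackBitmapFromPostingsList
	}
	return bitmap.Count(), nil
}
```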
@@ -121,47 +160,78 @@ func (fti *fieldsAndTermsIter) setNextField() bool {
func (fti *fieldsAndTermsIter) setNext() bool {
// check if current field has another term
if fti.termIter != nil {
- if fti.termIter.Next() {
- fti.current.term, _ = fti.termIter.Current()
+ hasNextTerm, err := fti.nextTermsIterResult()
+ if err != nil {
+ fti.err = err
+ return false
+ }
+ if hasNextTerm {
return true
}
- if err := fti.termIter.Err(); err != nil {
+ }
+
+ // i.e. need to switch to next field
+ for hasNextField := fti.setNextField(); hasNextField; hasNextField = fti.setNextField() {
+ // and get next term for the field
+ var err error
+ fti.termIter, err = fti.reader.Terms(fti.current.field)
+ if err != nil {
fti.err = err
return false
}
- if err := fti.termIter.Close(); err != nil {
+
+ hasNextTerm, err := fti.nextTermsIterResult()
+ if err != nil {
fti.err = err
return false
}
+ if hasNextTerm {
+ return true
+ }
}
- // i.e. need to switch to next field
- hasNext := fti.setNextField()
- if !hasNext {
- return false
- }
-
- // and get next term for the field
- termsIter, err := fti.seg.TermsIterable().Terms(fti.current.field)
- if err != nil {
+ // Check field iterator did not encounter error.
+ if err := fti.fieldIter.Err(); err != nil {
fti.err = err
return false
}
- fti.termIter = termsIter
- hasNext = fti.termIter.Next()
- if !hasNext {
- if fti.fieldIter.Err(); err != nil {
- fti.err = err
- return false
+ // No more fields.
+ return false
+}
+
+func (fti *fieldsAndTermsIter) nextTermsIterResult() (bool, error) {
+ for fti.termIter.Next() {
+ fti.current.term, fti.current.postings = fti.termIter.Current()
+ if fti.restrictByPostings == nil {
+ // No restrictions.
+ return true, nil
}
- fti.termIter = nil
- // i.e. no more terms for this field, should try the next one
- return fti.setNext()
- }
- fti.current.term, _ = fti.termIter.Current()
- return true
+ bitmap, ok := roaring.BitmapFromPostingsList(fti.current.postings)
+ if !ok {
+ return false, errUnpackBitmapFromPostingsList
+ }
+
+ // Check that the term is part of at least some of the documents
+ // we're restricted to providing results for, based on the
+ // intersection count.
+ // Note: IntersectionCount is significantly faster than intersecting and
+ // counting results and also does not allocate.
+ if n := fti.restrictByPostings.IntersectionCount(bitmap); n > 0 {
+ // Matches, this is next result.
+ return true, nil
+ }
+ }
+ if err := fti.termIter.Err(); err != nil {
+ return false, err
+ }
+ if err := fti.termIter.Close(); err != nil {
+ return false, err
+ }
+ // Term iterator exhausted; no next result.
+ fti.termIter = nil
+ return false, nil
}
func (fti *fieldsAndTermsIter) Next() bool {
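On the IntersectionCount note above, a standalone sketch of why it is preferred, assuming the m3dbx/pilosa fork preserves pilosa's roaring API (`NewBitmap`, `IntersectionCount`, `Intersect`, `Count`):

```go
package main

import (
	"fmt"

	"github.com/m3dbx/pilosa/roaring"
)

func main() {
	restricted := roaring.NewBitmap(1, 2, 3) // docs matching the restrict query
	term := roaring.NewBitmap(3, 4, 5)       // docs containing the term

	// Allocation-free: counts common postings without materializing
	// the intersection.
	if n := restricted.IntersectionCount(term); n > 0 {
		fmt.Println("term visible in", n, "restricted docs")
	}

	// Equivalent result, but allocates an intermediate bitmap first.
	fmt.Println(restricted.Intersect(term).Count())
}
```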
diff --git a/src/dbnode/storage/index/field_terms_iterator_prop_test.go b/src/dbnode/storage/index/fields_terms_iterator_prop_test.go
similarity index 88%
rename from src/dbnode/storage/index/field_terms_iterator_prop_test.go
rename to src/dbnode/storage/index/fields_terms_iterator_prop_test.go
index 7df89549b6..91224e7e60 100644
--- a/src/dbnode/storage/index/field_terms_iterator_prop_test.go
+++ b/src/dbnode/storage/index/fields_terms_iterator_prop_test.go
@@ -51,8 +51,11 @@ func TestFieldsTermsIteratorPropertyTest(t *testing.T) {
properties.Property("Fields Terms Iteration works", prop.ForAll(
func(i fieldsTermsIteratorPropInput) (bool, error) {
expected := i.expected()
- seg := i.setup.asSegment(t)
- iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{
+ reader, err := i.setup.asSegment(t).Reader()
+ if err != nil {
+ return false, err
+ }
+ iter, err := newFieldsAndTermsIterator(reader, fieldsAndTermsIteratorOpts{
iterateTerms: i.iterateTerms,
allowFn: i.allowFn,
})
@@ -87,8 +90,8 @@ func TestFieldsTermsIteratorPropertyTestNoPanic(t *testing.T) {
// on the happy path; this prop tests ensures we don't panic unless the underlying iterator
// itself panics.
properties.Property("Fields Terms Iteration doesn't blow up", prop.ForAll(
- func(seg segment.Segment, iterate bool) (bool, error) {
- iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{
+ func(reader segment.Reader, iterate bool) (bool, error) {
+ iter, err := newFieldsAndTermsIterator(reader, fieldsAndTermsIteratorOpts{
iterateTerms: iterate,
})
if err != nil {
@@ -135,7 +138,7 @@ func (i fieldsTermsIteratorPropInput) expected() []pair {
func genIterableSegment(ctrl *gomock.Controller) gopter.Gen {
return gen.MapOf(genIterpoint(), gen.SliceOf(genIterpoint())).
- Map(func(tagValues map[iterpoint][]iterpoint) segment.Segment {
+ Map(func(tagValues map[iterpoint][]iterpoint) segment.Reader {
fields := make([]iterpoint, 0, len(tagValues))
for f := range tagValues {
fields = append(fields, f)
@@ -144,23 +147,20 @@ func genIterableSegment(ctrl *gomock.Controller) gopter.Gen {
return strings.Compare(fields[i].value, fields[j].value) < 0
})
- s := segment.NewMockSegment(ctrl)
- fieldIterable := segment.NewMockFieldsIterable(ctrl)
+ r := segment.NewMockReader(ctrl)
+
fieldIterator := &stubFieldIterator{points: fields}
- termsIterable := segment.NewMockTermsIterable(ctrl)
- s.EXPECT().FieldsIterable().Return(fieldIterable).AnyTimes()
- s.EXPECT().TermsIterable().Return(termsIterable).AnyTimes()
- fieldIterable.EXPECT().Fields().Return(fieldIterator, nil).AnyTimes()
+ r.EXPECT().Fields().Return(fieldIterator, nil).AnyTimes()
for f, values := range tagValues {
sort.Slice(values, func(i, j int) bool {
return strings.Compare(values[i].value, values[j].value) < 0
})
termIterator := &stubTermIterator{points: values}
- termsIterable.EXPECT().Terms([]byte(f.value)).Return(termIterator, nil).AnyTimes()
+ r.EXPECT().Terms([]byte(f.value)).Return(termIterator, nil).AnyTimes()
}
- return s
+ return r
})
}
diff --git a/src/dbnode/storage/index/field_terms_iterator_test.go b/src/dbnode/storage/index/fields_terms_iterator_test.go
similarity index 72%
rename from src/dbnode/storage/index/field_terms_iterator_test.go
rename to src/dbnode/storage/index/fields_terms_iterator_test.go
index 31ebfc39bb..b1ae28c8dd 100644
--- a/src/dbnode/storage/index/field_terms_iterator_test.go
+++ b/src/dbnode/storage/index/fields_terms_iterator_test.go
@@ -28,8 +28,11 @@ import (
"testing"
"github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/m3ninx/idx"
+ m3ninxindex "github.com/m3db/m3/src/m3ninx/index"
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
+ "github.com/m3db/m3/src/m3ninx/index/segment/mem"
"github.com/m3db/m3/src/m3ninx/postings"
"github.com/m3db/m3/src/m3ninx/util"
xtest "github.com/m3db/m3/src/x/test"
@@ -51,9 +54,10 @@ func TestFieldsTermsIteratorSimple(t *testing.T) {
pair{"i", "j"},
pair{"k", "l"},
)
- seg := s.asSegment(t)
+ reader, err := s.asSegment(t).Reader()
+ require.NoError(t, err)
- iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{iterateTerms: true})
+ iter, err := newFieldsAndTermsIterator(reader, fieldsAndTermsIteratorOpts{iterateTerms: true})
require.NoError(t, err)
s.requireEquals(t, iter)
}
@@ -71,12 +75,14 @@ func TestFieldsTermsIteratorReuse(t *testing.T) {
require.NoError(t, err)
s := newFieldsTermsIterSetup(pairs...)
- seg := s.asSegment(t)
- err = iter.Reset(seg, fieldsAndTermsIteratorOpts{iterateTerms: true})
+ reader, err := s.asSegment(t).Reader()
+ require.NoError(t, err)
+
+ err = iter.Reset(reader, fieldsAndTermsIteratorOpts{iterateTerms: true})
require.NoError(t, err)
s.requireEquals(t, iter)
- err = iter.Reset(seg, fieldsAndTermsIteratorOpts{
+ err = iter.Reset(reader, fieldsAndTermsIteratorOpts{
iterateTerms: true,
allowFn: func(f []byte) bool {
return !bytes.Equal([]byte("a"), f) && !bytes.Equal([]byte("k"), f)
@@ -90,7 +96,7 @@ func TestFieldsTermsIteratorReuse(t *testing.T) {
pair{"i", "j"},
}, slice)
- err = iter.Reset(seg, fieldsAndTermsIteratorOpts{
+ err = iter.Reset(reader, fieldsAndTermsIteratorOpts{
iterateTerms: true,
allowFn: func(f []byte) bool {
return bytes.Equal([]byte("k"), f) || bytes.Equal([]byte("a"), f)
@@ -113,9 +119,10 @@ func TestFieldsTermsIteratorSimpleSkip(t *testing.T) {
pair{"k", "l"},
}
s := newFieldsTermsIterSetup(input...)
- seg := s.asSegment(t)
+ reader, err := s.asSegment(t).Reader()
+ require.NoError(t, err)
- iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{
+ iter, err := newFieldsAndTermsIterator(reader, fieldsAndTermsIteratorOpts{
iterateTerms: true,
allowFn: func(f []byte) bool {
return !bytes.Equal([]byte("a"), f) && !bytes.Equal([]byte("k"), f)
@@ -138,9 +145,10 @@ func TestFieldsTermsIteratorTermsOnly(t *testing.T) {
pair{"i", "j"},
pair{"k", "l"},
)
- seg := s.asSegment(t)
+ reader, err := s.asSegment(t).Reader()
+ require.NoError(t, err)
- iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{})
+ iter, err := newFieldsAndTermsIterator(reader, fieldsAndTermsIteratorOpts{})
require.NoError(t, err)
slice := toSlice(t, iter)
requireSlicesEqual(t, []pair{
@@ -152,10 +160,10 @@ func TestFieldsTermsIteratorEmptyTerm(t *testing.T) {
ctrl := gomock.NewController(xtest.Reporter{T: t})
defer ctrl.Finish()
- seg := newMockSegment(ctrl, map[string][]string{
+ reader := newMockSegmentReader(ctrl, map[string][]string{
"a": nil,
})
- iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{iterateTerms: false})
+ iter, err := newFieldsAndTermsIterator(reader, fieldsAndTermsIteratorOpts{iterateTerms: false})
require.NoError(t, err)
slice := toSlice(t, iter)
requireSlicesEqual(t, []pair{pair{"a", ""}}, slice)
@@ -165,16 +173,91 @@ func TestFieldsTermsIteratorEmptyTermInclude(t *testing.T) {
ctrl := gomock.NewController(xtest.Reporter{T: t})
defer ctrl.Finish()
- seg := newMockSegment(ctrl, map[string][]string{
+ reader := newMockSegmentReader(ctrl, map[string][]string{
"a": nil,
})
- iter, err := newFieldsAndTermsIterator(seg, fieldsAndTermsIteratorOpts{iterateTerms: true})
+ iter, err := newFieldsAndTermsIterator(reader, fieldsAndTermsIteratorOpts{iterateTerms: true})
require.NoError(t, err)
slice := toSlice(t, iter)
requireSlicesEqual(t, []pair{}, slice)
}
-func newMockSegment(ctrl *gomock.Controller, tagValues map[string][]string) segment.Segment {
+func TestFieldsTermsIteratorIterateTermsAndRestrictByQuery(t *testing.T) {
+ testDocs := []doc.Document{
+ doc.Document{
+ Fields: []doc.Field{
+ doc.Field{
+ Name: []byte("fruit"),
+ Value: []byte("banana"),
+ },
+ doc.Field{
+ Name: []byte("color"),
+ Value: []byte("yellow"),
+ },
+ },
+ },
+ doc.Document{
+ Fields: []doc.Field{
+ doc.Field{
+ Name: []byte("fruit"),
+ Value: []byte("apple"),
+ },
+ doc.Field{
+ Name: []byte("color"),
+ Value: []byte("red"),
+ },
+ },
+ },
+ doc.Document{
+ Fields: []doc.Field{
+ doc.Field{
+ Name: []byte("fruit"),
+ Value: []byte("pineapple"),
+ },
+ doc.Field{
+ Name: []byte("color"),
+ Value: []byte("yellow"),
+ },
+ },
+ },
+ }
+
+ seg, err := mem.NewSegment(mem.NewOptions())
+ require.NoError(t, err)
+
+ require.NoError(t, seg.InsertBatch(m3ninxindex.Batch{
+ Docs: testDocs,
+ AllowPartialUpdates: true,
+ }))
+
+ require.NoError(t, seg.Seal())
+
+ fruitRegexp, err := idx.NewRegexpQuery([]byte("fruit"), []byte("^.*apple$"))
+ require.NoError(t, err)
+
+ colorRegexp, err := idx.NewRegexpQuery([]byte("color"), []byte("^(red|yellow)$"))
+ require.NoError(t, err)
+
+ reader, err := seg.Reader()
+ require.NoError(t, err)
+
+ iter, err := newFieldsAndTermsIterator(reader, fieldsAndTermsIteratorOpts{
+ iterateTerms: true,
+ restrictByQuery: &Query{
+ Query: idx.NewConjunctionQuery(fruitRegexp, colorRegexp),
+ },
+ })
+ require.NoError(t, err)
+ slice := toSlice(t, iter)
+ requireSlicesEqual(t, []pair{
+ pair{"color", "red"},
+ pair{"color", "yellow"},
+ pair{"fruit", "apple"},
+ pair{"fruit", "pineapple"},
+ }, slice)
+}
+
+func newMockSegmentReader(ctrl *gomock.Controller, tagValues map[string][]string) segment.Reader {
fields := make([]iterpoint, 0, len(tagValues))
for k := range tagValues {
fields = append(fields, iterpoint{
@@ -185,14 +268,10 @@ func newMockSegment(ctrl *gomock.Controller, tagValues map[string][]string) segm
return strings.Compare(fields[i].value, fields[j].value) < 0
})
- s := segment.NewMockSegment(ctrl)
- fieldIterable := segment.NewMockFieldsIterable(ctrl)
+ r := segment.NewMockReader(ctrl)
fieldIterator := &stubFieldIterator{points: fields}
- termsIterable := segment.NewMockTermsIterable(ctrl)
- s.EXPECT().FieldsIterable().Return(fieldIterable).AnyTimes()
- s.EXPECT().TermsIterable().Return(termsIterable).AnyTimes()
- fieldIterable.EXPECT().Fields().Return(fieldIterator, nil).AnyTimes()
+ r.EXPECT().Fields().Return(fieldIterator, nil).AnyTimes()
for _, f := range fields {
termValues := tagValues[f.value]
@@ -204,10 +283,10 @@ func newMockSegment(ctrl *gomock.Controller, tagValues map[string][]string) segm
})
}
termIterator := &stubTermIterator{points: terms}
- termsIterable.EXPECT().Terms([]byte(f.value)).Return(termIterator, nil).AnyTimes()
+ r.EXPECT().Terms([]byte(f.value)).Return(termIterator, nil).AnyTimes()
}
- return s
+ return r
}
type stubTermIterator struct {
diff --git a/src/dbnode/storage/index/filter_fields_iterator.go b/src/dbnode/storage/index/filter_fields_iterator.go
index 237d3b0810..96ac7570e2 100644
--- a/src/dbnode/storage/index/filter_fields_iterator.go
+++ b/src/dbnode/storage/index/filter_fields_iterator.go
@@ -31,21 +31,21 @@ var (
)
func newFilterFieldsIterator(
- seg segment.Segment,
+ reader segment.Reader,
fields AggregateFieldFilter,
) (segment.FieldsIterator, error) {
if len(fields) == 0 {
return nil, errNoFiltersSpecified
}
return &filterFieldsIterator{
- seg: seg,
+ reader: reader,
fields: fields,
currentIdx: -1,
}, nil
}
type filterFieldsIterator struct {
- seg segment.Segment
+ reader segment.Reader
fields AggregateFieldFilter
err error
@@ -63,7 +63,7 @@ func (f *filterFieldsIterator) Next() bool {
for f.currentIdx < len(f.fields) {
field := f.fields[f.currentIdx]
- ok, err := f.seg.ContainsField(field)
+ ok, err := f.reader.ContainsField(field)
if err != nil {
f.err = err
return false
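A hypothetical caller sketch for the reader-based iterator (`printPresentFields` is illustrative): only fields in the filter that the reader reports via ContainsField are yielded, in filter order:

```go
package index

import (
	"fmt"

	"github.com/m3db/m3/src/m3ninx/index/segment"
)

// printPresentFields iterates the allowed fields that the reader's
// segment actually contains.
func printPresentFields(reader segment.Reader, filter AggregateFieldFilter) error {
	iter, err := newFilterFieldsIterator(reader, filter)
	if err != nil {
		return err // e.g. errNoFiltersSpecified for an empty filter
	}
	for iter.Next() {
		fmt.Printf("field present: %s\n", iter.Current())
	}
	if err := iter.Err(); err != nil {
		return err
	}
	return iter.Close()
}
```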
diff --git a/src/dbnode/storage/index/filter_fields_iterator_test.go b/src/dbnode/storage/index/filter_fields_iterator_test.go
index 62e8f09be9..9e584011a4 100644
--- a/src/dbnode/storage/index/filter_fields_iterator_test.go
+++ b/src/dbnode/storage/index/filter_fields_iterator_test.go
@@ -31,42 +31,42 @@ import (
)
func TestNewFilterFieldsIteratorError(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
- s := segment.NewMockSegment(ctrl)
- _, err := newFilterFieldsIterator(s, nil)
+ r := segment.NewMockReader(ctrl)
+ _, err := newFilterFieldsIterator(r, nil)
require.Error(t, err)
}
func TestNewFilterFieldsIteratorNoMatchesInSegment(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
filters := AggregateFieldFilter{[]byte("a"), []byte("b")}
- s := segment.NewMockSegment(ctrl)
- iter, err := newFilterFieldsIterator(s, filters)
+ r := segment.NewMockReader(ctrl)
+ iter, err := newFilterFieldsIterator(r, filters)
require.NoError(t, err)
- s.EXPECT().ContainsField(gomock.Any()).Return(false, nil).AnyTimes()
+ r.EXPECT().ContainsField(gomock.Any()).Return(false, nil).AnyTimes()
require.False(t, iter.Next())
require.NoError(t, iter.Err())
require.NoError(t, iter.Close())
}
func TestNewFilterFieldsIteratorFirstMatch(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
filters := AggregateFieldFilter{[]byte("a"), []byte("b"), []byte("c")}
- s := segment.NewMockSegment(ctrl)
- iter, err := newFilterFieldsIterator(s, filters)
+ r := segment.NewMockReader(ctrl)
+ iter, err := newFilterFieldsIterator(r, filters)
require.NoError(t, err)
gomock.InOrder(
- s.EXPECT().ContainsField([]byte("a")).Return(true, nil),
- s.EXPECT().ContainsField([]byte("b")).Return(false, nil),
- s.EXPECT().ContainsField([]byte("c")).Return(false, nil),
+ r.EXPECT().ContainsField([]byte("a")).Return(true, nil),
+ r.EXPECT().ContainsField([]byte("b")).Return(false, nil),
+ r.EXPECT().ContainsField([]byte("c")).Return(false, nil),
)
require.True(t, iter.Next())
require.Equal(t, "a", string(iter.Current()))
@@ -76,18 +76,18 @@ func TestNewFilterFieldsIteratorFirstMatch(t *testing.T) {
}
func TestNewFilterFieldsIteratorMiddleMatch(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
filters := AggregateFieldFilter{[]byte("a"), []byte("b"), []byte("c")}
- s := segment.NewMockSegment(ctrl)
- iter, err := newFilterFieldsIterator(s, filters)
+ r := segment.NewMockReader(ctrl)
+ iter, err := newFilterFieldsIterator(r, filters)
require.NoError(t, err)
gomock.InOrder(
- s.EXPECT().ContainsField([]byte("a")).Return(false, nil),
- s.EXPECT().ContainsField([]byte("b")).Return(true, nil),
- s.EXPECT().ContainsField([]byte("c")).Return(false, nil),
+ r.EXPECT().ContainsField([]byte("a")).Return(false, nil),
+ r.EXPECT().ContainsField([]byte("b")).Return(true, nil),
+ r.EXPECT().ContainsField([]byte("c")).Return(false, nil),
)
require.True(t, iter.Next())
require.Equal(t, "b", string(iter.Current()))
@@ -97,18 +97,18 @@ func TestNewFilterFieldsIteratorMiddleMatch(t *testing.T) {
}
func TestNewFilterFieldsIteratorEndMatch(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
filters := AggregateFieldFilter{[]byte("a"), []byte("b"), []byte("c")}
- s := segment.NewMockSegment(ctrl)
- iter, err := newFilterFieldsIterator(s, filters)
+ r := segment.NewMockReader(ctrl)
+ iter, err := newFilterFieldsIterator(r, filters)
require.NoError(t, err)
gomock.InOrder(
- s.EXPECT().ContainsField([]byte("a")).Return(false, nil),
- s.EXPECT().ContainsField([]byte("b")).Return(false, nil),
- s.EXPECT().ContainsField([]byte("c")).Return(true, nil),
+ r.EXPECT().ContainsField([]byte("a")).Return(false, nil),
+ r.EXPECT().ContainsField([]byte("b")).Return(false, nil),
+ r.EXPECT().ContainsField([]byte("c")).Return(true, nil),
)
require.True(t, iter.Next())
require.Equal(t, "c", string(iter.Current()))
@@ -118,18 +118,18 @@ func TestNewFilterFieldsIteratorEndMatch(t *testing.T) {
}
func TestNewFilterFieldsIteratorAllMatch(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
filters := AggregateFieldFilter{[]byte("a"), []byte("b"), []byte("c")}
- s := segment.NewMockSegment(ctrl)
- iter, err := newFilterFieldsIterator(s, filters)
+ r := segment.NewMockReader(ctrl)
+ iter, err := newFilterFieldsIterator(r, filters)
require.NoError(t, err)
gomock.InOrder(
- s.EXPECT().ContainsField([]byte("a")).Return(true, nil),
- s.EXPECT().ContainsField([]byte("b")).Return(true, nil),
- s.EXPECT().ContainsField([]byte("c")).Return(true, nil),
+ r.EXPECT().ContainsField([]byte("a")).Return(true, nil),
+ r.EXPECT().ContainsField([]byte("b")).Return(true, nil),
+ r.EXPECT().ContainsField([]byte("c")).Return(true, nil),
)
require.True(t, iter.Next())
require.Equal(t, "a", string(iter.Current()))
@@ -143,18 +143,18 @@ func TestNewFilterFieldsIteratorAllMatch(t *testing.T) {
}
func TestNewFilterFieldsIteratorRandomMatch(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
filters := AggregateFieldFilter{[]byte("a"), []byte("b"), []byte("c")}
- s := segment.NewMockSegment(ctrl)
- iter, err := newFilterFieldsIterator(s, filters)
+ r := segment.NewMockReader(ctrl)
+ iter, err := newFilterFieldsIterator(r, filters)
require.NoError(t, err)
gomock.InOrder(
- s.EXPECT().ContainsField([]byte("a")).Return(true, nil),
- s.EXPECT().ContainsField([]byte("b")).Return(false, nil),
- s.EXPECT().ContainsField([]byte("c")).Return(true, nil),
+ r.EXPECT().ContainsField([]byte("a")).Return(true, nil),
+ r.EXPECT().ContainsField([]byte("b")).Return(false, nil),
+ r.EXPECT().ContainsField([]byte("c")).Return(true, nil),
)
require.True(t, iter.Next())
require.Equal(t, "a", string(iter.Current()))
diff --git a/src/dbnode/storage/index/index_mock.go b/src/dbnode/storage/index/index_mock.go
index b1bf677e05..3498361b80 100644
--- a/src/dbnode/storage/index/index_mock.go
+++ b/src/dbnode/storage/index/index_mock.go
@@ -31,6 +31,7 @@ import (
"github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
+ "github.com/m3db/m3/src/dbnode/storage/stats"
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/builder"
@@ -99,13 +100,28 @@ func (mr *MockBaseResultsMockRecorder) Size() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockBaseResults)(nil).Size))
}
+// TotalDocsCount mocks base method
+func (m *MockBaseResults) TotalDocsCount() int {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TotalDocsCount")
+ ret0, _ := ret[0].(int)
+ return ret0
+}
+
+// TotalDocsCount indicates an expected call of TotalDocsCount
+func (mr *MockBaseResultsMockRecorder) TotalDocsCount() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TotalDocsCount", reflect.TypeOf((*MockBaseResults)(nil).TotalDocsCount))
+}
+
// AddDocuments mocks base method
-func (m *MockBaseResults) AddDocuments(batch []doc.Document) (int, error) {
+func (m *MockBaseResults) AddDocuments(batch []doc.Document) (int, int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddDocuments", batch)
ret0, _ := ret[0].(int)
- ret1, _ := ret[1].(error)
- return ret0, ret1
+ ret1, _ := ret[1].(int)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
}
// AddDocuments indicates an expected call of AddDocuments
@@ -177,13 +193,28 @@ func (mr *MockQueryResultsMockRecorder) Size() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockQueryResults)(nil).Size))
}
+// TotalDocsCount mocks base method
+func (m *MockQueryResults) TotalDocsCount() int {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TotalDocsCount")
+ ret0, _ := ret[0].(int)
+ return ret0
+}
+
+// TotalDocsCount indicates an expected call of TotalDocsCount
+func (mr *MockQueryResultsMockRecorder) TotalDocsCount() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TotalDocsCount", reflect.TypeOf((*MockQueryResults)(nil).TotalDocsCount))
+}
+
// AddDocuments mocks base method
-func (m *MockQueryResults) AddDocuments(batch []doc.Document) (int, error) {
+func (m *MockQueryResults) AddDocuments(batch []doc.Document) (int, int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddDocuments", batch)
ret0, _ := ret[0].(int)
- ret1, _ := ret[1].(error)
- return ret0, ret1
+ ret1, _ := ret[1].(int)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
}
// AddDocuments indicates an expected call of AddDocuments
@@ -342,13 +373,28 @@ func (mr *MockAggregateResultsMockRecorder) Size() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockAggregateResults)(nil).Size))
}
+// TotalDocsCount mocks base method
+func (m *MockAggregateResults) TotalDocsCount() int {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TotalDocsCount")
+ ret0, _ := ret[0].(int)
+ return ret0
+}
+
+// TotalDocsCount indicates an expected call of TotalDocsCount
+func (mr *MockAggregateResultsMockRecorder) TotalDocsCount() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TotalDocsCount", reflect.TypeOf((*MockAggregateResults)(nil).TotalDocsCount))
+}
+
// AddDocuments mocks base method
-func (m *MockAggregateResults) AddDocuments(batch []doc.Document) (int, error) {
+func (m *MockAggregateResults) AddDocuments(batch []doc.Document) (int, int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddDocuments", batch)
ret0, _ := ret[0].(int)
- ret1, _ := ret[1].(error)
- return ret0, ret1
+ ret1, _ := ret[1].(int)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
}
// AddDocuments indicates an expected call of AddDocuments
@@ -396,11 +442,12 @@ func (mr *MockAggregateResultsMockRecorder) AggregateResultsOptions() *gomock.Ca
}
// AddFields mocks base method
-func (m *MockAggregateResults) AddFields(batch []AggregateResultsEntry) int {
+func (m *MockAggregateResults) AddFields(batch []AggregateResultsEntry) (int, int) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddFields", batch)
ret0, _ := ret[0].(int)
- return ret0
+ ret1, _ := ret[1].(int)
+ return ret0, ret1
}
// AddFields indicates an expected call of AddFields
@@ -715,17 +762,17 @@ func (mr *MockBlockMockRecorder) Aggregate(ctx, cancellable, opts, results, logF
}
// AddResults mocks base method
-func (m *MockBlock) AddResults(results result.IndexBlock) error {
+func (m *MockBlock) AddResults(resultsByVolumeType result.IndexBlockByVolumeType) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "AddResults", results)
+ ret := m.ctrl.Call(m, "AddResults", resultsByVolumeType)
ret0, _ := ret[0].(error)
return ret0
}
// AddResults indicates an expected call of AddResults
-func (mr *MockBlockMockRecorder) AddResults(results interface{}) *gomock.Call {
+func (mr *MockBlockMockRecorder) AddResults(resultsByVolumeType interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddResults", reflect.TypeOf((*MockBlock)(nil).AddResults), results)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddResults", reflect.TypeOf((*MockBlock)(nil).AddResults), resultsByVolumeType)
}
// Tick mocks base method
@@ -813,6 +860,61 @@ func (mr *MockBlockMockRecorder) EvictMutableSegments() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EvictMutableSegments", reflect.TypeOf((*MockBlock)(nil).EvictMutableSegments))
}
+// NeedsColdMutableSegmentsEvicted mocks base method
+func (m *MockBlock) NeedsColdMutableSegmentsEvicted() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NeedsColdMutableSegmentsEvicted")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// NeedsColdMutableSegmentsEvicted indicates an expected call of NeedsColdMutableSegmentsEvicted
+func (mr *MockBlockMockRecorder) NeedsColdMutableSegmentsEvicted() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NeedsColdMutableSegmentsEvicted", reflect.TypeOf((*MockBlock)(nil).NeedsColdMutableSegmentsEvicted))
+}
+
+// EvictColdMutableSegments mocks base method
+func (m *MockBlock) EvictColdMutableSegments() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "EvictColdMutableSegments")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// EvictColdMutableSegments indicates an expected call of EvictColdMutableSegments
+func (mr *MockBlockMockRecorder) EvictColdMutableSegments() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EvictColdMutableSegments", reflect.TypeOf((*MockBlock)(nil).EvictColdMutableSegments))
+}
+
+// RotateColdMutableSegments mocks base method
+func (m *MockBlock) RotateColdMutableSegments() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "RotateColdMutableSegments")
+}
+
+// RotateColdMutableSegments indicates an expected call of RotateColdMutableSegments
+func (mr *MockBlockMockRecorder) RotateColdMutableSegments() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RotateColdMutableSegments", reflect.TypeOf((*MockBlock)(nil).RotateColdMutableSegments))
+}
+
+// MemorySegmentsData mocks base method
+func (m *MockBlock) MemorySegmentsData(ctx context.Context) ([]fst.SegmentData, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MemorySegmentsData", ctx)
+ ret0, _ := ret[0].([]fst.SegmentData)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MemorySegmentsData indicates an expected call of MemorySegmentsData
+func (mr *MockBlockMockRecorder) MemorySegmentsData(ctx interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MemorySegmentsData", reflect.TypeOf((*MockBlock)(nil).MemorySegmentsData), ctx)
+}
+
// Close mocks base method
func (m *MockBlock) Close() error {
m.ctrl.T.Helper()
@@ -862,6 +964,18 @@ func (mr *MockBlockStatsReporterMockRecorder) ReportSegmentStats(stats interface
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportSegmentStats", reflect.TypeOf((*MockBlockStatsReporter)(nil).ReportSegmentStats), stats)
}
+// ReportIndexingStats mocks base method
+func (m *MockBlockStatsReporter) ReportIndexingStats(stats BlockIndexingStats) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "ReportIndexingStats", stats)
+}
+
+// ReportIndexingStats indicates an expected call of ReportIndexingStats
+func (mr *MockBlockStatsReporterMockRecorder) ReportIndexingStats(stats interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportIndexingStats", reflect.TypeOf((*MockBlockStatsReporter)(nil).ReportIndexingStats), stats)
+}
+
// MockfieldsAndTermsIterator is a mock of fieldsAndTermsIterator interface
type MockfieldsAndTermsIterator struct {
ctrl *gomock.Controller
@@ -943,17 +1057,17 @@ func (mr *MockfieldsAndTermsIteratorMockRecorder) Close() *gomock.Call {
}
// Reset mocks base method
-func (m *MockfieldsAndTermsIterator) Reset(seg segment.Segment, opts fieldsAndTermsIteratorOpts) error {
+func (m *MockfieldsAndTermsIterator) Reset(reader segment.Reader, opts fieldsAndTermsIteratorOpts) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Reset", seg, opts)
+ ret := m.ctrl.Call(m, "Reset", reader, opts)
ret0, _ := ret[0].(error)
return ret0
}
// Reset indicates an expected call of Reset
-func (mr *MockfieldsAndTermsIteratorMockRecorder) Reset(seg, opts interface{}) *gomock.Call {
+func (mr *MockfieldsAndTermsIteratorMockRecorder) Reset(reader, opts interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockfieldsAndTermsIterator)(nil).Reset), seg, opts)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockfieldsAndTermsIterator)(nil).Reset), reader, opts)
}
// MockOptions is a mock of Options interface
@@ -1552,3 +1666,31 @@ func (mr *MockOptionsMockRecorder) MmapReporter() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MmapReporter", reflect.TypeOf((*MockOptions)(nil).MmapReporter))
}
+
+// SetQueryStats mocks base method
+func (m *MockOptions) SetQueryStats(value stats.QueryStats) Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetQueryStats", value)
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// SetQueryStats indicates an expected call of SetQueryStats
+func (mr *MockOptionsMockRecorder) SetQueryStats(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetQueryStats", reflect.TypeOf((*MockOptions)(nil).SetQueryStats), value)
+}
+
+// QueryStats mocks base method
+func (m *MockOptions) QueryStats() stats.QueryStats {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "QueryStats")
+ ret0, _ := ret[0].(stats.QueryStats)
+ return ret0
+}
+
+// QueryStats indicates an expected call of QueryStats
+func (mr *MockOptionsMockRecorder) QueryStats() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryStats", reflect.TypeOf((*MockOptions)(nil).QueryStats))
+}
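The mock regeneration above tracks a widened results interface: AddDocuments now returns both the retained result size and the total documents observed. A hypothetical helper sketching how a caller consumes the extra return (`seriesLimit`/`docsLimit` are illustrative parameters, not part of this diff):

```go
package index

import "github.com/m3db/m3/src/m3ninx/doc"

// addWithLimits adds a batch and reports whether either limit is hit:
// size is the number of distinct results retained, docsCount the number
// of documents seen, enabling doc-based as well as series-based limits.
func addWithLimits(
	results BaseResults,
	batch []doc.Document,
	seriesLimit, docsLimit int,
) (bool, error) {
	size, docsCount, err := results.AddDocuments(batch)
	if err != nil {
		return false, err
	}
	exhausted := size >= seriesLimit || docsCount >= docsLimit
	return exhausted, nil
}
```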
diff --git a/src/dbnode/storage/index/mutable_segments.go b/src/dbnode/storage/index/mutable_segments.go
new file mode 100644
index 0000000000..baa904cd7b
--- /dev/null
+++ b/src/dbnode/storage/index/mutable_segments.go
@@ -0,0 +1,841 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package index
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/storage/index/compaction"
+ "github.com/m3db/m3/src/dbnode/storage/index/segments"
+ m3ninxindex "github.com/m3db/m3/src/m3ninx/index"
+ "github.com/m3db/m3/src/m3ninx/index/segment"
+ "github.com/m3db/m3/src/m3ninx/index/segment/builder"
+ "github.com/m3db/m3/src/m3ninx/index/segment/fst"
+ xclose "github.com/m3db/m3/src/x/close"
+ "github.com/m3db/m3/src/x/context"
+ "github.com/m3db/m3/src/x/instrument"
+ "github.com/m3db/m3/src/x/mmap"
+
+ "github.com/uber-go/tally"
+ "go.uber.org/zap"
+)
+
+var (
+ errUnableToWriteBlockConcurrent = errors.New("unable to write: index block is already being written to")
+ errMutableSegmentsAlreadyClosed = errors.New("mutable segments already closed")
+ errForegroundCompactorNoPlan = errors.New("index foreground compactor failed to generate a plan")
+ errForegroundCompactorBadPlanFirstTask = errors.New("index foreground compactor generated plan without mutable segment in first task")
+ errForegroundCompactorBadPlanSecondaryTask = errors.New("index foreground compactor generated plan with mutable segment in a secondary task")
+)
+
+type mutableSegmentsState uint
+
+const (
+ mutableSegmentsStateOpen mutableSegmentsState = iota
+ mutableSegmentsStateClosed mutableSegmentsState = iota
+)
+
+// nolint: maligned
+type mutableSegments struct {
+ sync.RWMutex
+
+ state mutableSegmentsState
+
+ foregroundSegments []*readableSeg
+ backgroundSegments []*readableSeg
+
+ compact mutableSegmentsCompact
+ blockStart time.Time
+ blockOpts BlockOptions
+ opts Options
+ iopts instrument.Options
+ optsListener xclose.SimpleCloser
+ writeIndexingConcurrency int
+
+ metrics mutableSegmentsMetrics
+ logger *zap.Logger
+}
+
+type mutableSegmentsMetrics struct {
+ foregroundCompactionPlanRunLatency tally.Timer
+ foregroundCompactionTaskRunLatency tally.Timer
+ backgroundCompactionPlanRunLatency tally.Timer
+ backgroundCompactionTaskRunLatency tally.Timer
+}
+
+func newMutableSegmentsMetrics(s tally.Scope) mutableSegmentsMetrics {
+ foregroundScope := s.Tagged(map[string]string{"compaction-type": "foreground"})
+ backgroundScope := s.Tagged(map[string]string{"compaction-type": "background"})
+ return mutableSegmentsMetrics{
+ foregroundCompactionPlanRunLatency: foregroundScope.Timer("compaction-plan-run-latency"),
+ foregroundCompactionTaskRunLatency: foregroundScope.Timer("compaction-task-run-latency"),
+ backgroundCompactionPlanRunLatency: backgroundScope.Timer("compaction-plan-run-latency"),
+ backgroundCompactionTaskRunLatency: backgroundScope.Timer("compaction-task-run-latency"),
+ }
+}
+
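A small standalone sketch of the tally pattern these metrics use, assuming tally's standard scope API: one tagged sub-scope per compaction type, so identical timer names are distinguished by tag:

```go
package main

import (
	"time"

	"github.com/uber-go/tally"
)

func main() {
	scope := tally.NewTestScope("", nil)
	fg := scope.Tagged(map[string]string{"compaction-type": "foreground"})

	sw := fg.Timer("compaction-plan-run-latency").Start()
	time.Sleep(10 * time.Millisecond) // stand-in for a compaction plan run
	sw.Stop()
}
```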
+// newMutableSegments returns a new mutableSegments for an index block
+// covering the specified duration of time, backed by one or more segments.
+func newMutableSegments(
+ blockStart time.Time,
+ opts Options,
+ blockOpts BlockOptions,
+ namespaceRuntimeOptsMgr namespace.RuntimeOptionsManager,
+ iopts instrument.Options,
+) *mutableSegments {
+ m := &mutableSegments{
+ blockStart: blockStart,
+ opts: opts,
+ blockOpts: blockOpts,
+ iopts: iopts,
+ metrics: newMutableSegmentsMetrics(iopts.MetricsScope()),
+ logger: iopts.Logger(),
+ }
+ m.optsListener = namespaceRuntimeOptsMgr.RegisterListener(m)
+ return m
+}
+
+func (m *mutableSegments) SetNamespaceRuntimeOptions(opts namespace.RuntimeOptions) {
+ m.Lock()
+ // Update current runtime opts for segment builders created in future.
+ perCPUFraction := opts.WriteIndexingPerCPUConcurrencyOrDefault()
+ cpus := math.Ceil(perCPUFraction * float64(runtime.NumCPU()))
+ m.writeIndexingConcurrency = int(math.Max(1, cpus))
+ segmentBuilder := m.compact.segmentBuilder
+ m.Unlock()
+
+ // Reset any existing segment builder to the new concurrency. Do this
+ // outside the lock since the builder can be used for foreground
+ // compaction outside the lock and does its own locking.
+ if segmentBuilder != nil {
+ segmentBuilder.SetIndexConcurrency(m.writeIndexingConcurrency)
+ }
+
+ // Set the global concurrency control we have (we may need to fork
+ // github.com/twotwotwo/sorts to control this on a per-segment-builder
+ // basis).
+ builder.SetSortConcurrency(m.writeIndexingConcurrency)
+}
+
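The concurrency derivation above in isolation, as a runnable stdlib sketch (0.75 is an example fraction, not the documented default): a per-CPU fraction resolves to at least one indexing worker.

```go
package main

import (
	"fmt"
	"math"
	"runtime"
)

func main() {
	perCPUFraction := 0.75 // illustrative value
	cpus := math.Ceil(perCPUFraction * float64(runtime.NumCPU()))
	concurrency := int(math.Max(1, cpus))
	fmt.Println("write indexing concurrency:", concurrency)
}
```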
+func (m *mutableSegments) WriteBatch(inserts *WriteBatch) error {
+ m.Lock()
+ if m.state == mutableSegmentsStateClosed {
+ m.Unlock()
+ return errMutableSegmentsAlreadyClosed
+ }
+
+ if m.compact.compactingForeground {
+ m.Unlock()
+ return errUnableToWriteBlockConcurrent
+ }
+
+ // Lazily allocate the segment builder and compactors.
+ err := m.compact.allocLazyBuilderAndCompactorsWithLock(m.writeIndexingConcurrency,
+ m.blockOpts, m.opts)
+ if err != nil {
+ m.Unlock()
+ return err
+ }
+
+ m.compact.compactingForeground = true
+ builder := m.compact.segmentBuilder
+ m.Unlock()
+
+ defer func() {
+ m.Lock()
+ m.compact.compactingForeground = false
+ m.cleanupForegroundCompactWithLock()
+ m.Unlock()
+ }()
+
+ builder.Reset()
+ insertResultErr := builder.InsertBatch(m3ninxindex.Batch{
+ Docs: inserts.PendingDocs(),
+ AllowPartialUpdates: true,
+ })
+ if len(builder.Docs()) == 0 {
+ // No inserts, no need to compact.
+ return insertResultErr
+ }
+
+ // We inserted some documents, need to compact immediately into a
+ // foreground segment from the segment builder before we can serve reads
+ // from an FST segment.
+ err = m.foregroundCompactWithBuilder(builder)
+ if err != nil {
+ return err
+ }
+
+ // Return result from the original insertion since compaction was successful.
+ return insertResultErr
+}
+
+func (m *mutableSegments) AddReaders(readers []segment.Reader) ([]segment.Reader, error) {
+ m.RLock()
+ defer m.RUnlock()
+
+ var err error
+ readers, err = m.addReadersWithLock(m.foregroundSegments, readers)
+ if err != nil {
+ return nil, err
+ }
+
+ readers, err = m.addReadersWithLock(m.backgroundSegments, readers)
+ if err != nil {
+ return nil, err
+ }
+
+ return readers, nil
+}
+
+func (m *mutableSegments) addReadersWithLock(src []*readableSeg, dst []segment.Reader) ([]segment.Reader, error) {
+ for _, seg := range src {
+ reader, err := seg.Segment().Reader()
+ if err != nil {
+ return nil, err
+ }
+ dst = append(dst, reader)
+ }
+ return dst, nil
+}
+
+func (m *mutableSegments) Len() int {
+ m.RLock()
+ defer m.RUnlock()
+
+ return len(m.foregroundSegments) + len(m.backgroundSegments)
+}
+
+func (m *mutableSegments) MemorySegmentsData(ctx context.Context) ([]fst.SegmentData, error) {
+ m.RLock()
+ defer m.RUnlock()
+
+ // NB(r): This is for debug operations, so don't worry about allocations.
+ var results []fst.SegmentData
+ for _, segs := range [][]*readableSeg{
+ m.foregroundSegments,
+ m.backgroundSegments,
+ } {
+ for _, seg := range segs {
+ fstSegment, ok := seg.Segment().(fst.Segment)
+ if !ok {
+ return nil, fmt.Errorf("segment not fst segment: created=%v", seg.createdAt)
+ }
+
+ segmentData, err := fstSegment.SegmentData(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ results = append(results, segmentData)
+ }
+ }
+ return results, nil
+}
+
+func (m *mutableSegments) NeedsEviction() bool {
+ m.RLock()
+ defer m.RUnlock()
+
+ var needsEviction bool
+ for _, seg := range m.foregroundSegments {
+ needsEviction = needsEviction || seg.Segment().Size() > 0
+ }
+ for _, seg := range m.backgroundSegments {
+ needsEviction = needsEviction || seg.Segment().Size() > 0
+ }
+ return needsEviction
+}
+
+func (m *mutableSegments) NumSegmentsAndDocs() (int64, int64) {
+ m.RLock()
+ defer m.RUnlock()
+
+ var (
+ numSegments, numDocs int64
+ )
+ for _, seg := range m.foregroundSegments {
+ numSegments++
+ numDocs += seg.Segment().Size()
+ }
+ for _, seg := range m.backgroundSegments {
+ numSegments++
+ numDocs += seg.Segment().Size()
+ }
+ return numSegments, numDocs
+}
+
+func (m *mutableSegments) Stats(reporter BlockStatsReporter) {
+ m.RLock()
+ defer m.RUnlock()
+
+ for _, seg := range m.foregroundSegments {
+ _, mutable := seg.Segment().(segment.MutableSegment)
+ reporter.ReportSegmentStats(BlockSegmentStats{
+ Type: ActiveForegroundSegment,
+ Mutable: mutable,
+ Age: seg.Age(),
+ Size: seg.Segment().Size(),
+ })
+ }
+ for _, seg := range m.backgroundSegments {
+ _, mutable := seg.Segment().(segment.MutableSegment)
+ reporter.ReportSegmentStats(BlockSegmentStats{
+ Type: ActiveBackgroundSegment,
+ Mutable: mutable,
+ Age: seg.Age(),
+ Size: seg.Segment().Size(),
+ })
+ }
+
+ reporter.ReportIndexingStats(BlockIndexingStats{
+ IndexConcurrency: m.writeIndexingConcurrency,
+ })
+}
+
+func (m *mutableSegments) Close() {
+ m.Lock()
+ defer m.Unlock()
+ m.state = mutableSegmentsStateClosed
+ m.cleanupCompactWithLock()
+ m.optsListener.Close()
+}
+
+func (m *mutableSegments) maybeBackgroundCompactWithLock() {
+ if m.compact.compactingBackground {
+ return
+ }
+
+ // Create a logical plan.
+ segs := make([]compaction.Segment, 0, len(m.backgroundSegments))
+ for _, seg := range m.backgroundSegments {
+ segs = append(segs, compaction.Segment{
+ Age: seg.Age(),
+ Size: seg.Segment().Size(),
+ Type: segments.FSTType,
+ Segment: seg.Segment(),
+ })
+ }
+
+ plan, err := compaction.NewPlan(segs, m.opts.BackgroundCompactionPlannerOptions())
+ if err != nil {
+ instrument.EmitAndLogInvariantViolation(m.iopts, func(l *zap.Logger) {
+ l.Error("index background compaction plan error", zap.Error(err))
+ })
+ return
+ }
+
+ if len(plan.Tasks) == 0 {
+ return
+ }
+
+ // Kick off compaction.
+ m.compact.compactingBackground = true
+ go func() {
+ m.backgroundCompactWithPlan(plan)
+
+ m.Lock()
+ m.compact.compactingBackground = false
+ m.cleanupBackgroundCompactWithLock()
+ m.Unlock()
+ }()
+}
+
+func (m *mutableSegments) shouldEvictCompactedSegmentsWithLock() bool {
+ return m.state == mutableSegmentsStateClosed
+}
+
+func (m *mutableSegments) cleanupBackgroundCompactWithLock() {
+ if m.state == mutableSegmentsStateOpen {
+ // See if we need to trigger another compaction.
+ m.maybeBackgroundCompactWithLock()
+ return
+ }
+
+ // Check if we need to close all the compacted segments due to
+ // mutableSegments being closed.
+ if !m.shouldEvictCompactedSegmentsWithLock() {
+ return
+ }
+
+ // Close compacted segments.
+ m.closeCompactedSegmentsWithLock(m.backgroundSegments)
+ m.backgroundSegments = nil
+
+ // Free compactor resources.
+ if m.compact.backgroundCompactor == nil {
+ return
+ }
+
+ if err := m.compact.backgroundCompactor.Close(); err != nil {
+ instrument.EmitAndLogInvariantViolation(m.iopts, func(l *zap.Logger) {
+ l.Error("error closing index block background compactor", zap.Error(err))
+ })
+ }
+ m.compact.backgroundCompactor = nil
+}
+
+func (m *mutableSegments) closeCompactedSegmentsWithLock(segments []*readableSeg) {
+ for _, seg := range segments {
+ err := seg.Segment().Close()
+ if err != nil {
+ instrument.EmitAndLogInvariantViolation(m.iopts, func(l *zap.Logger) {
+ l.Error("could not close compacted segment", zap.Error(err))
+ })
+ }
+ }
+}
+
+func (m *mutableSegments) backgroundCompactWithPlan(plan *compaction.Plan) {
+ sw := m.metrics.backgroundCompactionPlanRunLatency.Start()
+ defer sw.Stop()
+
+ n := m.compact.numBackground
+ m.compact.numBackground++
+
+ logger := m.logger.With(
+ zap.Time("blockStart", m.blockStart),
+ zap.Int("numBackgroundCompaction", n),
+ )
+ log := n%compactDebugLogEvery == 0
+ if log {
+ for i, task := range plan.Tasks {
+ summary := task.Summary()
+ logger.Debug("planned background compaction task",
+ zap.Int("task", i),
+ zap.Int("numMutable", summary.NumMutable),
+ zap.Int("numFST", summary.NumFST),
+ zap.Stringer("cumulativeMutableAge", summary.CumulativeMutableAge),
+ zap.Int64("cumulativeSize", summary.CumulativeSize),
+ )
+ }
+ }
+
+ for i, task := range plan.Tasks {
+ err := m.backgroundCompactWithTask(task, log,
+ logger.With(zap.Int("task", i)))
+ if err != nil {
+ instrument.EmitAndLogInvariantViolation(m.iopts, func(l *zap.Logger) {
+ l.Error("error compacting segments", zap.Error(err))
+ })
+ return
+ }
+ }
+}
+
+func (m *mutableSegments) backgroundCompactWithTask(
+ task compaction.Task,
+ log bool,
+ logger *zap.Logger,
+) error {
+ if log {
+ logger.Debug("start compaction task")
+ }
+
+ segments := make([]segment.Segment, 0, len(task.Segments))
+ for _, seg := range task.Segments {
+ segments = append(segments, seg.Segment)
+ }
+
+ start := time.Now()
+ compacted, err := m.compact.backgroundCompactor.Compact(segments, mmap.ReporterOptions{
+ Context: mmap.Context{
+ Name: mmapIndexBlockName,
+ },
+ Reporter: m.opts.MmapReporter(),
+ })
+ took := time.Since(start)
+ m.metrics.backgroundCompactionTaskRunLatency.Record(took)
+
+ if log {
+ logger.Debug("done compaction task", zap.Duration("took", took))
+ }
+
+ if err != nil {
+ return err
+ }
+
+ // Add a read-through cache for repeated expensive queries against
+ // background compacted segments, since they can live for quite some
+ // time and accrue a large set of documents.
+ if immSeg, ok := compacted.(segment.ImmutableSegment); ok {
+ var (
+ plCache = m.opts.PostingsListCache()
+ readThroughOpts = m.opts.ReadThroughSegmentOptions()
+ )
+ compacted = NewReadThroughSegment(immSeg, plCache, readThroughOpts)
+ }
+
+ // Rotate out the replaced frozen segments and add the compacted one.
+ m.Lock()
+ defer m.Unlock()
+
+ result := m.addCompactedSegmentFromSegmentsWithLock(m.backgroundSegments,
+ segments, compacted)
+ m.backgroundSegments = result
+
+ return nil
+}
+
+func (m *mutableSegments) addCompactedSegmentFromSegmentsWithLock(
+ current []*readableSeg,
+ segmentsJustCompacted []segment.Segment,
+ compacted segment.Segment,
+) []*readableSeg {
+ result := make([]*readableSeg, 0, len(current))
+ for _, existing := range current {
+ keepCurr := true
+ for _, seg := range segmentsJustCompacted {
+ if existing.Segment() == seg {
+ // Do not keep this one; it was just compacted.
+ keepCurr = false
+ break
+ }
+ }
+
+ if keepCurr {
+ result = append(result, existing)
+ continue
+ }
+
+ err := existing.Segment().Close()
+ if err != nil {
+ // The segment was already compacted; if it fails to close, all we can do is log the error.
+ instrument.EmitAndLogInvariantViolation(m.iopts, func(l *zap.Logger) {
+ l.Error("unable to close compacted block", zap.Error(err))
+ })
+ }
+ }
+
+ // Return all the segments we kept plus the new compacted segment.
+ return append(result, newReadableSeg(compacted, m.opts))
+}
+
+func (m *mutableSegments) foregroundCompactWithBuilder(builder segment.DocumentsBuilder) error {
+ // We inserted some documents, need to compact immediately into a
+ // foreground segment.
+ m.Lock()
+ foregroundSegments := m.foregroundSegments
+ m.Unlock()
+
+ segs := make([]compaction.Segment, 0, len(foregroundSegments)+1)
+ segs = append(segs, compaction.Segment{
+ Age: 0,
+ Size: int64(len(builder.Docs())),
+ Type: segments.MutableType,
+ Builder: builder,
+ })
+ for _, seg := range foregroundSegments {
+ segs = append(segs, compaction.Segment{
+ Age: seg.Age(),
+ Size: seg.Segment().Size(),
+ Type: segments.FSTType,
+ Segment: seg.Segment(),
+ })
+ }
+
+ plan, err := compaction.NewPlan(segs, m.opts.ForegroundCompactionPlannerOptions())
+ if err != nil {
+ return err
+ }
+
+ // Check the plan.
+ if len(plan.Tasks) == 0 {
+ // The planner should always generate a task when a mutable builder is passed in.
+ return errForegroundCompactorNoPlan
+ }
+ if taskNumBuilders(plan.Tasks[0]) != 1 {
+ // The first task of the plan must include the builder so we can avoid
+ // resetting it for the first task, then safely reset it in subsequent tasks.
+ return errForegroundCompactorBadPlanFirstTask
+ }
+
+ // Move any unused segments to the background.
+ m.Lock()
+ m.maybeMoveForegroundSegmentsToBackgroundWithLock(plan.UnusedSegments)
+ m.Unlock()
+
+ n := m.compact.numForeground
+ m.compact.numForeground++
+
+ logger := m.logger.With(
+ zap.Time("blockStart", m.blockStart),
+ zap.Int("numForegroundCompaction", n),
+ )
+ log := n%compactDebugLogEvery == 0
+ if log {
+ for i, task := range plan.Tasks {
+ summary := task.Summary()
+ logger.Debug("planned foreground compaction task",
+ zap.Int("task", i),
+ zap.Int("numMutable", summary.NumMutable),
+ zap.Int("numFST", summary.NumFST),
+ zap.Duration("cumulativeMutableAge", summary.CumulativeMutableAge),
+ zap.Int64("cumulativeSize", summary.CumulativeSize),
+ )
+ }
+ }
+
+ // Run the plan.
+ sw := m.metrics.foregroundCompactionPlanRunLatency.Start()
+ defer sw.Stop()
+
+ // Run the first task, without resetting the builder.
+ if err := m.foregroundCompactWithTask(
+ builder, plan.Tasks[0],
+ log, logger.With(zap.Int("task", 0)),
+ ); err != nil {
+ return err
+ }
+
+ // Now run each subsequent task, resetting the builder each time since
+ // the results from the builder have already been compacted in the first
+ // task.
+ for i := 1; i < len(plan.Tasks); i++ {
+ task := plan.Tasks[i]
+ if taskNumBuilders(task) > 0 {
+ // Only the first task should compact the builder
+ return errForegroundCompactorBadPlanSecondaryTask
+ }
+ // Now use the builder after resetting it.
+ builder.Reset()
+ if err := m.foregroundCompactWithTask(
+ builder, task,
+ log, logger.With(zap.Int("task", i)),
+ ); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *mutableSegments) maybeMoveForegroundSegmentsToBackgroundWithLock(
+ segments []compaction.Segment,
+) {
+ if len(segments) == 0 {
+ return
+ }
+ if m.compact.backgroundCompactor == nil {
+ // No longer performing background compaction due to evict/close.
+ return
+ }
+
+ m.logger.Debug("moving segments from foreground to background",
+ zap.Int("numSegments", len(segments)))
+
+ // If background compaction is still active, then we move any unused
+ // foreground segments into the background so that they might be
+ // compacted by the background compactor at some point.
+ i := 0
+ for _, currForeground := range m.foregroundSegments {
+ movedToBackground := false
+ for _, seg := range segments {
+ if currForeground.Segment() == seg.Segment {
+ m.backgroundSegments = append(m.backgroundSegments, currForeground)
+ movedToBackground = true
+ break
+ }
+ }
+ if movedToBackground {
+ continue // No need to keep this segment, we moved it.
+ }
+
+ m.foregroundSegments[i] = currForeground
+ i++
+ }
+
+ m.foregroundSegments = m.foregroundSegments[:i]
+
+ // Potentially kick off a background compaction.
+ m.maybeBackgroundCompactWithLock()
+}
+
+func (m *mutableSegments) foregroundCompactWithTask(
+ builder segment.DocumentsBuilder,
+ task compaction.Task,
+ log bool,
+ logger *zap.Logger,
+) error {
+ if log {
+ logger.Debug("start compaction task")
+ }
+
+ segments := make([]segment.Segment, 0, len(task.Segments))
+ for _, seg := range task.Segments {
+ if seg.Segment == nil {
+ continue // This means the builder is being used.
+ }
+ segments = append(segments, seg.Segment)
+ }
+
+ start := time.Now()
+ compacted, err := m.compact.foregroundCompactor.CompactUsingBuilder(builder, segments, mmap.ReporterOptions{
+ Context: mmap.Context{
+ Name: mmapIndexBlockName,
+ },
+ Reporter: m.opts.MmapReporter(),
+ })
+ took := time.Since(start)
+ m.metrics.foregroundCompactionTaskRunLatency.Record(took)
+
+ if log {
+ logger.Debug("done compaction task", zap.Duration("took", took))
+ }
+
+ if err != nil {
+ return err
+ }
+
+ // Rotate in the ones we just compacted.
+ m.Lock()
+ defer m.Unlock()
+
+ result := m.addCompactedSegmentFromSegmentsWithLock(m.foregroundSegments,
+ segments, compacted)
+ m.foregroundSegments = result
+
+ return nil
+}
+
+func (m *mutableSegments) cleanupForegroundCompactWithLock() {
+ // Check if we need to close all the compacted segments due to
+ // mutableSegments being closed.
+ if !m.shouldEvictCompactedSegmentsWithLock() {
+ return
+ }
+
+ // Close compacted segments.
+ m.closeCompactedSegmentsWithLock(m.foregroundSegments)
+ m.foregroundSegments = nil
+
+ // Free compactor resources.
+ if m.compact.foregroundCompactor != nil {
+ if err := m.compact.foregroundCompactor.Close(); err != nil {
+ instrument.EmitAndLogInvariantViolation(m.iopts, func(l *zap.Logger) {
+ l.Error("error closing index block foreground compactor", zap.Error(err))
+ })
+ }
+ m.compact.foregroundCompactor = nil
+ }
+
+ // Free segment builder resources.
+ if m.compact.segmentBuilder != nil {
+ if err := m.compact.segmentBuilder.Close(); err != nil {
+ instrument.EmitAndLogInvariantViolation(m.iopts, func(l *zap.Logger) {
+ l.Error("error closing index block segment builder", zap.Error(err))
+ })
+ }
+ m.compact.segmentBuilder = nil
+ }
+}
+
+func (m *mutableSegments) cleanupCompactWithLock() {
+ // If not compacting, trigger a cleanup so that all frozen segments get
+ // closed; otherwise the compacted segments will be closed once the
+ // currently running compaction finishes.
+ if !m.compact.compactingForeground {
+ m.cleanupForegroundCompactWithLock()
+ }
+ if !m.compact.compactingBackground {
+ m.cleanupBackgroundCompactWithLock()
+ }
+}
+
+// mutableSegmentsCompact has several lazily allocated compaction components.
+type mutableSegmentsCompact struct {
+ segmentBuilder segment.CloseableDocumentsBuilder
+ foregroundCompactor *compaction.Compactor
+ backgroundCompactor *compaction.Compactor
+ compactingForeground bool
+ compactingBackground bool
+ numForeground int
+ numBackground int
+}
+
+func (m *mutableSegmentsCompact) allocLazyBuilderAndCompactorsWithLock(
+ concurrency int,
+ blockOpts BlockOptions,
+ opts Options,
+) error {
+ var (
+ err error
+ docsPool = opts.DocumentArrayPool()
+ )
+ if m.segmentBuilder == nil {
+ builderOpts := opts.SegmentBuilderOptions().
+ SetConcurrency(concurrency)
+
+ m.segmentBuilder, err = builder.NewBuilderFromDocuments(builderOpts)
+ if err != nil {
+ return err
+ }
+ }
+
+ if m.foregroundCompactor == nil {
+ m.foregroundCompactor, err = compaction.NewCompactor(docsPool,
+ DocumentArrayPoolCapacity,
+ opts.SegmentBuilderOptions(),
+ opts.FSTSegmentOptions(),
+ compaction.CompactorOptions{
+ FSTWriterOptions: &fst.WriterOptions{
+ // DisableRegistry is set to true to trade a larger FST size
+ // for faster FST compaction, since we want to reduce the end-to-end
+ // latency until a metric is first indexed.
+ DisableRegistry: true,
+ },
+ MmapDocsData: blockOpts.ForegroundCompactorMmapDocsData,
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ if m.backgroundCompactor == nil {
+ m.backgroundCompactor, err = compaction.NewCompactor(docsPool,
+ DocumentArrayPoolCapacity,
+ opts.SegmentBuilderOptions(),
+ opts.FSTSegmentOptions(),
+ compaction.CompactorOptions{
+ MmapDocsData: blockOpts.BackgroundCompactorMmapDocsData,
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func taskNumBuilders(task compaction.Task) int {
+ builders := 0
+ for _, seg := range task.Segments {
+ if seg.Builder != nil {
+ builders++
+ }
+ }
+ return builders
+}
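Note: the foreground/background split above follows a common pattern: foreground compaction folds freshly written documents into FST segments on the write path, while background compaction merges accumulated FST segments behind a single in-flight flag. Below is a minimal, self-contained sketch of that flag-guarded trigger; the names are illustrative stand-ins, not the M3 types.

package main

import (
	"fmt"
	"sync"
	"time"
)

type compactor struct {
	sync.Mutex
	compacting bool
	pending    []string // stands in for []*readableSeg
}

// maybeCompactWithLock must be called with the lock held, mirroring the
// *WithLock convention used in the diff above.
func (c *compactor) maybeCompactWithLock() {
	if c.compacting || len(c.pending) < 2 {
		return // already running, or nothing worth merging
	}
	plan := c.pending
	c.pending = nil
	c.compacting = true
	go func() {
		time.Sleep(10 * time.Millisecond) // stands in for the real merge
		fmt.Println("compacted:", plan)
		c.Lock()
		c.compacting = false
		c.maybeCompactWithLock() // re-check: pick up work queued meanwhile
		c.Unlock()
	}()
}

func main() {
	c := &compactor{}
	c.Lock()
	c.pending = append(c.pending, "seg-a", "seg-b")
	c.maybeCompactWithLock()
	c.Unlock()
	time.Sleep(50 * time.Millisecond)
}

The re-check after clearing the flag is what lets segments that arrive during a running compaction get picked up without an external scheduler.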
diff --git a/src/dbnode/storage/index/options.go b/src/dbnode/storage/index/options.go
index d0881133a1..15f7738d16 100644
--- a/src/dbnode/storage/index/options.go
+++ b/src/dbnode/storage/index/options.go
@@ -25,6 +25,7 @@ import (
"github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
+ "github.com/m3db/m3/src/dbnode/storage/stats"
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/index/segment/builder"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
@@ -67,6 +68,7 @@ var (
errOptionsAggResultsEntryPoolUnspecified = errors.New("aggregate results entry array pool is unset")
errIDGenerationDisabled = errors.New("id generation is disabled")
errPostingsListCacheUnspecified = errors.New("postings list cache is unset")
+ errOptionsQueryStatsUnspecified = errors.New("query stats is unset")
defaultForegroundCompactionOpts compaction.PlannerOptions
defaultBackgroundCompactionOpts compaction.PlannerOptions
@@ -122,6 +124,7 @@ type opts struct {
postingsListCache *PostingsListCache
readThroughSegmentOptions ReadThroughSegmentOptions
mmapReporter mmap.Reporter
+ queryStats stats.QueryStats
}
var undefinedUUIDFn = func() ([]byte, error) { return nil, errIDGenerationDisabled }
@@ -172,6 +175,7 @@ func NewOptions() Options {
aggResultsEntryArrayPool: aggResultsEntryArrayPool,
foregroundCompactionPlannerOpts: defaultForegroundCompactionOpts,
backgroundCompactionPlannerOpts: defaultBackgroundCompactionOpts,
+ queryStats: stats.NoOpQueryStats(),
}
resultsPool.Init(func() QueryResults {
return NewQueryResults(nil, QueryResultsOptions{}, opts)
@@ -208,6 +212,9 @@ func (o *opts) Validate() error {
if o.postingsListCache == nil {
return errPostingsListCacheUnspecified
}
+ if o.queryStats == nil {
+ return errOptionsQueryStatsUnspecified
+ }
return nil
}
@@ -414,3 +421,13 @@ func (o *opts) SetMmapReporter(mmapReporter mmap.Reporter) Options {
func (o *opts) MmapReporter() mmap.Reporter {
return o.mmapReporter
}
+
+func (o *opts) SetQueryStats(value stats.QueryStats) Options {
+ opts := *o
+ opts.queryStats = value
+ return &opts
+}
+
+func (o *opts) QueryStats() stats.QueryStats {
+ return o.queryStats
+}
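Note: SetQueryStats follows the copy-on-set convention the options types here already use: copy the receiver, mutate the copy, return a pointer to it. A minimal sketch of the pattern under illustrative names (not the actual Options interface):

package main

import "fmt"

type options struct {
	queryStatsEnabled bool
}

// SetQueryStatsEnabled copies the struct, so callers holding the old
// value observe no change (shallow-copy immutability).
func (o *options) SetQueryStatsEnabled(v bool) *options {
	opts := *o
	opts.queryStatsEnabled = v
	return &opts
}

func main() {
	base := &options{}
	derived := base.SetQueryStatsEnabled(true)
	fmt.Println(base.queryStatsEnabled, derived.queryStatsEnabled) // false true
}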
diff --git a/src/dbnode/storage/index/read_through_segment.go b/src/dbnode/storage/index/read_through_segment.go
index 598b1cc18a..52aaaf6251 100644
--- a/src/dbnode/storage/index/read_through_segment.go
+++ b/src/dbnode/storage/index/read_through_segment.go
@@ -85,7 +85,7 @@ func NewReadThroughSegment(
}
// Reader returns a read through reader for the read through segment.
-func (r *ReadThroughSegment) Reader() (index.Reader, error) {
+func (r *ReadThroughSegment) Reader() (segment.Reader, error) {
r.RLock()
defer r.RUnlock()
if r.closed {
@@ -159,18 +159,18 @@ type readThroughSegmentReader struct {
// reader is explicitly not embedded at the top level
// of the struct to force new methods added to index.Reader
// to be explicitly supported by the read through cache.
- reader index.Reader
+ reader segment.Reader
opts ReadThroughSegmentOptions
uuid uuid.UUID
postingsListCache *PostingsListCache
}
func newReadThroughSegmentReader(
- reader index.Reader,
+ reader segment.Reader,
uuid uuid.UUID,
cache *PostingsListCache,
opts ReadThroughSegmentOptions,
-) index.Reader {
+) segment.Reader {
return &readThroughSegmentReader{
reader: reader,
opts: opts,
@@ -272,6 +272,21 @@ func (s *readThroughSegmentReader) Docs(pl postings.List) (doc.Iterator, error)
return s.reader.Docs(pl)
}
+// Fields is a pass through call.
+func (s *readThroughSegmentReader) Fields() (segment.FieldsIterator, error) {
+ return s.reader.Fields()
+}
+
+// ContainsField is a pass through call.
+func (s *readThroughSegmentReader) ContainsField(field []byte) (bool, error) {
+ return s.reader.ContainsField(field)
+}
+
+// Terms is a pass through call.
+func (s *readThroughSegmentReader) Terms(field []byte) (segment.TermsIterator, error) {
+ return s.reader.Terms(field)
+}
+
// Close is a pass through call.
func (s *readThroughSegmentReader) Close() error {
return s.reader.Close()
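Note: the new Fields/ContainsField/Terms pass-throughs keep readThroughSegmentReader a full segment.Reader while only postings lookups are cached. A hedged sketch of the read-through idea itself, with simplified stand-in types (the real cache is keyed by segment UUID and pattern/term, not a plain map):

package main

import "fmt"

type postingsList []uint64

type reader interface {
	MatchTerm(field, term []byte) (postingsList, error)
}

type slowReader struct{}

func (slowReader) MatchTerm(field, term []byte) (postingsList, error) {
	fmt.Println("miss: computing postings for", string(term))
	return postingsList{1, 2, 3}, nil
}

// readThrough consults the cache first and falls back to the wrapped
// reader on miss, populating the cache for the next lookup.
type readThrough struct {
	reader reader
	cache  map[string]postingsList
}

func (r *readThrough) MatchTerm(field, term []byte) (postingsList, error) {
	key := string(field) + "\x00" + string(term)
	if pl, ok := r.cache[key]; ok {
		return pl, nil
	}
	pl, err := r.reader.MatchTerm(field, term)
	if err != nil {
		return nil, err
	}
	r.cache[key] = pl
	return pl, nil
}

func main() {
	rt := &readThrough{reader: slowReader{}, cache: map[string]postingsList{}}
	rt.MatchTerm([]byte("name"), []byte("cpu")) // miss
	rt.MatchTerm([]byte("name"), []byte("cpu")) // hit, no recompute
}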
diff --git a/src/dbnode/storage/index/read_through_segment_test.go b/src/dbnode/storage/index/read_through_segment_test.go
index 4f86561a88..cac33fb8d4 100644
--- a/src/dbnode/storage/index/read_through_segment_test.go
+++ b/src/dbnode/storage/index/read_through_segment_test.go
@@ -25,6 +25,7 @@ import (
"testing"
"github.com/m3db/m3/src/m3ninx/index"
+ "github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
"github.com/m3db/m3/src/m3ninx/postings/roaring"
@@ -43,9 +44,9 @@ func TestReadThroughSegmentMatchRegexp(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- segment := fst.NewMockSegment(ctrl)
- reader := index.NewMockReader(ctrl)
- segment.EXPECT().Reader().Return(reader, nil)
+ seg := fst.NewMockSegment(ctrl)
+ reader := segment.NewMockReader(ctrl)
+ seg.EXPECT().Reader().Return(reader, nil)
cache, stopReporting, err := NewPostingsListCache(1, testPostingListCacheOptions)
require.NoError(t, err)
@@ -59,7 +60,7 @@ func TestReadThroughSegmentMatchRegexp(t *testing.T) {
}
readThrough, err := NewReadThroughSegment(
- segment, cache, defaultReadThroughSegmentOptions).Reader()
+ seg, cache, defaultReadThroughSegmentOptions).Reader()
require.NoError(t, err)
originalPL := roaring.NewPostingsList()
@@ -82,9 +83,9 @@ func TestReadThroughSegmentMatchRegexpCacheDisabled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- segment := fst.NewMockSegment(ctrl)
- reader := index.NewMockReader(ctrl)
- segment.EXPECT().Reader().Return(reader, nil)
+ seg := fst.NewMockSegment(ctrl)
+ reader := segment.NewMockReader(ctrl)
+ seg.EXPECT().Reader().Return(reader, nil)
cache, stopReporting, err := NewPostingsListCache(1, testPostingListCacheOptions)
require.NoError(t, err)
@@ -97,7 +98,7 @@ func TestReadThroughSegmentMatchRegexpCacheDisabled(t *testing.T) {
FSTSyntax: parsedRegex,
}
- readThrough, err := NewReadThroughSegment(segment, cache, ReadThroughSegmentOptions{
+ readThrough, err := NewReadThroughSegment(seg, cache, ReadThroughSegmentOptions{
CacheRegexp: false,
}).Reader()
require.NoError(t, err)
@@ -126,20 +127,20 @@ func TestReadThroughSegmentMatchRegexpNoCache(t *testing.T) {
defer ctrl.Finish()
var (
- segment = fst.NewMockSegment(ctrl)
- reader = index.NewMockReader(ctrl)
+ seg = fst.NewMockSegment(ctrl)
+ reader = segment.NewMockReader(ctrl)
field = []byte("some-field")
parsedRegex, err = syntax.Parse(".*this-will-be-slow.*", syntax.Simple)
)
require.NoError(t, err)
- segment.EXPECT().Reader().Return(reader, nil)
+ seg.EXPECT().Reader().Return(reader, nil)
compiledRegex := index.CompiledRegex{
FSTSyntax: parsedRegex,
}
readThrough, err := NewReadThroughSegment(
- segment, nil, defaultReadThroughSegmentOptions).Reader()
+ seg, nil, defaultReadThroughSegmentOptions).Reader()
require.NoError(t, err)
originalPL := roaring.NewPostingsList()
@@ -156,9 +157,9 @@ func TestReadThroughSegmentMatchTerm(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- segment := fst.NewMockSegment(ctrl)
- reader := index.NewMockReader(ctrl)
- segment.EXPECT().Reader().Return(reader, nil)
+ seg := fst.NewMockSegment(ctrl)
+ reader := segment.NewMockReader(ctrl)
+ seg.EXPECT().Reader().Return(reader, nil)
cache, stopReporting, err := NewPostingsListCache(1, testPostingListCacheOptions)
require.NoError(t, err)
@@ -173,7 +174,7 @@ func TestReadThroughSegmentMatchTerm(t *testing.T) {
require.NoError(t, originalPL.Insert(1))
readThrough, err := NewReadThroughSegment(
- segment, cache, defaultReadThroughSegmentOptions).Reader()
+ seg, cache, defaultReadThroughSegmentOptions).Reader()
require.NoError(t, err)
reader.EXPECT().MatchTerm(field, term).Return(originalPL, nil)
@@ -194,9 +195,9 @@ func TestReadThroughSegmentMatchTermCacheDisabled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- segment := fst.NewMockSegment(ctrl)
- reader := index.NewMockReader(ctrl)
- segment.EXPECT().Reader().Return(reader, nil)
+ seg := fst.NewMockSegment(ctrl)
+ reader := segment.NewMockReader(ctrl)
+ seg.EXPECT().Reader().Return(reader, nil)
cache, stopReporting, err := NewPostingsListCache(1, testPostingListCacheOptions)
require.NoError(t, err)
@@ -210,7 +211,7 @@ func TestReadThroughSegmentMatchTermCacheDisabled(t *testing.T) {
)
require.NoError(t, originalPL.Insert(1))
- readThrough, err := NewReadThroughSegment(segment, cache, ReadThroughSegmentOptions{
+ readThrough, err := NewReadThroughSegment(seg, cache, ReadThroughSegmentOptions{
CacheTerms: false,
}).Reader()
require.NoError(t, err)
@@ -237,8 +238,8 @@ func TestReadThroughSegmentMatchTermNoCache(t *testing.T) {
defer ctrl.Finish()
var (
- segment = fst.NewMockSegment(ctrl)
- reader = index.NewMockReader(ctrl)
+ seg = fst.NewMockSegment(ctrl)
+ reader = segment.NewMockReader(ctrl)
field = []byte("some-field")
term = []byte("some-term")
@@ -247,10 +248,10 @@ func TestReadThroughSegmentMatchTermNoCache(t *testing.T) {
)
require.NoError(t, originalPL.Insert(1))
- segment.EXPECT().Reader().Return(reader, nil)
+ seg.EXPECT().Reader().Return(reader, nil)
readThrough, err := NewReadThroughSegment(
- segment, nil, defaultReadThroughSegmentOptions).Reader()
+ seg, nil, defaultReadThroughSegmentOptions).Reader()
require.NoError(t, err)
reader.EXPECT().MatchTerm(field, term).Return(originalPL, nil)
@@ -297,9 +298,9 @@ func TestReadThroughSegmentMatchField(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- segment := fst.NewMockSegment(ctrl)
- reader := index.NewMockReader(ctrl)
- segment.EXPECT().Reader().Return(reader, nil)
+ seg := fst.NewMockSegment(ctrl)
+ reader := segment.NewMockReader(ctrl)
+ seg.EXPECT().Reader().Return(reader, nil)
cache, stopReporting, err := NewPostingsListCache(1, testPostingListCacheOptions)
require.NoError(t, err)
@@ -313,7 +314,7 @@ func TestReadThroughSegmentMatchField(t *testing.T) {
require.NoError(t, originalPL.Insert(1))
readThrough, err := NewReadThroughSegment(
- segment, cache, defaultReadThroughSegmentOptions).Reader()
+ seg, cache, defaultReadThroughSegmentOptions).Reader()
require.NoError(t, err)
reader.EXPECT().MatchField(field).Return(originalPL, nil)
@@ -334,9 +335,9 @@ func TestReadThroughSegmentMatchFieldCacheDisabled(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- segment := fst.NewMockSegment(ctrl)
- reader := index.NewMockReader(ctrl)
- segment.EXPECT().Reader().Return(reader, nil)
+ seg := fst.NewMockSegment(ctrl)
+ reader := segment.NewMockReader(ctrl)
+ seg.EXPECT().Reader().Return(reader, nil)
cache, stopReporting, err := NewPostingsListCache(1, testPostingListCacheOptions)
require.NoError(t, err)
@@ -349,7 +350,7 @@ func TestReadThroughSegmentMatchFieldCacheDisabled(t *testing.T) {
)
require.NoError(t, originalPL.Insert(1))
- readThrough, err := NewReadThroughSegment(segment, cache, ReadThroughSegmentOptions{
+ readThrough, err := NewReadThroughSegment(seg, cache, ReadThroughSegmentOptions{
CacheTerms: false,
}).Reader()
require.NoError(t, err)
@@ -376,8 +377,8 @@ func TestReadThroughSegmentMatchFieldNoCache(t *testing.T) {
defer ctrl.Finish()
var (
- segment = fst.NewMockSegment(ctrl)
- reader = index.NewMockReader(ctrl)
+ seg = fst.NewMockSegment(ctrl)
+ reader = segment.NewMockReader(ctrl)
field = []byte("some-field")
@@ -385,10 +386,10 @@ func TestReadThroughSegmentMatchFieldNoCache(t *testing.T) {
)
require.NoError(t, originalPL.Insert(1))
- segment.EXPECT().Reader().Return(reader, nil)
+ seg.EXPECT().Reader().Return(reader, nil)
readThrough, err := NewReadThroughSegment(
- segment, nil, defaultReadThroughSegmentOptions).Reader()
+ seg, nil, defaultReadThroughSegmentOptions).Reader()
require.NoError(t, err)
reader.EXPECT().MatchField(field).Return(originalPL, nil)
@@ -403,12 +404,12 @@ func TestCloseNoCache(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- segment := fst.NewMockSegment(ctrl)
+ seg := fst.NewMockSegment(ctrl)
readThrough := NewReadThroughSegment(
- segment, nil, defaultReadThroughSegmentOptions)
+ seg, nil, defaultReadThroughSegmentOptions)
- segment.EXPECT().Close().Return(nil)
+ seg.EXPECT().Close().Return(nil)
err := readThrough.Close()
require.NoError(t, err)
require.True(t, readThrough.(*ReadThroughSegment).closed)
diff --git a/src/dbnode/storage/index/results.go b/src/dbnode/storage/index/results.go
index 239c5f3838..52f104c4fc 100644
--- a/src/dbnode/storage/index/results.go
+++ b/src/dbnode/storage/index/results.go
@@ -40,7 +40,8 @@ type results struct {
nsID ident.ID
opts QueryResultsOptions
- resultsMap *ResultsMap
+ resultsMap *ResultsMap
+ totalDocsCount int
idPool ident.Pool
bytesPool pool.CheckedBytesPool
@@ -88,6 +89,7 @@ func (r *results) Reset(nsID ident.ID, opts QueryResultsOptions) {
// Reset all keys in the map next, this will finalize the keys.
r.resultsMap.Reset()
+ r.totalDocsCount = 0
// NB: could do keys+value in one step but I'm trying to avoid
// using an internal method of a code-gen'd type.
@@ -97,12 +99,14 @@ func (r *results) Reset(nsID ident.ID, opts QueryResultsOptions) {
// NB: If documents with duplicate IDs are added, they are simply ignored and
// the first document added with an ID is returned.
-func (r *results) AddDocuments(batch []doc.Document) (int, error) {
+func (r *results) AddDocuments(batch []doc.Document) (int, int, error) {
r.Lock()
err := r.addDocumentsBatchWithLock(batch)
size := r.resultsMap.Len()
+ docsCount := r.totalDocsCount + len(batch)
+ r.totalDocsCount = docsCount
r.Unlock()
- return size, err
+ return size, docsCount, err
}
func (r *results) addDocumentsBatchWithLock(batch []doc.Document) error {
@@ -119,9 +123,7 @@ func (r *results) addDocumentsBatchWithLock(batch []doc.Document) error {
return nil
}
-func (r *results) addDocumentWithLock(
- d doc.Document,
-) (bool, int, error) {
+func (r *results) addDocumentWithLock(d doc.Document) (bool, int, error) {
if len(d.ID) == 0 {
return false, r.resultsMap.Len(), errUnableToAddResultMissingID
}
@@ -141,8 +143,8 @@ func (r *results) addDocumentWithLock(
}
// i.e. it doesn't exist in the map, so we create the tags wrapping
- // fields prodided by the document.
- tags := convert.ToMetricTags(d, convert.Opts{NoClone: true})
+ // fields provided by the document.
+ tags := convert.ToSeriesTags(d, convert.Opts{NoClone: true})
// It is assumed that the document is valid for the lifetime of the index
// results.
@@ -175,6 +177,13 @@ func (r *results) Size() int {
return v
}
+func (r *results) TotalDocsCount() int {
+ r.RLock()
+ count := r.totalDocsCount
+ r.RUnlock()
+ return count
+}
+
func (r *results) Finalize() {
// Reset locks so cannot hold onto lock for call to Finalize.
r.Reset(nil, QueryResultsOptions{})
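Note: Size() and the new TotalDocsCount() intentionally diverge: Size reflects unique IDs after dedup in the results map, while TotalDocsCount counts every document handed to AddDocuments, duplicates and invalid entries included (as the tests below assert). A tiny stand-in illustration:

package main

import "fmt"

func main() {
	seen := map[string]struct{}{}
	totalDocs := 0
	for _, id := range []string{"abc", "abc", "def"} {
		totalDocs++ // counted even when the ID is a duplicate
		seen[id] = struct{}{}
	}
	fmt.Println("size:", len(seen), "totalDocsCount:", totalDocs) // size: 2 totalDocsCount: 3
}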
diff --git a/src/dbnode/storage/index/results_new_map.go b/src/dbnode/storage/index/results_new_map.go
index a23da161aa..6620400594 100644
--- a/src/dbnode/storage/index/results_new_map.go
+++ b/src/dbnode/storage/index/results_new_map.go
@@ -23,7 +23,7 @@ package index
import (
"github.com/m3db/m3/src/x/ident"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
const (
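Note on the import change above: under Go modules, major versions v2 and later must carry the /vN suffix in the import path, so bumping xxhash to v2 changes the path itself rather than just go.mod. A minimal usage sketch against the upstream module (the repo itself pulls it via the github.com mirror):

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// Sum64String hashes without allocating a byte slice copy.
	fmt.Println(xxhash.Sum64String("m3"))
}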
diff --git a/src/dbnode/storage/index/results_test.go b/src/dbnode/storage/index/results_test.go
index 0123d888a9..15fa3581e2 100644
--- a/src/dbnode/storage/index/results_test.go
+++ b/src/dbnode/storage/index/results_test.go
@@ -56,29 +56,58 @@ func optionsWithDocsArrayPool(opts Options, size, capacity int) Options {
func TestResultsInsertInvalid(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
dInvalid := doc.Document{ID: nil}
- size, err := res.AddDocuments([]doc.Document{dInvalid})
+ size, docsCount, err := res.AddDocuments([]doc.Document{dInvalid})
require.Error(t, err)
require.Equal(t, 0, size)
+ require.Equal(t, 1, docsCount)
+
+ require.Equal(t, 0, res.Size())
+ require.Equal(t, 1, res.TotalDocsCount())
}
func TestResultsInsertIdempotency(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
dValid := doc.Document{ID: []byte("abc")}
- size, err := res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
+
+ require.Equal(t, 1, res.Size())
+ require.Equal(t, 1, res.TotalDocsCount())
- size, err = res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err = res.AddDocuments([]doc.Document{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 2, docsCount)
+
+ require.Equal(t, 1, res.Size())
+ require.Equal(t, 2, res.TotalDocsCount())
+}
+
+func TestResultsInsertBatchOfTwo(t *testing.T) {
+ res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
+ d1 := doc.Document{ID: []byte("d1")}
+ d2 := doc.Document{ID: []byte("d2")}
+ size, docsCount, err := res.AddDocuments([]doc.Document{d1, d2})
+ require.NoError(t, err)
+ require.Equal(t, 2, size)
+ require.Equal(t, 2, docsCount)
+
+ require.Equal(t, 2, res.Size())
+ require.Equal(t, 2, res.TotalDocsCount())
}
func TestResultsFirstInsertWins(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
d1 := doc.Document{ID: []byte("abc")}
- size, err := res.AddDocuments([]doc.Document{d1})
+ size, docsCount, err := res.AddDocuments([]doc.Document{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
+
+ require.Equal(t, 1, res.Size())
+ require.Equal(t, 1, res.TotalDocsCount())
tags, ok := res.Map().Get(ident.StringID("abc"))
require.True(t, ok)
@@ -88,9 +117,13 @@ func TestResultsFirstInsertWins(t *testing.T) {
Fields: doc.Fields{
doc.Field{Name: []byte("foo"), Value: []byte("bar")},
}}
- size, err = res.AddDocuments([]doc.Document{d2})
+ size, docsCount, err = res.AddDocuments([]doc.Document{d2})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 2, docsCount)
+
+ require.Equal(t, 1, res.Size())
+ require.Equal(t, 2, res.TotalDocsCount())
tags, ok = res.Map().Get(ident.StringID("abc"))
require.True(t, ok)
@@ -100,9 +133,10 @@ func TestResultsFirstInsertWins(t *testing.T) {
func TestResultsInsertContains(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
dValid := doc.Document{ID: []byte("abc")}
- size, err := res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
tags, ok := res.Map().Get(ident.StringID("abc"))
require.True(t, ok)
@@ -112,11 +146,12 @@ func TestResultsInsertContains(t *testing.T) {
func TestResultsInsertDoesNotCopy(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
dValid := doc.Document{ID: []byte("abc"), Fields: []doc.Field{
- doc.Field{Name: []byte("name"), Value: []byte("value")},
+ {Name: []byte("name"), Value: []byte("value")},
}}
- size, err := res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
found := false
@@ -158,9 +193,10 @@ func TestResultsInsertDoesNotCopy(t *testing.T) {
func TestResultsReset(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
d1 := doc.Document{ID: []byte("abc")}
- size, err := res.AddDocuments([]doc.Document{d1})
+ size, docsCount, err := res.AddDocuments([]doc.Document{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
tags, ok := res.Map().Get(ident.StringID("abc"))
require.True(t, ok)
@@ -171,6 +207,7 @@ func TestResultsReset(t *testing.T) {
require.False(t, ok)
require.Equal(t, 0, tags.Remaining())
require.Equal(t, 0, res.Size())
+ require.Equal(t, 0, res.TotalDocsCount())
}
func TestResultsResetNamespaceClones(t *testing.T) {
@@ -190,9 +227,10 @@ func TestFinalize(t *testing.T) {
// Create a Results and insert some data.
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
d1 := doc.Document{ID: []byte("abc")}
- size, err := res.AddDocuments([]doc.Document{d1})
+ size, docsCount, err := res.AddDocuments([]doc.Document{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
+ require.Equal(t, 1, docsCount)
// Ensure the data is present.
tags, ok := res.Map().Get(ident.StringID("abc"))
@@ -206,6 +244,7 @@ func TestFinalize(t *testing.T) {
tags, ok = res.Map().Get(ident.StringID("abc"))
require.False(t, ok)
require.Equal(t, 0, res.Size())
+ require.Equal(t, 0, res.TotalDocsCount())
for _, entry := range res.Map().Iter() {
id, _ := entry.Key(), entry.Value()
diff --git a/src/dbnode/storage/index/types.go b/src/dbnode/storage/index/types.go
index a6ff297f84..4a667f57a6 100644
--- a/src/dbnode/storage/index/types.go
+++ b/src/dbnode/storage/index/types.go
@@ -26,8 +26,10 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/clock"
+ "github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
+ "github.com/m3db/m3/src/dbnode/storage/stats"
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/idx"
"github.com/m3db/m3/src/m3ninx/index/segment"
@@ -75,17 +77,31 @@ type Query struct {
idx.Query
}
-// QueryOptions enables users to specify constraints on query execution.
+// QueryOptions enables users to specify constraints and
+// preferences on query execution.
type QueryOptions struct {
- StartInclusive time.Time
- EndExclusive time.Time
- Limit int
+ StartInclusive time.Time
+ EndExclusive time.Time
+ SeriesLimit int
+ DocsLimit int
+ RequireExhaustive bool
}
-// LimitExceeded returns whether a given size exceeds the limit
-// the query options imposes, if it is enabled.
-func (o QueryOptions) LimitExceeded(size int) bool {
- return o.Limit > 0 && size >= o.Limit
+// IterationOptions enables users to specify iteration preferences.
+type IterationOptions struct {
+ SeriesIteratorConsolidator encoding.SeriesIteratorConsolidator
+}
+
+// SeriesLimitExceeded returns whether a given size exceeds the
+// series limit the query options imposes, if it is enabled.
+func (o QueryOptions) SeriesLimitExceeded(size int) bool {
+ return o.SeriesLimit > 0 && size >= o.SeriesLimit
+}
+
+// DocsLimitExceeded returns whether a given size exceeds the
+// docs limit the query options imposes, if it is enabled.
+func (o QueryOptions) DocsLimitExceeded(size int) bool {
+ return o.DocsLimit > 0 && size >= o.DocsLimit
}
// AggregationOptions enables users to specify constraints on aggregations.
@@ -117,12 +133,15 @@ type BaseResults interface {
// Size returns the number of IDs tracked.
Size() int
+ // TotalDocsCount returns the total number of documents observed.
+ TotalDocsCount() int
+
// AddDocuments adds the batch of documents to the results set, it will
// take a copy of the bytes backing the documents so the original can be
// modified after this function returns without affecting the results map.
// TODO(r): We will need to change this behavior once index fields are
// mutable and the most recent need to shadow older entries.
- AddDocuments(batch []doc.Document) (size int, err error)
+ AddDocuments(batch []doc.Document) (size, docsCount int, err error)
// Finalize releases any resources held by the Results object,
// including returning it to a backing pool.
@@ -194,7 +213,7 @@ type AggregateResults interface {
// i.e. it is not safe to use/modify the idents once this function returns.
AddFields(
batch []AggregateResultsEntry,
- ) (size int)
+ ) (size, docsCount int)
// Map returns a map from tag name -> possible tag values,
// comprising aggregate results.
@@ -221,6 +240,10 @@ type AggregateResultsOptions struct {
// FieldFilter is an optional param to filter aggregate values.
FieldFilter AggregateFieldFilter
+
+ // RestrictByQuery is a query to restrict the set of documents that must
+ // be present for an aggregated term to be returned.
+ RestrictByQuery *Query
}
// AggregateResultsAllocator allocates AggregateResults types.
@@ -326,7 +349,7 @@ type Block interface {
) (exhaustive bool, err error)
// AddResults adds bootstrap results to the block.
- AddResults(results result.IndexBlock) error
+ AddResults(resultsByVolumeType result.IndexBlockByVolumeType) error
// Tick does internal house keeping operations.
Tick(c context.Cancellable) (BlockTickResult, error)
@@ -353,6 +376,21 @@ type Block interface {
// data the mutable segments should have held at this time.
EvictMutableSegments() error
+ // NeedsColdMutableSegmentsEvicted returns whether this block has any cold mutable segments
+ // that are non-empty and sealed.
+ NeedsColdMutableSegmentsEvicted() bool
+
+ // EvictColdMutableSegments closes any stale cold mutable segments up to the currently active
+ // cold mutable segment (the one we are actively writing to).
+ EvictColdMutableSegments() error
+
+ // RotateColdMutableSegments rotates the currently active cold mutable segment out for a
+ // new cold mutable segment to write to.
+ RotateColdMutableSegments()
+
+ // MemorySegmentsData returns all in memory segments data.
+ MemorySegmentsData(ctx context.Context) ([]fst.SegmentData, error)
+
// Close will release any held resources and close the Block.
Close() error
}
@@ -374,15 +412,36 @@ func (e *EvictMutableSegmentResults) Add(o EvictMutableSegmentResults) {
// block and get an immutable list of segments back).
type BlockStatsReporter interface {
ReportSegmentStats(stats BlockSegmentStats)
+ ReportIndexingStats(stats BlockIndexingStats)
}
-// BlockStatsReporterFn implements the block stats reporter using
-// a callback function.
-type BlockStatsReporterFn func(stats BlockSegmentStats)
+type blockStatsReporter struct {
+ reportSegmentStats func(stats BlockSegmentStats)
+ reportIndexingStats func(stats BlockIndexingStats)
+}
+
+// NewBlockStatsReporter returns a new block stats reporter.
+func NewBlockStatsReporter(
+ reportSegmentStats func(stats BlockSegmentStats),
+ reportIndexingStats func(stats BlockIndexingStats),
+) BlockStatsReporter {
+ return blockStatsReporter{
+ reportSegmentStats: reportSegmentStats,
+ reportIndexingStats: reportIndexingStats,
+ }
+}
+
+func (r blockStatsReporter) ReportSegmentStats(stats BlockSegmentStats) {
+ r.reportSegmentStats(stats)
+}
+
+func (r blockStatsReporter) ReportIndexingStats(stats BlockIndexingStats) {
+ r.reportIndexingStats(stats)
+}
-// ReportSegmentStats implements the BlockStatsReporter interface.
-func (f BlockStatsReporterFn) ReportSegmentStats(stats BlockSegmentStats) {
- f(stats)
+// BlockIndexingStats reports stats about a block's indexing.
+type BlockIndexingStats struct {
+ IndexConcurrency int
}
// BlockSegmentStats has segment stats.
@@ -413,8 +472,11 @@ type WriteBatchResult struct {
// BlockTickResult returns statistics about tick.
type BlockTickResult struct {
- NumSegments int64
- NumDocs int64
+ NumSegments int64
+ NumSegmentsBootstrapped int64
+ NumSegmentsMutable int64
+ NumDocs int64
+ FreeMmap int64
}
// WriteBatch is a batch type that allows for building of a slice of documents
@@ -699,11 +761,11 @@ func (b *WriteBatch) Less(i, j int) bool {
panic(fmt.Errorf("unexpected sort by: %d", b.sortBy))
}
- if b.entries[i].OnIndexSeries != nil && b.entries[j].OnIndexSeries == nil {
- // This other entry has already been marked and this hasn't
+ if !b.entries[i].result.Done && b.entries[j].result.Done {
+ // This entry has not been marked done but the other has
return true
}
- if b.entries[i].OnIndexSeries == nil && b.entries[j].OnIndexSeries != nil {
+ if b.entries[i].result.Done && !b.entries[j].result.Done {
// This entry has already been marked and other hasn't
return false
}
@@ -771,7 +833,7 @@ type fieldsAndTermsIterator interface {
Close() error
// Reset resets the iterator to the start iterating the given segment.
- Reset(seg segment.Segment, opts fieldsAndTermsIteratorOpts) error
+ Reset(reader segment.Reader, opts fieldsAndTermsIteratorOpts) error
}
// Options control the Indexing knobs.
@@ -899,4 +961,10 @@ type Options interface {
// MmapReporter returns the mmap reporter.
MmapReporter() mmap.Reporter
+
+ // SetQueryStats sets the query stats.
+ SetQueryStats(value stats.QueryStats) Options
+
+ // QueryStats returns the current query stats.
+ QueryStats() stats.QueryStats
}
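Note: the single Limit is now split into SeriesLimit (unique series IDs) and DocsLimit (raw documents), each disabled when zero, matching the Size/TotalDocsCount split in the results type. A hedged sketch of how a result-accumulation loop might consult both; the QueryOptions field semantics are from the diff above, but the loop and helper names are illustrative:

package main

import "fmt"

type queryOptions struct {
	SeriesLimit int
	DocsLimit   int
}

func (o queryOptions) seriesLimitExceeded(size int) bool {
	return o.SeriesLimit > 0 && size >= o.SeriesLimit
}

func (o queryOptions) docsLimitExceeded(count int) bool {
	return o.DocsLimit > 0 && count >= o.DocsLimit
}

func main() {
	opts := queryOptions{SeriesLimit: 2, DocsLimit: 0} // zero disables a limit
	size, docs := 0, 0
	for _, batch := range [][]string{{"a", "b"}, {"b", "c"}} {
		size += len(batch) // pretend every ID is unique for this sketch
		docs += len(batch)
		if opts.seriesLimitExceeded(size) || opts.docsLimitExceeded(docs) {
			fmt.Println("stopping early: size", size, "docs", docs)
			return
		}
	}
}

Whether hitting a limit is an error or merely marks the result non-exhaustive is controlled by RequireExhaustive, as exercised in the index block tests below.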
diff --git a/src/dbnode/storage/index/write_batch_test.go b/src/dbnode/storage/index/write_batch_test.go
new file mode 100644
index 0000000000..9e05a1f821
--- /dev/null
+++ b/src/dbnode/storage/index/write_batch_test.go
@@ -0,0 +1,88 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package index
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/golang/mock/gomock"
+ xtime "github.com/m3db/m3/src/x/time"
+)
+
+func TestWriteBatchSortByUnmarkedAndIndexBlockStart(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ blockSize := time.Hour
+
+ now := time.Now()
+ blockStart := now.Truncate(blockSize)
+
+ nowNotBlockStartAligned := now.
+ Truncate(blockSize).
+ Add(time.Minute)
+
+ h1 := NewMockOnIndexSeries(ctrl)
+ h1.EXPECT().OnIndexFinalize(xtime.ToUnixNano(blockStart))
+ h1.EXPECT().OnIndexSuccess(xtime.ToUnixNano(blockStart))
+
+ h2 := NewMockOnIndexSeries(ctrl)
+ h2.EXPECT().OnIndexFinalize(xtime.ToUnixNano(blockStart))
+
+ h3 := NewMockOnIndexSeries(ctrl)
+ h3.EXPECT().OnIndexFinalize(xtime.ToUnixNano(blockStart))
+ h3.EXPECT().OnIndexSuccess(xtime.ToUnixNano(blockStart))
+
+ batch := NewWriteBatch(WriteBatchOptions{
+ IndexBlockSize: blockSize,
+ })
+ batch.Append(WriteBatchEntry{
+ Timestamp: nowNotBlockStartAligned,
+ OnIndexSeries: h1,
+ }, testDoc1())
+ batch.Append(WriteBatchEntry{
+ Timestamp: nowNotBlockStartAligned,
+ OnIndexSeries: h2,
+ }, testDoc2())
+ batch.Append(WriteBatchEntry{
+ Timestamp: nowNotBlockStartAligned,
+ OnIndexSeries: h3,
+ }, testDoc3())
+
+ // Mark entry in middle as failure
+ batch.MarkUnmarkedEntryError(fmt.Errorf("an error"), 1)
+
+ // Now sort by unmarked and block start.
+ batch.SortByUnmarkedAndIndexBlockStart()
+
+ // Make sure two remaining.
+ require.Equal(t, 2, len(batch.PendingDocs()))
+
+ // Make sure marks two done.
+ batch.MarkUnmarkedEntriesSuccess()
+
+ // Make sure none remaining.
+ require.Equal(t, 0, len(batch.PendingDocs()))
+}
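Note: this test exercises the ordering contract of SortByUnmarkedAndIndexBlockStart: entries not yet marked done sort ahead of done ones, then by index block start. An illustrative stand-alone sketch of that comparator (stand-in fields, not the WriteBatch internals):

package main

import (
	"fmt"
	"sort"
)

type entry struct {
	done       bool
	blockStart int64
}

func main() {
	entries := []entry{{true, 1}, {false, 2}, {false, 1}}
	sort.SliceStable(entries, func(i, j int) bool {
		if entries[i].done != entries[j].done {
			return !entries[i].done // unmarked (not done) entries first
		}
		return entries[i].blockStart < entries[j].blockStart
	})
	fmt.Println(entries) // [{false 1} {false 2} {true 1}]
}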
diff --git a/src/dbnode/storage/index_block_test.go b/src/dbnode/storage/index_block_test.go
index 28d0867a85..0ef65426e7 100644
--- a/src/dbnode/storage/index_block_test.go
+++ b/src/dbnode/storage/index_block_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -35,7 +35,9 @@ import (
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/idx"
"github.com/m3db/m3/src/m3ninx/index/segment"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/context"
+ xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
xtest "github.com/m3db/m3/src/x/test"
xtime "github.com/m3db/m3/src/x/time"
@@ -127,7 +129,7 @@ func testNamespaceMetadata(blockSize, period time.Duration) namespace.Metadata {
}
func TestNamespaceIndexNewBlockFn(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
blockSize := time.Hour
@@ -143,13 +145,16 @@ func TestNamespaceIndexNewBlockFn(t *testing.T) {
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
require.Equal(t, now.Truncate(blockSize), ts)
return mockBlock, nil
}
md := testNamespaceMetadata(blockSize, 4*time.Hour)
- index, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ index, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
defer func() {
@@ -171,7 +176,7 @@ func TestNamespaceIndexNewBlockFn(t *testing.T) {
}
func TestNamespaceIndexNewBlockFnRandomErr(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
blockSize := time.Hour
@@ -184,17 +189,20 @@ func TestNamespaceIndexNewBlockFnRandomErr(t *testing.T) {
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
return nil, fmt.Errorf("randomerr")
}
md := testNamespaceMetadata(blockSize, 4*time.Hour)
- _, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ _, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.Error(t, err)
}
func TestNamespaceIndexWrite(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{T: t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
blockSize := time.Hour
@@ -211,13 +219,16 @@ func TestNamespaceIndexWrite(t *testing.T) {
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
require.Equal(t, now.Truncate(blockSize), ts)
return mockBlock, nil
}
md := testNamespaceMetadata(blockSize, 4*time.Hour)
- idx, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ idx, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
defer func() {
@@ -251,7 +262,7 @@ func TestNamespaceIndexWrite(t *testing.T) {
}
func TestNamespaceIndexWriteCreatesBlock(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{T: t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
blockSize := time.Hour
@@ -279,6 +290,7 @@ func TestNamespaceIndexWriteCreatesBlock(t *testing.T) {
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
if ts.Equal(t0) {
@@ -290,7 +302,9 @@ func TestNamespaceIndexWriteCreatesBlock(t *testing.T) {
panic("should never get here")
}
md := testNamespaceMetadata(blockSize, 4*time.Hour)
- idx, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ idx, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
defer func() {
@@ -327,7 +341,7 @@ func TestNamespaceIndexWriteCreatesBlock(t *testing.T) {
}
func TestNamespaceIndexBootstrap(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{T: t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
blockSize := time.Hour
@@ -356,6 +370,7 @@ func TestNamespaceIndexBootstrap(t *testing.T) {
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
if ts.Equal(t0) {
@@ -367,15 +382,23 @@ func TestNamespaceIndexBootstrap(t *testing.T) {
panic("should never get here")
}
md := testNamespaceMetadata(blockSize, 4*time.Hour)
- idx, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ idx, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
seg1 := segment.NewMockSegment(ctrl)
seg2 := segment.NewMockSegment(ctrl)
seg3 := segment.NewMockSegment(ctrl)
+ t0Results := result.NewIndexBlockByVolumeType(t0)
+ t0Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, false)},
+ result.NewShardTimeRangesFromRange(t0, t1, 1, 2, 3)))
+ t1Results := result.NewIndexBlockByVolumeType(t1)
+ t1Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg2, false), result.NewSegment(seg3, false)},
+ result.NewShardTimeRangesFromRange(t1, t2, 1, 2, 3)))
bootstrapResults := result.IndexResults{
- t0Nanos: result.NewIndexBlock(t0, []segment.Segment{seg1}, result.NewShardTimeRanges(t0, t1, 1, 2, 3)),
- t1Nanos: result.NewIndexBlock(t1, []segment.Segment{seg2, seg3}, result.NewShardTimeRanges(t1, t2, 1, 2, 3)),
+ t0Nanos: t0Results,
+ t1Nanos: t1Results,
}
b0.EXPECT().AddResults(bootstrapResults[t0Nanos]).Return(nil)
@@ -384,7 +407,7 @@ func TestNamespaceIndexBootstrap(t *testing.T) {
}
func TestNamespaceIndexTickExpire(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{T: t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
retentionPeriod := 4 * time.Hour
@@ -407,6 +430,7 @@ func TestNamespaceIndexTickExpire(t *testing.T) {
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
if ts.Equal(t0) {
@@ -415,7 +439,9 @@ func TestNamespaceIndexTickExpire(t *testing.T) {
panic("should never get here")
}
md := testNamespaceMetadata(blockSize, retentionPeriod)
- idx, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ idx, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
nowLock.Lock()
@@ -432,7 +458,7 @@ func TestNamespaceIndexTickExpire(t *testing.T) {
}
func TestNamespaceIndexTick(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{T: t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
retentionPeriod := 4 * time.Hour
@@ -456,6 +482,7 @@ func TestNamespaceIndexTick(t *testing.T) {
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
if ts.Equal(t0) {
@@ -464,7 +491,9 @@ func TestNamespaceIndexTick(t *testing.T) {
panic("should never get here")
}
md := testNamespaceMetadata(blockSize, retentionPeriod)
- idx, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ idx, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
defer func() {
@@ -518,7 +547,7 @@ func TestNamespaceIndexTick(t *testing.T) {
}
func TestNamespaceIndexBlockQuery(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{T: t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
retention := 2 * time.Hour
@@ -552,6 +581,7 @@ func TestNamespaceIndexBlockQuery(t *testing.T) {
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
if ts.Equal(t0) {
@@ -563,7 +593,9 @@ func TestNamespaceIndexBlockQuery(t *testing.T) {
panic("should never get here")
}
md := testNamespaceMetadata(blockSize, retention)
- idx, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ idx, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
defer func() {
@@ -573,58 +605,249 @@ func TestNamespaceIndexBlockQuery(t *testing.T) {
seg1 := segment.NewMockSegment(ctrl)
seg2 := segment.NewMockSegment(ctrl)
seg3 := segment.NewMockSegment(ctrl)
+ t0Results := result.NewIndexBlockByVolumeType(t0)
+ t0Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, false)},
+ result.NewShardTimeRangesFromRange(t0, t1, 1, 2, 3)))
+ t1Results := result.NewIndexBlockByVolumeType(t1)
+ t1Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg2, false), result.NewSegment(seg3, false)},
+ result.NewShardTimeRangesFromRange(t1, t2, 1, 2, 3)))
bootstrapResults := result.IndexResults{
- t0Nanos: result.NewIndexBlock(t0, []segment.Segment{seg1}, result.NewShardTimeRanges(t0, t1, 1, 2, 3)),
- t1Nanos: result.NewIndexBlock(t1, []segment.Segment{seg2, seg3}, result.NewShardTimeRanges(t1, t2, 1, 2, 3)),
+ t0Nanos: t0Results,
+ t1Nanos: t1Results,
}
b0.EXPECT().AddResults(bootstrapResults[t0Nanos]).Return(nil)
b1.EXPECT().AddResults(bootstrapResults[t1Nanos]).Return(nil)
require.NoError(t, idx.Bootstrap(bootstrapResults))
- // only queries as much as is needed (wrt to time)
- ctx := context.NewContext()
- q := defaultQuery
- qOpts := index.QueryOptions{
- StartInclusive: t0,
- EndExclusive: now.Add(time.Minute),
+ for _, test := range []struct {
+ name string
+ requireExhaustive bool
+ }{
+ {"allow non-exhaustive", false},
+ {"require exhaustive", true},
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ // only queries as much as is needed (wrt to time)
+ ctx := context.NewContext()
+ q := defaultQuery
+ qOpts := index.QueryOptions{
+ StartInclusive: t0,
+ EndExclusive: now.Add(time.Minute),
+ }
+
+ // create initial span from a mock tracer and get ctx
+ mtr := mocktracer.New()
+ sp := mtr.StartSpan("root")
+ ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
+
+ b0.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
+ result, err := idx.Query(ctx, q, qOpts)
+ require.NoError(t, err)
+ require.True(t, result.Exhaustive)
+
+ // queries multiple blocks if needed
+ qOpts = index.QueryOptions{
+ StartInclusive: t0,
+ EndExclusive: t2.Add(time.Minute),
+ RequireExhaustive: test.requireExhaustive,
+ }
+ b0.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
+ b1.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
+ result, err = idx.Query(ctx, q, qOpts)
+ require.NoError(t, err)
+ require.True(t, result.Exhaustive)
+
+ // stops querying once a block returns non-exhaustive
+ qOpts = index.QueryOptions{
+ StartInclusive: t0,
+ EndExclusive: t0.Add(time.Minute),
+ RequireExhaustive: test.requireExhaustive,
+ }
+ b0.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(false, nil)
+ result, err = idx.Query(ctx, q, qOpts)
+ if test.requireExhaustive {
+ require.Error(t, err)
+ require.False(t, xerrors.IsRetryableError(err))
+ } else {
+ require.NoError(t, err)
+ require.False(t, result.Exhaustive)
+ }
+
+ sp.Finish()
+ spans := mtr.FinishedSpans()
+ require.Len(t, spans, 11)
+ })
}
+}
- // create initial span from a mock tracer and get ctx
- mtr := mocktracer.New()
- sp := mtr.StartSpan("root")
- ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
+func TestLimits(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
- b0.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
- _, err = idx.Query(ctx, q, qOpts)
- require.NoError(t, err)
+ retention := 2 * time.Hour
+ blockSize := time.Hour
+ now := time.Now().Truncate(blockSize).Add(10 * time.Minute)
+ t0 := now.Truncate(blockSize)
+ t0Nanos := xtime.ToUnixNano(t0)
+ t1 := t0.Add(1 * blockSize)
+ var nowLock sync.Mutex
+ nowFn := func() time.Time {
+ nowLock.Lock()
+ defer nowLock.Unlock()
+ return now
+ }
+ opts := DefaultTestOptions()
+ opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(nowFn))
- // queries multiple blocks if needed
- qOpts = index.QueryOptions{
- StartInclusive: t0,
- EndExclusive: t2.Add(time.Minute),
+ b0 := index.NewMockBlock(ctrl)
+ b0.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
+ b0.EXPECT().Close().Return(nil).AnyTimes()
+ b0.EXPECT().StartTime().Return(t0).AnyTimes()
+ b0.EXPECT().EndTime().Return(t0.Add(blockSize)).AnyTimes()
+ newBlockFn := func(
+ ts time.Time,
+ md namespace.Metadata,
+ _ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
+ io index.Options,
+ ) (index.Block, error) {
+ if ts.Equal(t0) {
+ return b0, nil
+ }
+ panic("should never get here")
}
- b0.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
- b1.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
- _, err = idx.Query(ctx, q, qOpts)
+ md := testNamespaceMetadata(blockSize, retention)
+ idx, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
- // stops querying once a block returns non-exhaustive
- qOpts = index.QueryOptions{
- StartInclusive: t0,
- EndExclusive: t0.Add(time.Minute),
+ defer func() {
+ require.NoError(t, idx.Close())
+ }()
+
+ seg1 := segment.NewMockSegment(ctrl)
+ t0Results := result.NewIndexBlockByVolumeType(t0)
+ t0Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, false)},
+ result.NewShardTimeRangesFromRange(t0, t1, 1, 2, 3)))
+ bootstrapResults := result.IndexResults{
+ t0Nanos: t0Results,
}
- b0.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(false, nil)
- _, err = idx.Query(ctx, q, qOpts)
- require.NoError(t, err)
- sp.Finish()
- spans := mtr.FinishedSpans()
- require.Len(t, spans, 11)
+ b0.EXPECT().AddResults(bootstrapResults[t0Nanos]).Return(nil)
+ require.NoError(t, idx.Bootstrap(bootstrapResults))
+
+ for _, test := range []struct {
+ name string
+ seriesLimit int
+ docsLimit int
+ requireExhaustive bool
+ expectedErr string
+ }{
+ {
+ name: "no limits",
+ seriesLimit: 0,
+ docsLimit: 0,
+ requireExhaustive: false,
+ expectedErr: "",
+ },
+ {
+ name: "series limit only",
+ seriesLimit: 1,
+ docsLimit: 0,
+ requireExhaustive: false,
+ expectedErr: "",
+ },
+ {
+ name: "docs limit only",
+ seriesLimit: 0,
+ docsLimit: 1,
+ requireExhaustive: false,
+ expectedErr: "",
+ },
+ {
+ name: "both series and docs limit",
+ seriesLimit: 1,
+ docsLimit: 1,
+ requireExhaustive: false,
+ expectedErr: "",
+ },
+ {
+ name: "no limits",
+ seriesLimit: 0,
+ docsLimit: 0,
+ requireExhaustive: true,
+ expectedErr: "query exceeded limit: require_exhaustive=true, series_limit=0, series_matched=1, docs_limit=0, docs_matched=2",
+ },
+ {
+ name: "series limit only",
+ seriesLimit: 1,
+ docsLimit: 0,
+ requireExhaustive: true,
+ expectedErr: "query exceeded limit: require_exhaustive=true, series_limit=1, series_matched=1, docs_limit=0, docs_matched=2",
+ },
+ {
+ name: "docs limit only",
+ seriesLimit: 0,
+ docsLimit: 1,
+ requireExhaustive: true,
+ expectedErr: "query exceeded limit: require_exhaustive=true, series_limit=0, series_matched=1, docs_limit=1, docs_matched=2",
+ },
+ {
+ name: "both series and docs limit",
+ seriesLimit: 1,
+ docsLimit: 1,
+ requireExhaustive: true,
+ expectedErr: "query exceeded limit: require_exhaustive=true, series_limit=1, series_matched=1, docs_limit=1, docs_matched=2",
+ },
+ } {
+ t.Run(test.name, func(t *testing.T) {
+			// only queries as much as is needed (wrt time)
+ ctx := context.NewContext()
+ q := defaultQuery
+ qOpts := index.QueryOptions{
+ StartInclusive: t0,
+ EndExclusive: t1.Add(time.Minute),
+ SeriesLimit: test.seriesLimit,
+ DocsLimit: test.docsLimit,
+ RequireExhaustive: test.requireExhaustive,
+ }
+
+ // create initial span from a mock tracer and get ctx
+ mtr := mocktracer.New()
+ sp := mtr.StartSpan("root")
+ ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
+
+ b0.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).
+ DoAndReturn(func(ctx context.Context,
+ cancellable interface{},
+ query interface{},
+ opts interface{},
+ results index.BaseResults,
+ logFields interface{}) (bool, error) {
+ results.AddDocuments([]doc.Document{
+ // Results in size=1 and docs=2.
+ doc.Document{ID: []byte("A")},
+ doc.Document{ID: []byte("A")},
+ })
+ return false, nil
+ })
+
+ result, err := idx.Query(ctx, q, qOpts)
+ require.False(t, result.Exhaustive)
+ if test.requireExhaustive {
+ require.Error(t, err)
+ require.Equal(t, test.expectedErr, err.Error())
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
}
func TestNamespaceIndexBlockQueryReleasingContext(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{T: t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
retention := 2 * time.Hour
@@ -658,6 +881,7 @@ func TestNamespaceIndexBlockQueryReleasingContext(t *testing.T) {
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
if ts.Equal(t0) {
@@ -675,7 +899,9 @@ func TestNamespaceIndexBlockQueryReleasingContext(t *testing.T) {
stubResult := index.NewQueryResults(ident.StringID("ns"), index.QueryResultsOptions{}, iopts)
md := testNamespaceMetadata(blockSize, retention)
- idxIface, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ idxIface, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
idx, ok := idxIface.(*nsIndex)
@@ -689,9 +915,15 @@ func TestNamespaceIndexBlockQueryReleasingContext(t *testing.T) {
seg1 := segment.NewMockSegment(ctrl)
seg2 := segment.NewMockSegment(ctrl)
seg3 := segment.NewMockSegment(ctrl)
+ t0Results := result.NewIndexBlockByVolumeType(t0)
+ t0Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, false)},
+ result.NewShardTimeRangesFromRange(t0, t1, 1, 2, 3)))
+ t1Results := result.NewIndexBlockByVolumeType(t1)
+ t1Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg2, false), result.NewSegment(seg3, false)},
+ result.NewShardTimeRangesFromRange(t1, t2, 1, 2, 3)))
bootstrapResults := result.IndexResults{
- t0Nanos: result.NewIndexBlock(t0, []segment.Segment{seg1}, result.NewShardTimeRanges(t0, t1, 1, 2, 3)),
- t1Nanos: result.NewIndexBlock(t1, []segment.Segment{seg2, seg3}, result.NewShardTimeRanges(t1, t2, 1, 2, 3)),
+ t0Nanos: t0Results,
+ t1Nanos: t1Results,
}
b0.EXPECT().AddResults(bootstrapResults[t0Nanos]).Return(nil)
@@ -715,7 +947,7 @@ func TestNamespaceIndexBlockQueryReleasingContext(t *testing.T) {
}
func TestNamespaceIndexBlockAggregateQuery(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{T: t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
query := idx.NewTermQuery([]byte("a"), []byte("b"))
@@ -750,6 +982,7 @@ func TestNamespaceIndexBlockAggregateQuery(t *testing.T) {
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
if ts.Equal(t0) {
@@ -761,7 +994,9 @@ func TestNamespaceIndexBlockAggregateQuery(t *testing.T) {
panic("should never get here")
}
md := testNamespaceMetadata(blockSize, retention)
- idx, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ idx, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
defer func() {
@@ -771,64 +1006,91 @@ func TestNamespaceIndexBlockAggregateQuery(t *testing.T) {
seg1 := segment.NewMockSegment(ctrl)
seg2 := segment.NewMockSegment(ctrl)
seg3 := segment.NewMockSegment(ctrl)
+ t0Results := result.NewIndexBlockByVolumeType(t0)
+ t0Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, false)},
+ result.NewShardTimeRangesFromRange(t0, t1, 1, 2, 3)))
+ t1Results := result.NewIndexBlockByVolumeType(t1)
+ t1Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg2, false), result.NewSegment(seg3, false)},
+ result.NewShardTimeRangesFromRange(t1, t2, 1, 2, 3)))
bootstrapResults := result.IndexResults{
- t0Nanos: result.NewIndexBlock(t0, []segment.Segment{seg1}, result.NewShardTimeRanges(t0, t1, 1, 2, 3)),
- t1Nanos: result.NewIndexBlock(t1, []segment.Segment{seg2, seg3}, result.NewShardTimeRanges(t1, t2, 1, 2, 3)),
+ t0Nanos: t0Results,
+ t1Nanos: t1Results,
}
b0.EXPECT().AddResults(bootstrapResults[t0Nanos]).Return(nil)
b1.EXPECT().AddResults(bootstrapResults[t1Nanos]).Return(nil)
require.NoError(t, idx.Bootstrap(bootstrapResults))
- // only queries as much as is needed (wrt to time)
- ctx := context.NewContext()
-
- // create initial span from a mock tracer and get ctx
- mtr := mocktracer.New()
- sp := mtr.StartSpan("root")
- ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
-
- q := index.Query{
- Query: query,
- }
- qOpts := index.QueryOptions{
- StartInclusive: t0,
- EndExclusive: now.Add(time.Minute),
- }
- aggOpts := index.AggregationOptions{QueryOptions: qOpts}
-
- b0.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
- _, err = idx.AggregateQuery(ctx, q, aggOpts)
- require.NoError(t, err)
-
- // queries multiple blocks if needed
- qOpts = index.QueryOptions{
- StartInclusive: t0,
- EndExclusive: t2.Add(time.Minute),
- }
- aggOpts = index.AggregationOptions{QueryOptions: qOpts}
- b0.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
- b1.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
- _, err = idx.AggregateQuery(ctx, q, aggOpts)
- require.NoError(t, err)
-
- // stops querying once a block returns non-exhaustive
- qOpts = index.QueryOptions{
- StartInclusive: t0,
- EndExclusive: t0.Add(time.Minute),
+ for _, test := range []struct {
+ name string
+ requireExhaustive bool
+ }{
+ {"allow non-exhaustive", false},
+ {"require exhaustive", true},
+ } {
+ t.Run(test.name, func(t *testing.T) {
+			// only queries as much as is needed (wrt time)
+ ctx := context.NewContext()
+
+ // create initial span from a mock tracer and get ctx
+ mtr := mocktracer.New()
+ sp := mtr.StartSpan("root")
+ ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
+
+ q := index.Query{
+ Query: query,
+ }
+ qOpts := index.QueryOptions{
+ StartInclusive: t0,
+ EndExclusive: now.Add(time.Minute),
+ RequireExhaustive: test.requireExhaustive,
+ }
+ aggOpts := index.AggregationOptions{QueryOptions: qOpts}
+
+ b0.EXPECT().Aggregate(gomock.Any(), gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
+ result, err := idx.AggregateQuery(ctx, q, aggOpts)
+ require.NoError(t, err)
+ require.True(t, result.Exhaustive)
+
+ // queries multiple blocks if needed
+ qOpts = index.QueryOptions{
+ StartInclusive: t0,
+ EndExclusive: t2.Add(time.Minute),
+ RequireExhaustive: test.requireExhaustive,
+ }
+ aggOpts = index.AggregationOptions{QueryOptions: qOpts}
+ b0.EXPECT().Aggregate(gomock.Any(), gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
+ b1.EXPECT().Aggregate(gomock.Any(), gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
+ result, err = idx.AggregateQuery(ctx, q, aggOpts)
+ require.NoError(t, err)
+ require.True(t, result.Exhaustive)
+
+ // stops querying once a block returns non-exhaustive
+ qOpts = index.QueryOptions{
+ StartInclusive: t0,
+ EndExclusive: t0.Add(time.Minute),
+ RequireExhaustive: test.requireExhaustive,
+ }
+ b0.EXPECT().Aggregate(gomock.Any(), gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(false, nil)
+ aggOpts = index.AggregationOptions{QueryOptions: qOpts}
+ result, err = idx.AggregateQuery(ctx, q, aggOpts)
+ if test.requireExhaustive {
+ require.Error(t, err)
+ require.False(t, xerrors.IsRetryableError(err))
+ } else {
+ require.NoError(t, err)
+ require.False(t, result.Exhaustive)
+ }
+
+ sp.Finish()
+ spans := mtr.FinishedSpans()
+ require.Len(t, spans, 11)
+ })
}
- b0.EXPECT().Query(gomock.Any(), gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(false, nil)
- aggOpts = index.AggregationOptions{QueryOptions: qOpts}
- _, err = idx.AggregateQuery(ctx, q, aggOpts)
- require.NoError(t, err)
-
- sp.Finish()
- spans := mtr.FinishedSpans()
- require.Len(t, spans, 11)
}
func TestNamespaceIndexBlockAggregateQueryReleasingContext(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{T: t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
retention := 2 * time.Hour
@@ -863,6 +1125,7 @@ func TestNamespaceIndexBlockAggregateQueryReleasingContext(t *testing.T) {
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
if ts.Equal(t0) {
@@ -877,10 +1140,13 @@ func TestNamespaceIndexBlockAggregateQueryReleasingContext(t *testing.T) {
iopts := opts.IndexOptions()
mockPool := index.NewMockAggregateResultsPool(ctrl)
iopts = iopts.SetAggregateResultsPool(mockPool)
- stubResult := index.NewAggregateResults(ident.StringID("ns"), index.AggregateResultsOptions{}, iopts)
+ stubResult := index.NewAggregateResults(ident.StringID("ns"),
+ index.AggregateResultsOptions{}, iopts)
md := testNamespaceMetadata(blockSize, retention)
- idxIface, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ idxIface, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
idx, ok := idxIface.(*nsIndex)
@@ -894,9 +1160,15 @@ func TestNamespaceIndexBlockAggregateQueryReleasingContext(t *testing.T) {
seg1 := segment.NewMockSegment(ctrl)
seg2 := segment.NewMockSegment(ctrl)
seg3 := segment.NewMockSegment(ctrl)
+ t0Results := result.NewIndexBlockByVolumeType(t0)
+ t0Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, false)},
+ result.NewShardTimeRangesFromRange(t0, t1, 1, 2, 3)))
+ t1Results := result.NewIndexBlockByVolumeType(t1)
+ t1Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg2, false), result.NewSegment(seg3, false)},
+ result.NewShardTimeRangesFromRange(t1, t2, 1, 2, 3)))
bootstrapResults := result.IndexResults{
- t0Nanos: result.NewIndexBlock(t0, []segment.Segment{seg1}, result.NewShardTimeRanges(t0, t1, 1, 2, 3)),
- t1Nanos: result.NewIndexBlock(t1, []segment.Segment{seg2, seg3}, result.NewShardTimeRanges(t1, t2, 1, 2, 3)),
+ t0Nanos: t0Results,
+ t1Nanos: t1Results,
}
b0.EXPECT().AddResults(bootstrapResults[t0Nanos]).Return(nil)
@@ -916,7 +1188,7 @@ func TestNamespaceIndexBlockAggregateQueryReleasingContext(t *testing.T) {
gomock.InOrder(
mockPool.EXPECT().Get().Return(stubResult),
- b0.EXPECT().Query(ctx, gomock.Any(), q, qOpts, gomock.Any(), gomock.Any()).Return(true, nil),
+ b0.EXPECT().Aggregate(ctx, gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(true, nil),
mockPool.EXPECT().Put(stubResult),
)
_, err = idx.AggregateQuery(ctx, q, aggOpts)
@@ -925,7 +1197,7 @@ func TestNamespaceIndexBlockAggregateQueryReleasingContext(t *testing.T) {
}
func TestNamespaceIndexBlockAggregateQueryAggPath(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{T: t})
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
queries := []idx.Query{idx.NewAllQuery(), idx.NewFieldQuery([]byte("field"))}
@@ -960,6 +1232,7 @@ func TestNamespaceIndexBlockAggregateQueryAggPath(t *testing.T) {
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
if ts.Equal(t0) {
@@ -971,7 +1244,9 @@ func TestNamespaceIndexBlockAggregateQueryAggPath(t *testing.T) {
panic("should never get here")
}
md := testNamespaceMetadata(blockSize, retention)
- idx, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ idx, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
defer func() {
@@ -981,9 +1256,15 @@ func TestNamespaceIndexBlockAggregateQueryAggPath(t *testing.T) {
seg1 := segment.NewMockSegment(ctrl)
seg2 := segment.NewMockSegment(ctrl)
seg3 := segment.NewMockSegment(ctrl)
+ t0Results := result.NewIndexBlockByVolumeType(t0)
+ t0Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg1, false)},
+ result.NewShardTimeRangesFromRange(t0, t1, 1, 2, 3)))
+ t1Results := result.NewIndexBlockByVolumeType(t1)
+ t1Results.SetBlock(idxpersist.DefaultIndexVolumeType, result.NewIndexBlock([]result.Segment{result.NewSegment(seg2, false), result.NewSegment(seg3, false)},
+ result.NewShardTimeRangesFromRange(t1, t2, 1, 2, 3)))
bootstrapResults := result.IndexResults{
- t0Nanos: result.NewIndexBlock(t0, []segment.Segment{seg1}, result.NewShardTimeRanges(t0, t1, 1, 2, 3)),
- t1Nanos: result.NewIndexBlock(t1, []segment.Segment{seg2, seg3}, result.NewShardTimeRanges(t1, t2, 1, 2, 3)),
+ t0Nanos: t0Results,
+ t1Nanos: t1Results,
}
b0.EXPECT().AddResults(bootstrapResults[t0Nanos]).Return(nil)
@@ -999,33 +1280,53 @@ func TestNamespaceIndexBlockAggregateQueryAggPath(t *testing.T) {
}
aggOpts := index.AggregationOptions{QueryOptions: qOpts}
- for _, query := range queries {
- q := index.Query{
- Query: query,
- }
- b0.EXPECT().Aggregate(ctx, gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
- _, err = idx.AggregateQuery(ctx, q, aggOpts)
- require.NoError(t, err)
-
- // queries multiple blocks if needed
- qOpts = index.QueryOptions{
- StartInclusive: t0,
- EndExclusive: t2.Add(time.Minute),
- }
- aggOpts = index.AggregationOptions{QueryOptions: qOpts}
- b0.EXPECT().Aggregate(ctx, gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
- b1.EXPECT().Aggregate(ctx, gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
- _, err = idx.AggregateQuery(ctx, q, aggOpts)
- require.NoError(t, err)
-
- // stops querying once a block returns non-exhaustive
- qOpts = index.QueryOptions{
- StartInclusive: t0,
- EndExclusive: t0.Add(time.Minute),
- }
- b0.EXPECT().Aggregate(ctx, gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(false, nil)
- aggOpts = index.AggregationOptions{QueryOptions: qOpts}
- _, err = idx.AggregateQuery(ctx, q, aggOpts)
- require.NoError(t, err)
+ for _, test := range []struct {
+ name string
+ requireExhaustive bool
+ }{
+ {"allow non-exhaustive", false},
+ {"require exhaustive", true},
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ for _, query := range queries {
+ q := index.Query{
+ Query: query,
+ }
+ b0.EXPECT().Aggregate(ctx, gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
+ result, err := idx.AggregateQuery(ctx, q, aggOpts)
+ require.NoError(t, err)
+ require.True(t, result.Exhaustive)
+
+ // queries multiple blocks if needed
+ qOpts = index.QueryOptions{
+ StartInclusive: t0,
+ EndExclusive: t2.Add(time.Minute),
+ RequireExhaustive: test.requireExhaustive,
+ }
+ aggOpts = index.AggregationOptions{QueryOptions: qOpts}
+ b0.EXPECT().Aggregate(ctx, gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
+ b1.EXPECT().Aggregate(ctx, gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(true, nil)
+ result, err = idx.AggregateQuery(ctx, q, aggOpts)
+ require.NoError(t, err)
+ require.True(t, result.Exhaustive)
+
+ // stops querying once a block returns non-exhaustive
+ qOpts = index.QueryOptions{
+ StartInclusive: t0,
+ EndExclusive: t0.Add(time.Minute),
+ RequireExhaustive: test.requireExhaustive,
+ }
+ b0.EXPECT().Aggregate(ctx, gomock.Any(), qOpts, gomock.Any(), gomock.Any()).Return(false, nil)
+ aggOpts = index.AggregationOptions{QueryOptions: qOpts}
+ result, err = idx.AggregateQuery(ctx, q, aggOpts)
+ if test.requireExhaustive {
+ require.Error(t, err)
+ require.False(t, xerrors.IsRetryableError(err))
+ } else {
+ require.NoError(t, err)
+ require.False(t, result.Exhaustive)
+ }
+ }
+ })
}
}
diff --git a/src/dbnode/storage/index_insert_queue.go b/src/dbnode/storage/index_insert_queue.go
index e127c15c10..f3edbbc7c8 100644
--- a/src/dbnode/storage/index_insert_queue.go
+++ b/src/dbnode/storage/index_insert_queue.go
@@ -22,12 +22,15 @@ package storage
import (
"errors"
+ "strconv"
"sync"
"time"
"github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/storage/index"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
+ xsync "github.com/m3db/m3/src/x/sync"
"github.com/uber-go/tally"
)
@@ -45,9 +48,9 @@ const (
nsIndexInsertQueueStateClosed
// TODO(prateek): runtime options for this stuff
- defaultIndexBatchBackoff = time.Millisecond
+ defaultIndexBatchBackoff = 2 * time.Millisecond
- indexResetAllInsertsEvery = 30 * time.Second
+ indexResetAllInsertsEvery = 3 * time.Minute
)
type nsIndexInsertQueue struct {
@@ -69,6 +72,8 @@ type nsIndexInsertQueue struct {
notifyInsert chan struct{}
closeCh chan struct{}
+ scope tally.Scope
+
metrics nsIndexInsertQueueMetrics
}
@@ -97,16 +102,28 @@ func newNamespaceIndexInsertQueue(
indexBatchFn: indexBatchFn,
nowFn: nowFn,
sleepFn: time.Sleep,
- notifyInsert: make(chan struct{}, 1),
- closeCh: make(chan struct{}, 1),
- metrics: newNamespaceIndexInsertQueueMetrics(subscope),
+		// NB(r): Use 2 * num cores so that each per-CPU-core insert
+		// queue can always enqueue a notification without it being
+		// lost.
+ notifyInsert: make(chan struct{}, 2*xsync.NumCores()),
+ closeCh: make(chan struct{}, 1),
+ scope: subscope,
+ metrics: newNamespaceIndexInsertQueueMetrics(subscope),
}
- q.currBatch = q.newBatch()
+ q.currBatch = q.newBatch(newBatchOptions{instrumented: true})
return q
}
-func (q *nsIndexInsertQueue) newBatch() *nsIndexInsertBatch {
- return newNsIndexInsertBatch(q.namespaceMetadata, q.nowFn)
+type newBatchOptions struct {
+ instrumented bool
+}
+
+func (q *nsIndexInsertQueue) newBatch(opts newBatchOptions) *nsIndexInsertBatch {
+ scope := tally.NoopScope
+ if opts.instrumented {
+ scope = q.scope
+ }
+ return newNsIndexInsertBatch(q.namespaceMetadata, q.nowFn, scope)
}
func (q *nsIndexInsertQueue) insertLoop() {
@@ -115,7 +132,7 @@ func (q *nsIndexInsertQueue) insertLoop() {
}()
var lastInsert time.Time
- freeBatch := q.newBatch()
+ batch := q.newBatch(newBatchOptions{})
for range q.notifyInsert {
// Check if inserting too fast
elapsedSinceLastInsert := q.nowFn().Sub(lastInsert)
@@ -124,38 +141,28 @@ func (q *nsIndexInsertQueue) insertLoop() {
var (
state nsIndexInsertQueueState
backoff time.Duration
- batch *nsIndexInsertBatch
)
q.Lock()
state = q.state
if elapsedSinceLastInsert < q.indexBatchBackoff {
// Need to backoff before rotate and insert
backoff = q.indexBatchBackoff - elapsedSinceLastInsert
- } else {
- // No backoff required, rotate and go
- batch = q.currBatch
- q.currBatch = freeBatch
}
q.Unlock()
if backoff > 0 {
q.sleepFn(backoff)
- q.Lock()
- // Rotate after backoff
- batch = q.currBatch
- q.currBatch = freeBatch
- q.Unlock()
}
- if len(batch.shardInserts) > 0 {
- all := batch.AllInserts()
+ // Rotate after backoff
+ batchWg := q.currBatch.Rotate(batch)
+
+ all := batch.AllInserts()
+ if all.Len() > 0 {
q.indexBatchFn(all)
}
- batch.wg.Done()
- // Set the free batch
- batch.Reset()
- freeBatch = batch
+ batchWg.Done()
lastInsert = q.nowFn()
@@ -168,21 +175,57 @@ func (q *nsIndexInsertQueue) insertLoop() {
func (q *nsIndexInsertQueue) InsertBatch(
batch *index.WriteBatch,
) (*sync.WaitGroup, error) {
- q.Lock()
- if q.state != nsIndexInsertQueueStateOpen {
- q.Unlock()
- return nil, errIndexInsertQueueNotOpen
- }
batchLen := batch.Len()
- q.currBatch.shardInserts = append(q.currBatch.shardInserts, batch)
- wg := q.currBatch.wg
- q.Unlock()
- // Notify insert loop
- select {
- case q.notifyInsert <- struct{}{}:
- default:
- // Loop busy, already ready to consume notification
+	// Choose the queue relevant to the current CPU index.
+	// Note: since the per-CPU-core inserts are allocated when
+	// nsIndexInsertBatch is constructed and then never modified,
+	// it is safe to concurrently read (but not modify, obviously).
+ inserts := q.currBatch.insertsByCPUCore[xsync.CPUCore()]
+ inserts.Lock()
+ firstInsert := len(inserts.shardInserts) == 0
+ inserts.shardInserts = append(inserts.shardInserts, batch)
+ wg := inserts.wg
+ inserts.Unlock()
+
+	// Notify insert loop, only required if this is the first insert
+	// for this CPU core.
+ if firstInsert {
+ select {
+ case q.notifyInsert <- struct{}{}:
+ default:
+ // Loop busy, already ready to consume notification.
+ }
+ }
+
+ q.metrics.numPending.Inc(int64(batchLen))
+ return wg, nil
+}
+
+func (q *nsIndexInsertQueue) InsertPending(
+ pending []writes.PendingIndexInsert,
+) (*sync.WaitGroup, error) {
+ batchLen := len(pending)
+
+	// Choose the queue relevant to the current CPU index.
+	// Note: since the per-CPU-core inserts are allocated when
+	// nsIndexInsertBatch is constructed and then never modified,
+	// it is safe to concurrently read (but not modify, obviously).
+ inserts := q.currBatch.insertsByCPUCore[xsync.CPUCore()]
+ inserts.Lock()
+ firstInsert := len(inserts.batchInserts) == 0
+ inserts.batchInserts = append(inserts.batchInserts, pending...)
+ wg := inserts.wg
+ inserts.Unlock()
+
+	// Notify insert loop, only required if this is the first insert
+	// for this CPU core.
+ if firstInsert {
+ select {
+ case q.notifyInsert <- struct{}{}:
+ default:
+ // Loop busy, already ready to consume notification.
+ }
}
q.metrics.numPending.Inc(int64(batchLen))
@@ -229,24 +272,66 @@ func (q *nsIndexInsertQueue) Stop() error {
type nsIndexInsertBatchFn func(inserts *index.WriteBatch)
type nsIndexInsertBatch struct {
- namespace namespace.Metadata
- nowFn clock.NowFn
- wg *sync.WaitGroup
- shardInserts []*index.WriteBatch
+ namespace namespace.Metadata
+ nowFn clock.NowFn
+ wg *sync.WaitGroup
+	// Note: since the per-CPU-core inserts are allocated when
+	// nsIndexInsertBatch is constructed and then never modified,
+	// it is safe to concurrently read (but not modify, obviously).
+ insertsByCPUCore []*nsIndexInsertsByCPUCore
allInserts *index.WriteBatch
allInsertsLastReset time.Time
}
+type nsIndexInsertsByCPUCore struct {
+ sync.Mutex
+ shardInserts []*index.WriteBatch
+ batchInserts []writes.PendingIndexInsert
+ wg *sync.WaitGroup
+ metrics nsIndexInsertsByCPUCoreMetrics
+}
+
+type nsIndexInsertsByCPUCoreMetrics struct {
+ rotateInsertsShard tally.Counter
+ rotateInsertsPending tally.Counter
+}
+
+func newNamespaceIndexInsertsByCPUCoreMetrics(
+ cpuIndex int,
+ scope tally.Scope,
+) nsIndexInsertsByCPUCoreMetrics {
+ scope = scope.Tagged(map[string]string{
+ "cpu-index": strconv.Itoa(cpuIndex),
+ })
+
+ const rotate = "rotate-inserts"
+ return nsIndexInsertsByCPUCoreMetrics{
+ rotateInsertsShard: scope.Tagged(map[string]string{
+ "rotate-type": "shard-insert",
+ }).Counter(rotate),
+ rotateInsertsPending: scope.Tagged(map[string]string{
+ "rotate-type": "pending-insert",
+ }).Counter(rotate),
+ }
+}
+
func newNsIndexInsertBatch(
namespace namespace.Metadata,
nowFn clock.NowFn,
+ scope tally.Scope,
) *nsIndexInsertBatch {
b := &nsIndexInsertBatch{
namespace: namespace,
nowFn: nowFn,
}
+ numCores := xsync.NumCores()
+ for i := 0; i < numCores; i++ {
+ b.insertsByCPUCore = append(b.insertsByCPUCore, &nsIndexInsertsByCPUCore{
+ metrics: newNamespaceIndexInsertsByCPUCoreMetrics(i, scope),
+ })
+ }
b.allocateAllInserts()
- b.Reset()
+ b.Rotate(nil)
return b
}
@@ -259,25 +344,96 @@ func (b *nsIndexInsertBatch) allocateAllInserts() {
func (b *nsIndexInsertBatch) AllInserts() *index.WriteBatch {
b.allInserts.Reset()
- for _, shardInserts := range b.shardInserts {
- b.allInserts.AppendAll(shardInserts)
+ for _, inserts := range b.insertsByCPUCore {
+ inserts.Lock()
+ for _, shardInserts := range inserts.shardInserts {
+ b.allInserts.AppendAll(shardInserts)
+ }
+ for _, insert := range inserts.batchInserts {
+ b.allInserts.Append(insert.Entry, insert.Document)
+ }
+ inserts.Unlock()
}
return b.allInserts
}
-func (b *nsIndexInsertBatch) Reset() {
+func (b *nsIndexInsertBatch) Rotate(target *nsIndexInsertBatch) *sync.WaitGroup {
+ prevWg := b.wg
+
+ // We always expect to be waiting for an index.
b.wg = &sync.WaitGroup{}
- // We always expect to be waiting for an index
b.wg.Add(1)
- for i := range b.shardInserts {
- // TODO(prateek): if we start pooling `[]index.WriteBatchEntry`, then we could return to the pool here.
- b.shardInserts[i] = nil
+
+ // Rotate to target if we need to.
+ for idx, inserts := range b.insertsByCPUCore {
+ if target == nil {
+ // No target to rotate with.
+ inserts.Lock()
+ // Reset
+ inserts.shardInserts = inserts.shardInserts[:0]
+ inserts.batchInserts = inserts.batchInserts[:0]
+ // Use new wait group.
+ inserts.wg = b.wg
+ inserts.Unlock()
+ continue
+ }
+
+ // First prepare the target to take the current batch's inserts.
+ targetInserts := target.insertsByCPUCore[idx]
+ targetInserts.Lock()
+
+		// Reset the target inserts since we'll take a ref to them shortly.
+ for i := range targetInserts.shardInserts {
+ // TODO(prateek): if we start pooling `[]index.WriteBatchEntry`, then we could return to the pool here.
+ targetInserts.shardInserts[i] = nil
+ }
+ prevTargetShardInserts := targetInserts.shardInserts[:0]
+
+		// memset optimization: this range-assign loop compiles to a
+		// memclr and drops references held by stale entries.
+ var zero writes.PendingIndexInsert
+ for i := range targetInserts.batchInserts {
+ targetInserts.batchInserts[i] = zero
+ }
+ prevTargetBatchInserts := targetInserts.batchInserts[:0]
+
+ // Lock the current batch inserts now ready to rotate to the target.
+ inserts.Lock()
+
+ // Update current slice refs to take target's inserts.
+ targetInserts.shardInserts = inserts.shardInserts
+ targetInserts.batchInserts = inserts.batchInserts
+ targetInserts.wg = inserts.wg
+
+ // Reuse the target's old slices.
+ inserts.shardInserts = prevTargetShardInserts
+ inserts.batchInserts = prevTargetBatchInserts
+
+ // Use new wait group.
+ inserts.wg = b.wg
+
+		// Unlock as early as possible so writes can keep enqueuing.
+ inserts.Unlock()
+
+ numTargetInsertsShard := len(targetInserts.shardInserts)
+ numTargetInsertsPending := len(targetInserts.batchInserts)
+
+ // Now can unlock target inserts too.
+ targetInserts.Unlock()
+
+ if n := numTargetInsertsShard; n > 0 {
+ inserts.metrics.rotateInsertsShard.Inc(int64(n))
+ }
+ if n := numTargetInsertsPending; n > 0 {
+ inserts.metrics.rotateInsertsPending.Inc(int64(n))
+ }
}
- b.shardInserts = b.shardInserts[:0]
+
if b.nowFn().Sub(b.allInsertsLastReset) > indexResetAllInsertsEvery {
// NB(r): Sometimes this can grow very high, so we reset it relatively frequently
b.allocateAllInserts()
}
+
+ return prevWg
}
type nsIndexInsertQueueMetrics struct {
diff --git a/src/dbnode/storage/index_query_concurrent_test.go b/src/dbnode/storage/index_query_concurrent_test.go
index a23b7f988a..f7f1172367 100644
--- a/src/dbnode/storage/index_query_concurrent_test.go
+++ b/src/dbnode/storage/index_query_concurrent_test.go
@@ -347,7 +347,7 @@ func testNamespaceIndexHighConcurrentQueries(
for _, entry := range results.Results.Map().Iter() {
id := entry.Key().String()
- doc, err := convert.FromMetricIterNoClone(entry.Key(), entry.Value())
+ doc, err := convert.FromSeriesIDAndTagIter(entry.Key(), entry.Value())
require.NoError(t, err)
if err != nil {
continue // this will fail the test anyway, but don't want to panic
diff --git a/src/dbnode/storage/index_queue_forward_write_test.go b/src/dbnode/storage/index_queue_forward_write_test.go
index 7b704ba007..e414b29317 100644
--- a/src/dbnode/storage/index_queue_forward_write_test.go
+++ b/src/dbnode/storage/index_queue_forward_write_test.go
@@ -31,6 +31,7 @@ import (
"github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/series"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
"github.com/m3db/m3/src/m3ninx/doc"
m3ninxidx "github.com/m3db/m3/src/m3ninx/idx"
@@ -74,7 +75,7 @@ func generateOptionsNowAndBlockSize() (Options, time.Time, time.Duration) {
func setupForwardIndex(
t *testing.T,
ctrl *gomock.Controller,
-) (namespaceIndex, time.Time, time.Duration) {
+) (NamespaceIndex, time.Time, time.Duration) {
newFn := func(
fn nsIndexInsertBatchFn,
md namespace.Metadata,
@@ -90,7 +91,9 @@ func setupForwardIndex(
require.NoError(t, err)
opts, now, blockSize := generateOptionsNowAndBlockSize()
- idx, err := newNamespaceIndexWithInsertQueueFn(md, testShardSet, newFn, opts)
+ idx, err := newNamespaceIndexWithInsertQueueFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newFn, opts)
require.NoError(t, err)
var (
@@ -232,7 +235,7 @@ func createMockBlocks(
ctrl *gomock.Controller,
blockStart time.Time,
nextBlockStart time.Time,
-) (*index.MockBlock, *index.MockBlock, newBlockFn) {
+) (*index.MockBlock, *index.MockBlock, index.NewBlockFn) {
mockBlock := index.NewMockBlock(ctrl)
mockBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
mockBlock.EXPECT().Close().Return(nil)
@@ -248,6 +251,7 @@ func createMockBlocks(
ts time.Time,
md namespace.Metadata,
_ index.BlockOptions,
+ _ namespace.RuntimeOptionsManager,
io index.Options,
) (index.Block, error) {
if ts.Equal(blockStart) {
@@ -280,7 +284,9 @@ func TestNamespaceIndexForwardWrite(t *testing.T) {
mockBlock, futureBlock, newBlockFn := createMockBlocks(ctrl, blockStart, futureStart)
md := testNamespaceMetadata(blockSize, 4*time.Hour)
- idx, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ idx, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
defer func() {
@@ -320,7 +326,9 @@ func TestNamespaceIndexForwardWriteCreatesBlock(t *testing.T) {
mockBlock, futureBlock, newBlockFn := createMockBlocks(ctrl, blockStart, futureStart)
md := testNamespaceMetadata(blockSize, 4*time.Hour)
- idx, err := newNamespaceIndexWithNewBlockFn(md, testShardSet, newBlockFn, opts)
+ idx, err := newNamespaceIndexWithNewBlockFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newBlockFn, opts)
require.NoError(t, err)
defer func() {
@@ -378,7 +386,9 @@ func testShardForwardWriteTaggedRefCountIndex(
opts, now, blockSize := generateOptionsNowAndBlockSize()
opts = opts.SetIndexOptions(opts.IndexOptions().SetInsertMode(syncType))
- idx, err := newNamespaceIndexWithInsertQueueFn(md, testShardSet, newFn, opts)
+ idx, err := newNamespaceIndexWithInsertQueueFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newFn, opts)
require.NoError(t, err)
defer func() {
@@ -397,6 +407,7 @@ func writeToShard(
ctx context.Context,
t *testing.T,
shard *dbShard,
+ idx NamespaceIndex,
now time.Time,
id string,
shouldWrite bool,
@@ -404,7 +415,7 @@ func writeToShard(
tag := ident.Tag{Name: ident.StringID(id), Value: ident.StringID("")}
idTags := ident.NewTags(tag)
iter := ident.NewTagsIterator(idTags)
- _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID(id), iter, now,
+ seriesWrite, err := shard.WriteTagged(ctx, ident.StringID(id), iter, now,
1.0, xtime.Second, nil, series.WriteOptions{
TruncateType: series.TypeBlock,
TransformOptions: series.WriteTransformOptions{
@@ -413,13 +424,19 @@ func writeToShard(
},
})
require.NoError(t, err)
- require.Equal(t, shouldWrite, wasWritten)
+ require.Equal(t, shouldWrite, seriesWrite.WasWritten)
+ if seriesWrite.NeedsIndex {
+ err = idx.WritePending([]writes.PendingIndexInsert{
+ seriesWrite.PendingIndexInsert,
+ })
+ require.NoError(t, err)
+ }
}
func verifyShard(
ctx context.Context,
t *testing.T,
- idx namespaceIndex,
+ idx NamespaceIndex,
now time.Time,
next time.Time,
id string,
@@ -465,13 +482,13 @@ func writeToShardAndVerify(
ctx context.Context,
t *testing.T,
shard *dbShard,
- idx namespaceIndex,
+ idx NamespaceIndex,
now time.Time,
next time.Time,
id string,
shouldWrite bool,
) {
- writeToShard(ctx, t, shard, now, id, shouldWrite)
+ writeToShard(ctx, t, shard, idx, now, id, shouldWrite)
verifyShard(ctx, t, idx, now, next, id)
}
@@ -479,10 +496,10 @@ func testShardForwardWriteTaggedSyncRefCount(
t *testing.T,
now time.Time,
next time.Time,
- idx namespaceIndex,
+ idx NamespaceIndex,
opts Options,
) {
- shard := testDatabaseShardWithIndexFn(t, opts, idx)
+ shard := testDatabaseShardWithIndexFn(t, opts, idx, false)
shard.SetRuntimeOptions(runtime.NewOptions().
SetWriteNewSeriesAsync(false))
defer shard.Close()
@@ -524,7 +541,7 @@ func testShardForwardWriteTaggedAsyncRefCount(
t *testing.T,
now time.Time,
next time.Time,
- idx namespaceIndex,
+ idx NamespaceIndex,
opts Options,
) {
testReporterOpts := xmetrics.NewTestStatsReporterOptions()
@@ -538,7 +555,7 @@ func testShardForwardWriteTaggedAsyncRefCount(
SetMetricsScope(scope).
SetReportInterval(100 * time.Millisecond))
- shard := testDatabaseShardWithIndexFn(t, opts, idx)
+ shard := testDatabaseShardWithIndexFn(t, opts, idx, false)
shard.SetRuntimeOptions(runtime.NewOptions().
SetWriteNewSeriesAsync(true))
defer shard.Close()
@@ -546,9 +563,9 @@ func testShardForwardWriteTaggedAsyncRefCount(
ctx := context.NewContext()
defer ctx.Close()
- writeToShard(ctx, t, shard, now, "foo", true)
- writeToShard(ctx, t, shard, now, "bar", true)
- writeToShard(ctx, t, shard, now, "baz", true)
+ writeToShard(ctx, t, shard, idx, now, "foo", true)
+ writeToShard(ctx, t, shard, idx, now, "bar", true)
+ writeToShard(ctx, t, shard, idx, now, "baz", true)
verifyShard(ctx, t, idx, now, next, "foo")
verifyShard(ctx, t, idx, now, next, "bar")
diff --git a/src/dbnode/storage/index_queue_test.go b/src/dbnode/storage/index_queue_test.go
index 096ed91f47..4b8f57135e 100644
--- a/src/dbnode/storage/index_queue_test.go
+++ b/src/dbnode/storage/index_queue_test.go
@@ -47,7 +47,7 @@ func testNamespaceIndexOptions() index.Options {
return DefaultTestOptions().IndexOptions()
}
-func newTestNamespaceIndex(t *testing.T, ctrl *gomock.Controller) (namespaceIndex, *MocknamespaceIndexInsertQueue) {
+func newTestNamespaceIndex(t *testing.T, ctrl *gomock.Controller) (NamespaceIndex, *MocknamespaceIndexInsertQueue) {
q := NewMocknamespaceIndexInsertQueue(ctrl)
newFn := func(fn nsIndexInsertBatchFn, md namespace.Metadata, nowFn clock.NowFn, s tally.Scope) namespaceIndexInsertQueue {
return q
@@ -55,7 +55,9 @@ func newTestNamespaceIndex(t *testing.T, ctrl *gomock.Controller) (namespaceInde
q.EXPECT().Start().Return(nil)
md, err := namespace.NewMetadata(defaultTestNs1ID, defaultTestNs1Opts)
require.NoError(t, err)
- idx, err := newNamespaceIndexWithInsertQueueFn(md, testShardSet, newFn, DefaultTestOptions())
+ idx, err := newNamespaceIndexWithInsertQueueFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newFn, DefaultTestOptions())
assert.NoError(t, err)
return idx, q
}
@@ -72,7 +74,9 @@ func TestNamespaceIndexHappyPath(t *testing.T) {
md, err := namespace.NewMetadata(defaultTestNs1ID, defaultTestNs1Opts)
require.NoError(t, err)
- idx, err := newNamespaceIndexWithInsertQueueFn(md, testShardSet, newFn, DefaultTestOptions())
+ idx, err := newNamespaceIndexWithInsertQueueFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newFn, DefaultTestOptions())
assert.NoError(t, err)
assert.NotNil(t, idx)
@@ -91,7 +95,9 @@ func TestNamespaceIndexStartErr(t *testing.T) {
q.EXPECT().Start().Return(fmt.Errorf("random err"))
md, err := namespace.NewMetadata(defaultTestNs1ID, defaultTestNs1Opts)
require.NoError(t, err)
- idx, err := newNamespaceIndexWithInsertQueueFn(md, testShardSet, newFn, DefaultTestOptions())
+ idx, err := newNamespaceIndexWithInsertQueueFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newFn, DefaultTestOptions())
assert.Error(t, err)
assert.Nil(t, idx)
}
@@ -108,7 +114,9 @@ func TestNamespaceIndexStopErr(t *testing.T) {
md, err := namespace.NewMetadata(defaultTestNs1ID, defaultTestNs1Opts)
require.NoError(t, err)
- idx, err := newNamespaceIndexWithInsertQueueFn(md, testShardSet, newFn, DefaultTestOptions())
+ idx, err := newNamespaceIndexWithInsertQueueFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newFn, DefaultTestOptions())
assert.NoError(t, err)
assert.NotNil(t, idx)
@@ -187,7 +195,9 @@ func TestNamespaceIndexInsertOlderThanRetentionPeriod(t *testing.T) {
opts := testNamespaceIndexOptions().SetInsertMode(index.InsertSync)
opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(nowFn))
- dbIdx, err := newNamespaceIndex(md, testShardSet, DefaultTestOptions().SetIndexOptions(opts))
+ dbIdx, err := newNamespaceIndex(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, DefaultTestOptions().SetIndexOptions(opts))
assert.NoError(t, err)
idx, ok := dbIdx.(*nsIndex)
@@ -272,7 +282,7 @@ func TestNamespaceIndexInsertQueueInteraction(t *testing.T) {
func setupIndex(t *testing.T,
ctrl *gomock.Controller,
now time.Time,
-) namespaceIndex {
+) NamespaceIndex {
newFn := func(
fn nsIndexInsertBatchFn,
md namespace.Metadata,
@@ -285,8 +295,11 @@ func setupIndex(t *testing.T,
}
md, err := namespace.NewMetadata(defaultTestNs1ID, defaultTestNs1Opts)
require.NoError(t, err)
- idx, err := newNamespaceIndexWithInsertQueueFn(md, testShardSet, newFn, DefaultTestOptions().
- SetIndexOptions(testNamespaceIndexOptions().SetInsertMode(index.InsertSync)))
+ idx, err := newNamespaceIndexWithInsertQueueFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newFn, DefaultTestOptions().
+ SetIndexOptions(testNamespaceIndexOptions().
+ SetInsertMode(index.InsertSync)))
assert.NoError(t, err)
var (
diff --git a/src/dbnode/storage/index_test.go b/src/dbnode/storage/index_test.go
index 5af3d228ce..6a93d6e671 100644
--- a/src/dbnode/storage/index_test.go
+++ b/src/dbnode/storage/index_test.go
@@ -22,21 +22,29 @@ package storage
import (
"fmt"
+ "io/ioutil"
+ "os"
"testing"
"time"
+ indexpb "github.com/m3db/m3/src/dbnode/generated/proto/index"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
+ "github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/index"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/idx"
"github.com/m3db/m3/src/m3ninx/index/segment"
+ idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/context"
+ xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
xtest "github.com/m3db/m3/src/x/test"
xtime "github.com/m3db/m3/src/x/time"
+ protobuftypes "github.com/gogo/protobuf/types"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -44,7 +52,9 @@ import (
func TestNamespaceIndexCleanupExpiredFilesets(t *testing.T) {
md := testNamespaceMetadata(time.Hour, time.Hour*8)
- nsIdx, err := newNamespaceIndex(md, testShardSet, DefaultTestOptions())
+ nsIdx, err := newNamespaceIndex(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, DefaultTestOptions())
require.NoError(t, err)
now := time.Now().Truncate(time.Hour)
@@ -64,12 +74,155 @@ func TestNamespaceIndexCleanupExpiredFilesets(t *testing.T) {
require.NoError(t, idx.CleanupExpiredFileSets(now))
}
+func TestNamespaceIndexCleanupDuplicateFilesets(t *testing.T) {
+ md := testNamespaceMetadata(time.Hour, time.Hour*8)
+ nsIdx, err := newNamespaceIndex(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, DefaultTestOptions())
+ require.NoError(t, err)
+
+ idx := nsIdx.(*nsIndex)
+ now := time.Now().Truncate(time.Hour)
+ indexBlockSize := 2 * time.Hour
+ blockTime := now.Add(-2 * indexBlockSize)
+
+ dir, err := ioutil.TempDir("", t.Name())
+ require.NoError(t, err)
+
+ defer os.RemoveAll(dir)
+
+ fset1, err := ioutil.TempFile(dir, "fileset-9000-0-")
+ require.NoError(t, err)
+ fset2, err := ioutil.TempFile(dir, "fileset-9000-1-")
+ require.NoError(t, err)
+ fset3, err := ioutil.TempFile(dir, "fileset-9000-2-")
+ require.NoError(t, err)
+
+ volumeType := "extra"
+ infoFiles := []fs.ReadIndexInfoFileResult{
+ {
+ Info: indexpb.IndexVolumeInfo{
+ BlockStart: blockTime.UnixNano(),
+ BlockSize: int64(indexBlockSize),
+ Shards: []uint32{0, 1, 2},
+ IndexVolumeType: &protobuftypes.StringValue{
+ Value: volumeType,
+ },
+ },
+ AbsoluteFilePaths: []string{fset1.Name()},
+ },
+ {
+ Info: indexpb.IndexVolumeInfo{
+ BlockStart: blockTime.UnixNano(),
+ BlockSize: int64(indexBlockSize),
+ Shards: []uint32{0, 1, 2},
+ IndexVolumeType: &protobuftypes.StringValue{
+ Value: volumeType,
+ },
+ },
+ AbsoluteFilePaths: []string{fset2.Name()},
+ },
+ {
+ Info: indexpb.IndexVolumeInfo{
+ BlockStart: blockTime.UnixNano(),
+ BlockSize: int64(indexBlockSize),
+ Shards: []uint32{0, 1, 2, 3},
+ IndexVolumeType: &protobuftypes.StringValue{
+ Value: volumeType,
+ },
+ },
+ AbsoluteFilePaths: []string{fset3.Name()},
+ },
+ }
+
+ idx.readIndexInfoFilesFn = func(
+ filePathPrefix string,
+ namespace ident.ID,
+ readerBufferSize int,
+ ) []fs.ReadIndexInfoFileResult {
+ return infoFiles
+ }
+ idx.deleteFilesFn = func(s []string) error {
+ require.Equal(t, []string{fset1.Name(), fset2.Name()}, s)
+ multiErr := xerrors.NewMultiError()
+ for _, file := range s {
+ multiErr = multiErr.Add(os.Remove(file))
+ }
+ return multiErr.FinalError()
+ }
+ require.NoError(t, idx.CleanupDuplicateFileSets())
+}
+
+func TestNamespaceIndexCleanupDuplicateFilesetsNoop(t *testing.T) {
+ md := testNamespaceMetadata(time.Hour, time.Hour*8)
+ nsIdx, err := newNamespaceIndex(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, DefaultTestOptions())
+ require.NoError(t, err)
+
+ idx := nsIdx.(*nsIndex)
+ now := time.Now().Truncate(time.Hour)
+ indexBlockSize := 2 * time.Hour
+ blockTime := now.Add(-2 * indexBlockSize)
+
+ dir, err := ioutil.TempDir("", t.Name())
+ require.NoError(t, err)
+
+ defer os.RemoveAll(dir)
+
+ fset1, err := ioutil.TempFile(dir, "fileset-9000-0-")
+ require.NoError(t, err)
+ fset2, err := ioutil.TempFile(dir, "fileset-9000-1-")
+ require.NoError(t, err)
+
+ volumeType := string(idxpersist.DefaultIndexVolumeType)
+ infoFiles := []fs.ReadIndexInfoFileResult{
+ {
+ Info: indexpb.IndexVolumeInfo{
+ BlockStart: blockTime.UnixNano(),
+ BlockSize: int64(indexBlockSize),
+ Shards: []uint32{0, 1, 2},
+ IndexVolumeType: &protobuftypes.StringValue{
+ Value: volumeType,
+ },
+ },
+ AbsoluteFilePaths: []string{fset1.Name()},
+ },
+ {
+ Info: indexpb.IndexVolumeInfo{
+ BlockStart: blockTime.UnixNano(),
+ BlockSize: int64(indexBlockSize),
+ Shards: []uint32{4},
+ IndexVolumeType: &protobuftypes.StringValue{
+ Value: volumeType,
+ },
+ },
+ AbsoluteFilePaths: []string{fset2.Name()},
+ },
+ }
+
+ idx.readIndexInfoFilesFn = func(
+ filePathPrefix string,
+ namespace ident.ID,
+ readerBufferSize int,
+ ) []fs.ReadIndexInfoFileResult {
+ return infoFiles
+ }
+ idx.deleteFilesFn = func(s []string) error {
+ require.Equal(t, []string{}, s)
+ return nil
+ }
+ require.NoError(t, idx.CleanupDuplicateFileSets())
+}
+
func TestNamespaceIndexCleanupExpiredFilesetsWithBlocks(t *testing.T) {
ctrl := gomock.NewController(xtest.Reporter{T: t})
defer ctrl.Finish()
md := testNamespaceMetadata(time.Hour, time.Hour*8)
- nsIdx, err := newNamespaceIndex(md, testShardSet, DefaultTestOptions())
+ nsIdx, err := newNamespaceIndex(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, DefaultTestOptions())
require.NoError(t, err)
defer func() {
@@ -98,103 +251,43 @@ func TestNamespaceIndexFlushSuccess(t *testing.T) {
test := newTestIndex(t, ctrl)
- now := time.Now().Truncate(test.indexBlockSize)
idx := test.index.(*nsIndex)
defer func() {
require.NoError(t, idx.Close())
}()
- mockBlock := index.NewMockBlock(ctrl)
- mockBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
- blockTime := now.Add(-2 * test.indexBlockSize)
- mockBlock.EXPECT().StartTime().Return(blockTime).AnyTimes()
- mockBlock.EXPECT().EndTime().Return(blockTime.Add(test.indexBlockSize)).AnyTimes()
- idx.state.blocksByTime[xtime.ToUnixNano(blockTime)] = mockBlock
-
- mockBlock.EXPECT().IsSealed().Return(true)
- mockBlock.EXPECT().NeedsMutableSegmentsEvicted().Return(true)
- mockBlock.EXPECT().Close().Return(nil)
-
- mockShard := NewMockdatabaseShard(ctrl)
- mockShard.EXPECT().ID().Return(uint32(0)).AnyTimes()
- mockShard.EXPECT().FlushState(blockTime).Return(fileOpState{WarmStatus: fileOpSuccess}, nil)
- mockShard.EXPECT().FlushState(blockTime.Add(test.blockSize)).Return(fileOpState{WarmStatus: fileOpSuccess}, nil)
- shards := []databaseShard{mockShard}
-
- mockFlush := persist.NewMockIndexFlush(ctrl)
-
- persistClosed := false
- persistCalled := false
- closer := func() ([]segment.Segment, error) {
- persistClosed = true
- return nil, nil
- }
- persistFn := func(segment.Builder) error {
- persistCalled = true
- return nil
- }
- preparedPersist := persist.PreparedIndexPersist{
- Close: closer,
- Persist: persistFn,
- }
- mockFlush.EXPECT().PrepareIndex(xtest.CmpMatcher(persist.IndexPrepareOptions{
- NamespaceMetadata: test.metadata,
- BlockStart: blockTime,
- FileSetType: persist.FileSetFlushType,
- Shards: map[uint32]struct{}{0: struct{}{}},
- })).Return(preparedPersist, nil)
-
- results := block.NewMockFetchBlocksMetadataResults(ctrl)
- results.EXPECT().Results().Return(nil)
- results.EXPECT().Close()
- mockShard.EXPECT().FetchBlocksMetadataV2(gomock.Any(), blockTime, blockTime.Add(test.indexBlockSize),
- gomock.Any(), gomock.Any(), block.FetchBlocksMetadataOptions{}).Return(results, nil, nil)
-
- mockBlock.EXPECT().AddResults(gomock.Any()).Return(nil)
- mockBlock.EXPECT().EvictMutableSegments().Return(nil)
-
- require.NoError(t, idx.Flush(mockFlush, shards))
- require.True(t, persistCalled)
- require.True(t, persistClosed)
+ verifyFlushForShards(
+ t,
+ ctrl,
+ idx,
+ test.blockSize,
+ []uint32{0},
+ )
}
-func TestNamespaceIndexFlushShardStateNotSuccess(t *testing.T) {
+func TestNamespaceIndexFlushSuccessMultipleShards(t *testing.T) {
ctrl := gomock.NewController(xtest.Reporter{T: t})
defer ctrl.Finish()
test := newTestIndex(t, ctrl)
- now := time.Now().Truncate(test.indexBlockSize)
idx := test.index.(*nsIndex)
defer func() {
require.NoError(t, idx.Close())
}()
- mockBlock := index.NewMockBlock(ctrl)
- mockBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
- blockTime := now.Add(-2 * test.indexBlockSize)
- mockBlock.EXPECT().StartTime().Return(blockTime).AnyTimes()
- mockBlock.EXPECT().EndTime().Return(blockTime.Add(test.indexBlockSize)).AnyTimes()
- idx.state.blocksByTime[xtime.ToUnixNano(blockTime)] = mockBlock
-
- mockBlock.EXPECT().IsSealed().Return(true)
- mockBlock.EXPECT().NeedsMutableSegmentsEvicted().Return(true)
- mockBlock.EXPECT().Close().Return(nil)
-
- mockShard := NewMockdatabaseShard(ctrl)
- mockShard.EXPECT().ID().Return(uint32(0)).AnyTimes()
- mockShard.EXPECT().FlushState(blockTime).Return(fileOpState{WarmStatus: fileOpSuccess}, nil)
- mockShard.EXPECT().FlushState(blockTime.Add(test.blockSize)).Return(fileOpState{WarmStatus: fileOpFailed}, nil)
- shards := []databaseShard{mockShard}
-
- mockFlush := persist.NewMockIndexFlush(ctrl)
-
- require.NoError(t, idx.Flush(mockFlush, shards))
+ verifyFlushForShards(
+ t,
+ ctrl,
+ idx,
+ test.blockSize,
+ []uint32{0, 1, 2},
+ )
}
-func TestNamespaceIndexFlushSuccessMultipleShards(t *testing.T) {
+func TestNamespaceIndexFlushShardStateNotSuccess(t *testing.T) {
ctrl := gomock.NewController(xtest.Reporter{T: t})
defer ctrl.Finish()
@@ -207,6 +300,8 @@ func TestNamespaceIndexFlushSuccessMultipleShards(t *testing.T) {
require.NoError(t, idx.Close())
}()
+	// NB(bodu): We don't need to allocate a mock block for every block start; we just need to
+	// ensure that we aren't flushing index data if the TSDB data is not on disk, and a single mock block is sufficient for that.
mockBlock := index.NewMockBlock(ctrl)
mockBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
blockTime := now.Add(-2 * test.indexBlockSize)
@@ -215,62 +310,16 @@ func TestNamespaceIndexFlushSuccessMultipleShards(t *testing.T) {
idx.state.blocksByTime[xtime.ToUnixNano(blockTime)] = mockBlock
mockBlock.EXPECT().IsSealed().Return(true)
- mockBlock.EXPECT().NeedsMutableSegmentsEvicted().Return(true)
mockBlock.EXPECT().Close().Return(nil)
- mockShard1 := NewMockdatabaseShard(ctrl)
- mockShard1.EXPECT().ID().Return(uint32(0)).AnyTimes()
- mockShard1.EXPECT().FlushState(blockTime).Return(fileOpState{WarmStatus: fileOpSuccess}, nil)
- mockShard1.EXPECT().FlushState(blockTime.Add(test.blockSize)).Return(fileOpState{WarmStatus: fileOpSuccess}, nil)
-
- mockShard2 := NewMockdatabaseShard(ctrl)
- mockShard2.EXPECT().ID().Return(uint32(1)).AnyTimes()
- mockShard2.EXPECT().FlushState(blockTime).Return(fileOpState{WarmStatus: fileOpSuccess}, nil)
- mockShard2.EXPECT().FlushState(blockTime.Add(test.blockSize)).Return(fileOpState{WarmStatus: fileOpSuccess}, nil)
-
- shards := []databaseShard{mockShard1, mockShard2}
+ mockShard := NewMockdatabaseShard(ctrl)
+ mockShard.EXPECT().ID().Return(uint32(0)).AnyTimes()
+ mockShard.EXPECT().FlushState(gomock.Any()).Return(fileOpState{WarmStatus: fileOpFailed}, nil).AnyTimes()
+ shards := []databaseShard{mockShard}
mockFlush := persist.NewMockIndexFlush(ctrl)
- persistClosed := false
- numPersistCalls := 0
- closer := func() ([]segment.Segment, error) {
- persistClosed = true
- return nil, nil
- }
- persistFn := func(segment.Builder) error {
- numPersistCalls++
- return nil
- }
- preparedPersist := persist.PreparedIndexPersist{
- Close: closer,
- Persist: persistFn,
- }
- mockFlush.EXPECT().PrepareIndex(xtest.CmpMatcher(persist.IndexPrepareOptions{
- NamespaceMetadata: test.metadata,
- BlockStart: blockTime,
- FileSetType: persist.FileSetFlushType,
- Shards: map[uint32]struct{}{0: struct{}{}, 1: struct{}{}},
- })).Return(preparedPersist, nil)
-
- results1 := block.NewMockFetchBlocksMetadataResults(ctrl)
- results1.EXPECT().Results().Return(nil)
- results1.EXPECT().Close()
- mockShard1.EXPECT().FetchBlocksMetadataV2(gomock.Any(), blockTime, blockTime.Add(test.indexBlockSize),
- gomock.Any(), gomock.Any(), block.FetchBlocksMetadataOptions{}).Return(results1, nil, nil)
-
- results2 := block.NewMockFetchBlocksMetadataResults(ctrl)
- results2.EXPECT().Results().Return(nil)
- results2.EXPECT().Close()
- mockShard2.EXPECT().FetchBlocksMetadataV2(gomock.Any(), blockTime, blockTime.Add(test.indexBlockSize),
- gomock.Any(), gomock.Any(), block.FetchBlocksMetadataOptions{}).Return(results2, nil, nil)
-
- mockBlock.EXPECT().AddResults(gomock.Any()).Return(nil)
- mockBlock.EXPECT().EvictMutableSegments().Return(nil)
-
- require.NoError(t, idx.Flush(mockFlush, shards))
- require.Equal(t, 1, numPersistCalls)
- require.True(t, persistClosed)
+ require.NoError(t, idx.WarmFlush(mockFlush, shards))
}
func TestNamespaceIndexQueryNoMatchingBlocks(t *testing.T) {
@@ -319,8 +368,123 @@ func TestNamespaceIndexQueryNoMatchingBlocks(t *testing.T) {
assert.Equal(t, 0, aggResult.Results.Size())
}
+func verifyFlushForShards(
+ t *testing.T,
+ ctrl *gomock.Controller,
+ idx *nsIndex,
+ blockSize time.Duration,
+ shards []uint32,
+) {
+ var (
+ mockFlush = persist.NewMockIndexFlush(ctrl)
+ shardMap = make(map[uint32]struct{})
+ now = time.Now()
+ warmBlockStart = now.Add(-idx.bufferPast).Truncate(idx.blockSize)
+ mockShards []*MockdatabaseShard
+ dbShards []databaseShard
+ numBlocks int
+ persistClosedTimes int
+ persistCalledTimes int
+ actualDocs = make([]doc.Document, 0)
+ expectedDocs = make([]doc.Document, 0)
+ )
+ // NB(bodu): Always align now w/ the index's view of now.
+ idx.nowFn = func() time.Time {
+ return now
+ }
+ for _, shard := range shards {
+ mockShard := NewMockdatabaseShard(ctrl)
+ mockShard.EXPECT().ID().Return(shard).AnyTimes()
+ mockShards = append(mockShards, mockShard)
+ shardMap[shard] = struct{}{}
+ dbShards = append(dbShards, mockShard)
+ }
+ earliestBlockStartToRetain := retention.FlushTimeStartForRetentionPeriod(idx.retentionPeriod, idx.blockSize, now)
+ for blockStart := earliestBlockStartToRetain; blockStart.Before(warmBlockStart); blockStart = blockStart.Add(idx.blockSize) {
+ numBlocks++
+
+ mockBlock := index.NewMockBlock(ctrl)
+ mockBlock.EXPECT().Stats(gomock.Any()).Return(nil).AnyTimes()
+ mockBlock.EXPECT().StartTime().Return(blockStart).AnyTimes()
+ mockBlock.EXPECT().EndTime().Return(blockStart.Add(idx.blockSize)).AnyTimes()
+ idx.state.blocksByTime[xtime.ToUnixNano(blockStart)] = mockBlock
+
+ mockBlock.EXPECT().Close().Return(nil)
+
+ closer := func() ([]segment.Segment, error) {
+ persistClosedTimes++
+ return nil, nil
+ }
+ persistFn := func(b segment.Builder) error {
+ persistCalledTimes++
+ actualDocs = append(actualDocs, b.Docs()...)
+ return nil
+ }
+ preparedPersist := persist.PreparedIndexPersist{
+ Close: closer,
+ Persist: persistFn,
+ }
+ mockFlush.EXPECT().PrepareIndex(xtest.CmpMatcher(persist.IndexPrepareOptions{
+ NamespaceMetadata: idx.nsMetadata,
+ BlockStart: blockStart,
+ FileSetType: persist.FileSetFlushType,
+ Shards: shardMap,
+ IndexVolumeType: idxpersist.DefaultIndexVolumeType,
+ })).Return(preparedPersist, nil)
+
+ results := block.NewMockFetchBlocksMetadataResults(ctrl)
+
+ resultsID1 := ident.StringID("CACHED")
+ resultsID2 := ident.StringID("NEW")
+ doc1 := doc.Document{
+ ID: resultsID1.Bytes(),
+ Fields: []doc.Field{},
+ }
+ doc2 := doc.Document{
+ ID: resultsID2.Bytes(),
+ Fields: []doc.Field{},
+ }
+ expectedDocs = append(expectedDocs, doc1)
+ expectedDocs = append(expectedDocs, doc2)
+
+ for _, mockShard := range mockShards {
+ mockShard.EXPECT().FlushState(blockStart).Return(fileOpState{WarmStatus: fileOpSuccess}, nil)
+ mockShard.EXPECT().FlushState(blockStart.Add(blockSize)).Return(fileOpState{WarmStatus: fileOpSuccess}, nil)
+
+ resultsTags1 := ident.NewTagsIterator(ident.NewTags())
+ resultsTags2 := ident.NewTagsIterator(ident.NewTags())
+ resultsInShard := []block.FetchBlocksMetadataResult{
+ block.FetchBlocksMetadataResult{
+ ID: resultsID1,
+ Tags: resultsTags1,
+ },
+ block.FetchBlocksMetadataResult{
+ ID: resultsID2,
+ Tags: resultsTags2,
+ },
+ }
+ results.EXPECT().Results().Return(resultsInShard)
+ results.EXPECT().Close()
+
+ mockShard.EXPECT().DocRef(resultsID1).Return(doc1, true, nil)
+ mockShard.EXPECT().DocRef(resultsID2).Return(doc.Document{}, false, nil)
+
+ mockShard.EXPECT().FetchBlocksMetadataV2(gomock.Any(), blockStart, blockStart.Add(idx.blockSize),
+ gomock.Any(), gomock.Any(), block.FetchBlocksMetadataOptions{OnlyDisk: true}).Return(results, nil, nil)
+ }
+
+ mockBlock.EXPECT().IsSealed().Return(true)
+ mockBlock.EXPECT().AddResults(gomock.Any()).Return(nil)
+ mockBlock.EXPECT().EvictMutableSegments().Return(nil)
+ }
+ require.NoError(t, idx.WarmFlush(mockFlush, dbShards))
+ require.Equal(t, numBlocks, persistClosedTimes)
+ require.Equal(t, numBlocks, persistCalledTimes)
+ require.Equal(t, expectedDocs, actualDocs)
+}
+
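For reference, a hypothetical caller of this helper might look like the sketch below; the test name, type assertion, and shard IDs are illustrative assumptions, not part of this change:

func TestNamespaceIndexWarmFlushShardsSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// newTestIndex and verifyFlushForShards are the helpers defined in this file.
	test := newTestIndex(t, ctrl)
	idx, ok := test.index.(*nsIndex)
	require.True(t, ok)
	verifyFlushForShards(t, ctrl, idx, test.blockSize, []uint32{0})
}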
type testIndex struct {
- index namespaceIndex
+ index NamespaceIndex
metadata namespace.Metadata
opts Options
blockSize time.Duration
@@ -342,7 +506,9 @@ func newTestIndex(t *testing.T, ctrl *gomock.Controller) testIndex {
md, err := namespace.NewMetadata(ident.StringID("testns"), nopts)
require.NoError(t, err)
opts := DefaultTestOptions()
- index, err := newNamespaceIndex(md, testShardSet, opts)
+ index, err := newNamespaceIndex(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, opts)
require.NoError(t, err)
return testIndex{
diff --git a/src/dbnode/storage/mediator.go b/src/dbnode/storage/mediator.go
index 71f343e871..f1462a82d2 100644
--- a/src/dbnode/storage/mediator.go
+++ b/src/dbnode/storage/mediator.go
@@ -26,6 +26,7 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/clock"
+ "github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
"github.com/m3db/m3/src/x/instrument"
@@ -33,11 +34,13 @@ import (
"go.uber.org/zap"
)
-type mediatorState int
+type (
+ mediatorState int
+)
const (
- fileOpCheckInterval = time.Second
- tickCheckInterval = 5 * time.Second
+ fileOpCheckInterval = time.Second
+ fileSystemProcessesCheckInterval = 100 * time.Millisecond
mediatorNotOpen mediatorState = iota
mediatorOpen
@@ -73,6 +76,7 @@ type mediator struct {
database database
databaseBootstrapManager
databaseFileSystemManager
+ databaseColdFlushManager
databaseTickManager
databaseRepairer
@@ -83,6 +87,7 @@ type mediator struct {
state mediatorState
mediatorTimeBarrier mediatorTimeBarrier
closedCh chan struct{}
+ tickInterval time.Duration
}
// TODO(r): Consider renaming "databaseMediator" to "databaseCoordinator"
@@ -102,14 +107,24 @@ func newMediator(database database, commitlog commitlog.CommitLog, opts Options)
state: mediatorNotOpen,
mediatorTimeBarrier: newMediatorTimeBarrier(nowFn, iOpts),
closedCh: make(chan struct{}),
+ tickInterval: opts.MediatorTickInterval(),
}
fsm := newFileSystemManager(database, commitlog, opts)
d.databaseFileSystemManager = fsm
+ // NB(bodu): Cold flush needs its own persist manager now
+ // that it's running in its own thread.
+ fsOpts := opts.CommitLogOptions().FilesystemOptions()
+ pm, err := fs.NewPersistManager(fsOpts)
+ if err != nil {
+ return nil, err
+ }
+ cfm := newColdFlushManager(database, pm, opts)
+ d.databaseColdFlushManager = cfm
+
d.databaseRepairer = newNoopDatabaseRepairer()
if opts.RepairEnabled() {
- var err error
d.databaseRepairer, err = newDatabaseRepairer(database, opts)
if err != nil {
return nil, err
@@ -129,28 +144,40 @@ func (m *mediator) Open() error {
}
m.state = mediatorOpen
go m.reportLoop()
- go m.ongoingFilesystemProcesses()
+ go m.ongoingFileSystemProcesses()
+ go m.ongoingColdFlushProcesses()
go m.ongoingTick()
m.databaseRepairer.Start()
return nil
}
-func (m *mediator) DisableFileOps() {
+func (m *mediator) DisableFileOpsAndWait() {
status := m.databaseFileSystemManager.Disable()
for status == fileOpInProgress {
m.sleepFn(fileOpCheckInterval)
status = m.databaseFileSystemManager.Status()
}
+ // Even though the cold flush runs separately, it's still
+ // considered a filesystem process.
+ status = m.databaseColdFlushManager.Disable()
+ for status == fileOpInProgress {
+ m.sleepFn(fileOpCheckInterval)
+ status = m.databaseColdFlushManager.Status()
+ }
}
func (m *mediator) EnableFileOps() {
m.databaseFileSystemManager.Enable()
+ // Even though the cold flush runs separately, it's still
+ // considered a filesystem process.
+ m.databaseColdFlushManager.Enable()
}
func (m *mediator) Report() {
m.databaseBootstrapManager.Report()
m.databaseRepairer.Report()
m.databaseFileSystemManager.Report()
+ m.databaseColdFlushManager.Report()
}
func (m *mediator) Close() error {
@@ -168,7 +195,7 @@ func (m *mediator) Close() error {
return nil
}
-// The mediator mediates the relationship between ticks and flushes(warm and cold)/snapshots/cleanups.
+// The mediator mediates the relationship between ticks and warm flushes/snapshots.
//
// For example, the requirements to perform a flush are:
// 1) currentTime > blockStart.Add(blockSize).Add(bufferPast)
@@ -182,22 +209,41 @@ func (m *mediator) Close() error {
// is potentially still on disk (if it hasn't been cleaned up yet).
//
// See comment over mediatorTimeBarrier for more details on how this is implemented.
-func (m *mediator) ongoingFilesystemProcesses() {
- log := m.opts.InstrumentOptions().Logger()
+func (m *mediator) ongoingFileSystemProcesses() {
for {
select {
case <-m.closedCh:
return
default:
- m.sleepFn(tickCheckInterval)
- // See comment over mediatorTimeBarrier for an explanation of this logic.
- mediatorTime, err := m.mediatorTimeBarrier.fsProcessesWait()
- if err != nil {
- log.Error("error within ongoingFilesystemProcesses waiting for next mediatorTime", zap.Error(err))
- continue
+ m.sleepFn(m.tickInterval)
+
+ // Check if the mediator is already closed.
+ if !m.isOpen() {
+ return
}
- m.databaseFileSystemManager.Run(mediatorTime, syncRun, noForce)
+ m.runFileSystemProcesses()
+ }
+ }
+}
+
+// The mediator mediates the relationship between ticks and cold flushes/cleanup the same way it does for warm flushes/snapshots.
+// We want each cold/warm flush to begin with a view of time that is in sync with a tick.
+// NB(bodu): Cold flushes and cleanup have been separated out into their own thread to avoid blocking snapshots.
+func (m *mediator) ongoingColdFlushProcesses() {
+ for {
+ select {
+ case <-m.closedCh:
+ return
+ default:
+ m.sleepFn(m.tickInterval)
+
+ // Check if the mediator is already closed.
+ if !m.isOpen() {
+ return
+ }
+
+ m.runColdFlushProcesses()
}
}
}
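To make flush requirement 1 above concrete, here is a minimal standalone sketch of the eligibility check (an illustration under assumed names, not code from this change):

func canWarmFlush(now, blockStart time.Time, blockSize, bufferPast time.Duration) bool {
	// A block is warm-flushable only once it is completely out of the
	// buffer-past window: currentTime > blockStart + blockSize + bufferPast.
	return now.After(blockStart.Add(blockSize).Add(bufferPast))
}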
@@ -212,24 +258,54 @@ func (m *mediator) ongoingTick() {
case <-m.closedCh:
return
default:
- m.sleepFn(tickCheckInterval)
+ m.sleepFn(m.tickInterval)
+
+ // Check if the mediator is already closed.
+ if !m.isOpen() {
+ return
+ }
// See comment over mediatorTimeBarrier for an explanation of this logic.
newMediatorTime, err := m.mediatorTimeBarrier.maybeRelease()
if err != nil {
- log.Error(
- "ongoing tick was unable to release time barrier", zap.Error(err))
+ log.Error("ongoing tick was unable to release time barrier", zap.Error(err))
continue
}
mediatorTime = newMediatorTime
- if err := m.Tick(force, mediatorTime); err != nil {
+ // NB(bodu): We may still hit a db closed error here since the db does not
+ // wait for ticks to complete.
+ if err := m.Tick(force, mediatorTime); err != nil && err != errDatabaseIsClosed {
log.Error("error within tick", zap.Error(err))
}
}
}
}
+func (m *mediator) runFileSystemProcesses() {
+ // See comment over mediatorTimeBarrier for an explanation of this logic.
+ log := m.opts.InstrumentOptions().Logger()
+ mediatorTime, err := m.mediatorTimeBarrier.fsProcessesWait()
+ if err != nil {
+ log.Error("error within ongoingFileSystemProcesses waiting for next mediatorTime", zap.Error(err))
+ return
+ }
+
+ m.databaseFileSystemManager.Run(mediatorTime, syncRun, noForce)
+}
+
+func (m *mediator) runColdFlushProcesses() {
+ // See comment over mediatorTimeBarrier for an explanation of this logic.
+ log := m.opts.InstrumentOptions().Logger()
+ mediatorTime, err := m.mediatorTimeBarrier.fsProcessesWait()
+ if err != nil {
+ log.Error("error within ongoingColdFlushProcesses waiting for next mediatorTime", zap.Error(err))
+ return
+ }
+
+ m.databaseColdFlushManager.Run(mediatorTime)
+}
+
func (m *mediator) reportLoop() {
interval := m.opts.InstrumentOptions().ReportInterval()
t := time.NewTicker(interval)
@@ -245,6 +321,12 @@ func (m *mediator) reportLoop() {
}
}
+func (m *mediator) isOpen() bool {
+ m.RLock()
+ defer m.RUnlock()
+ return m.state == mediatorOpen
+}
+
// mediatorTimeBarrier is used to prevent the tick process and the filesystem processes from ever running
// concurrently with an inconsistent view of time. Each time the filesystem processes want to run they first
// register for the next barrier by calling fsProcessesWait(). Once a tick completes it will call maybeRelease()
@@ -262,42 +344,65 @@ func (m *mediator) reportLoop() {
// This means that once a run of filesystem processes completes it will always have to wait until the currently
// executing tick completes before performing the next run, but in practice this should not be much of an issue.
//
-// ____________ ___________
-// | Flush (t0) | | Tick (t0) |
-// | | | |
-// | | |___________|
-// | | ___________
-// | | | Tick (t0) |
-// | | | |
-// | | |___________|
-// | | ___________
-// |____________| | Tick (t0) |
-// barrier.wait() | |
-// |___________|
-// mediatorTime = t1
-// barrier.release()
-// -------------------------------------
-// ____________ ___________
-// | Flush (t1) | | Tick (t1) |
-// | | | |
-// | | |___________|
-// | | ___________
-// | | | Tick (t1) |
-// | | | |
+// Additionally, an independent cold flush process complicates this a bit more in that we have more than one filesystem
+// process waiting on the mediator barrier. The invariant here is that warm and cold flushes each start on a tick with a
+// view of time consistent with that tick; they don't necessarily need to start on the same tick. See the
+// diagram below for an example case.
+//
+// ____________ ___________ _________________
+// | Flush (t0) | | Tick (t0) | | Cold Flush (t0) |
+// | | | | | |
+// | | |___________| | |
+// | | ___________ | |
+// | | | Tick (t0) | | |
+// | | | | | |
+// | | |___________| | |
+// | | ___________ | |
+// |____________| | Tick (t0) | | |
+// barrier.wait() | | | |
+// |___________| | |
+// mediatorTime = t1 | |
+// barrier.release() | |
+// ____________ ___________ | |
+// | Flush (t1) | | Tick (t1) | |_________________|
+// | | | | barrier.wait()
// | | |___________|
-// | | ___________
-// |____________| | Tick (t1) |
-// barrier.wait() | |
-// |___________|
-// barrier.release()
-// ------------------------------------
+// | | mediatorTime = t2
+// | | barrier.release()
+// | | ___________ _________________
+// | | | Tick (t2) | | Cold Flush (t2) |
+// |____________| | | | |
+// barrier.wait() |___________| | |
+// mediatorTime = t3 | |
+// barrier.release() | |
+// ____________ ___________ | |
+// | Flush (t3) | | Tick (t3) | | |
+// | | | | | |
+// | | |___________| | |
+// | | ___________ | |
+// | | | Tick (t3) | | |
+// | | | | | |
+// | | |___________| | |
+// | | ___________ | |
+// |____________| | Tick (t3) | |_________________|
+// barrier.wait() | | barrier.wait()
+// |___________|
+// mediatorTime = t4
+// barrier.release()
+// ____________ ___________ _________________
+// | Flush (t4) | | Tick (t4) | | Cold Flush (t4) |
+// | | | | | |
+// ------------------------------------------------------------
type mediatorTimeBarrier struct {
sync.Mutex
- mediatorTime time.Time
- nowFn func() time.Time
- iOpts instrument.Options
- fsProcessesWaiting bool
- releaseCh chan time.Time
+ // Both mediatorTime and numFsProcessesWaiting are protected
+ // by the mutex.
+ mediatorTime time.Time
+ numFsProcessesWaiting int
+
+ nowFn func() time.Time
+ iOpts instrument.Options
+ releaseCh chan time.Time
}
// initialMediatorTime should only be used to obtain the initial time for
@@ -311,28 +416,24 @@ func (b *mediatorTimeBarrier) initialMediatorTime() time.Time {
func (b *mediatorTimeBarrier) fsProcessesWait() (time.Time, error) {
b.Lock()
- if b.fsProcessesWaiting {
- b.Unlock()
- return time.Time{}, errMediatorTimeBarrierAlreadyWaiting
- }
- b.fsProcessesWaiting = true
+ b.numFsProcessesWaiting++
b.Unlock()
t := <-b.releaseCh
b.Lock()
- b.fsProcessesWaiting = false
+ b.numFsProcessesWaiting--
b.Unlock()
return t, nil
}
func (b *mediatorTimeBarrier) maybeRelease() (time.Time, error) {
b.Lock()
- hasWaiter := b.fsProcessesWaiting
+ numWaiters := b.numFsProcessesWaiting
mediatorTime := b.mediatorTime
b.Unlock()
- if !hasWaiter {
+ if numWaiters == 0 {
// If there isn't a waiter yet then the filesystem processes may still
// be ongoing in which case we don't want to release the barrier / update
// the current time yet. Allow the tick to run again with the same time
@@ -353,7 +454,9 @@ func (b *mediatorTimeBarrier) maybeRelease() (time.Time, error) {
}
b.mediatorTime = newMediatorTime
- b.releaseCh <- b.mediatorTime
+ for i := 0; i < numWaiters; i++ {
+ b.releaseCh <- b.mediatorTime
+ }
return b.mediatorTime, nil
}
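The barrier now counts waiters instead of tracking a single boolean, and the releaser hands the same time to each registered waiter. A toy illustration of the pattern (a standalone sketch with assumed names that mirrors the shape of mediatorTimeBarrier, not code from this change):

type toyBarrier struct {
	mu        sync.Mutex
	waiters   int
	releaseCh chan time.Time // unbuffered, like mediatorTimeBarrier.releaseCh
}

func (b *toyBarrier) wait() time.Time {
	b.mu.Lock()
	b.waiters++
	b.mu.Unlock()
	t := <-b.releaseCh // block until the releaser distributes the shared time
	b.mu.Lock()
	b.waiters--
	b.mu.Unlock()
	return t
}

func (b *toyBarrier) release(t time.Time) {
	b.mu.Lock()
	n := b.waiters
	b.mu.Unlock()
	// Send once per registered waiter so that every filesystem process
	// (warm flush/snapshot, cold flush/cleanup) observes the same time.
	for i := 0; i < n; i++ {
		b.releaseCh <- t
	}
}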
diff --git a/src/dbnode/storage/mediator_test.go b/src/dbnode/storage/mediator_test.go
index adb9fd152c..14056d4fca 100644
--- a/src/dbnode/storage/mediator_test.go
+++ b/src/dbnode/storage/mediator_test.go
@@ -42,7 +42,7 @@ func TestDatabaseMediatorOpenClose(t *testing.T) {
db := NewMockdatabase(ctrl)
db.EXPECT().Options().Return(opts).AnyTimes()
- db.EXPECT().GetOwnedNamespaces().Return(nil, nil).AnyTimes()
+ db.EXPECT().OwnedNamespaces().Return(nil, nil).AnyTimes()
db.EXPECT().BootstrapState().Return(DatabaseBootstrapState{}).AnyTimes()
m, err := newMediator(db, nil, opts)
require.NoError(t, err)
@@ -56,7 +56,7 @@ func TestDatabaseMediatorOpenClose(t *testing.T) {
require.Equal(t, errMediatorAlreadyClosed, m.Close())
}
-func TestDatabaseMediatorDisableFileOps(t *testing.T) {
+func TestDatabaseMediatorDisableFileOpsAndWait(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -86,6 +86,6 @@ func TestDatabaseMediatorDisableFileOps(t *testing.T) {
fsm.EXPECT().Status().Return(fileOpNotStarted),
)
- m.DisableFileOps()
+ m.DisableFileOpsAndWait()
require.Equal(t, 3, len(slept))
}
diff --git a/src/dbnode/storage/namespace.go b/src/dbnode/storage/namespace.go
index fc672976a3..04f0459bd5 100644
--- a/src/dbnode/storage/namespace.go
+++ b/src/dbnode/storage/namespace.go
@@ -40,6 +40,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/tracepoint"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/dbnode/x/xio"
xclose "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/context"
@@ -127,7 +128,7 @@ type dbNamespace struct {
increasingIndex increasingIndex
commitLogWriter commitLogWriter
- reverseIndex namespaceIndex
+ reverseIndex NamespaceIndex
tickWorkers xsync.WorkerPool
tickWorkersConcurrency int
@@ -150,24 +151,25 @@ type databaseNamespaceIndexStatsLastTick struct {
}
type databaseNamespaceMetrics struct {
- bootstrap instrument.MethodMetrics
- flushWarmData instrument.MethodMetrics
- flushColdData instrument.MethodMetrics
- flushIndex instrument.MethodMetrics
- snapshot instrument.MethodMetrics
- write instrument.MethodMetrics
- writeTagged instrument.MethodMetrics
- read instrument.MethodMetrics
- fetchBlocks instrument.MethodMetrics
- fetchBlocksMetadata instrument.MethodMetrics
- queryIDs instrument.MethodMetrics
- aggregateQuery instrument.MethodMetrics
- unfulfilled tally.Counter
- bootstrapStart tally.Counter
- bootstrapEnd tally.Counter
- shards databaseNamespaceShardMetrics
- tick databaseNamespaceTickMetrics
- status databaseNamespaceStatusMetrics
+ bootstrap instrument.MethodMetrics
+ flushWarmData instrument.MethodMetrics
+ flushColdData instrument.MethodMetrics
+ flushIndex instrument.MethodMetrics
+ snapshot instrument.MethodMetrics
+ write instrument.MethodMetrics
+ writeTagged instrument.MethodMetrics
+ read instrument.MethodMetrics
+ fetchBlocks instrument.MethodMetrics
+ fetchBlocksMetadata instrument.MethodMetrics
+ queryIDs instrument.MethodMetrics
+ aggregateQuery instrument.MethodMetrics
+ unfulfilled tally.Counter
+ bootstrapStart tally.Counter
+ bootstrapEnd tally.Counter
+ snapshotSeriesPersist tally.Counter
+ shards databaseNamespaceShardMetrics
+ tick databaseNamespaceTickMetrics
+ status databaseNamespaceStatusMetrics
}
type databaseNamespaceShardMetrics struct {
@@ -215,7 +217,10 @@ type databaseNamespaceIndexStatusMetrics struct {
numSegments tally.Gauge
}
-func newDatabaseNamespaceMetrics(scope tally.Scope, samplingRate float64) databaseNamespaceMetrics {
+func newDatabaseNamespaceMetrics(
+ scope tally.Scope,
+ opts instrument.TimerOptions,
+) databaseNamespaceMetrics {
const (
// NB: tally.Timer when backed by a Prometheus Summary type is *very* expensive
// for high frequency measurements. Overriding sampling rate for writes to avoid this issue.
@@ -228,22 +233,25 @@ func newDatabaseNamespaceMetrics(scope tally.Scope, samplingRate float64) databa
indexTickScope := tickScope.SubScope("index")
statusScope := scope.SubScope("status")
indexStatusScope := statusScope.SubScope("index")
+ bootstrapScope := scope.SubScope("bootstrap")
+ snapshotScope := scope.SubScope("snapshot")
return databaseNamespaceMetrics{
- bootstrap: instrument.NewMethodMetrics(scope, "bootstrap", samplingRate),
- flushWarmData: instrument.NewMethodMetrics(scope, "flushWarmData", samplingRate),
- flushColdData: instrument.NewMethodMetrics(scope, "flushColdData", samplingRate),
- flushIndex: instrument.NewMethodMetrics(scope, "flushIndex", samplingRate),
- snapshot: instrument.NewMethodMetrics(scope, "snapshot", samplingRate),
- write: instrument.NewMethodMetrics(scope, "write", overrideWriteSamplingRate),
- writeTagged: instrument.NewMethodMetrics(scope, "write-tagged", overrideWriteSamplingRate),
- read: instrument.NewMethodMetrics(scope, "read", samplingRate),
- fetchBlocks: instrument.NewMethodMetrics(scope, "fetchBlocks", samplingRate),
- fetchBlocksMetadata: instrument.NewMethodMetrics(scope, "fetchBlocksMetadata", samplingRate),
- queryIDs: instrument.NewMethodMetrics(scope, "queryIDs", samplingRate),
- aggregateQuery: instrument.NewMethodMetrics(scope, "aggregateQuery", samplingRate),
- unfulfilled: scope.Counter("bootstrap.unfulfilled"),
- bootstrapStart: scope.Counter("bootstrap.start"),
- bootstrapEnd: scope.Counter("bootstrap.end"),
+ bootstrap: instrument.NewMethodMetrics(scope, "bootstrap", opts),
+ flushWarmData: instrument.NewMethodMetrics(scope, "flushWarmData", opts),
+ flushColdData: instrument.NewMethodMetrics(scope, "flushColdData", opts),
+ flushIndex: instrument.NewMethodMetrics(scope, "flushIndex", opts),
+ snapshot: instrument.NewMethodMetrics(scope, "snapshot", opts),
+ write: instrument.NewMethodMetrics(scope, "write", opts),
+ writeTagged: instrument.NewMethodMetrics(scope, "write-tagged", opts),
+ read: instrument.NewMethodMetrics(scope, "read", opts),
+ fetchBlocks: instrument.NewMethodMetrics(scope, "fetchBlocks", opts),
+ fetchBlocksMetadata: instrument.NewMethodMetrics(scope, "fetchBlocksMetadata", opts),
+ queryIDs: instrument.NewMethodMetrics(scope, "queryIDs", opts),
+ aggregateQuery: instrument.NewMethodMetrics(scope, "aggregateQuery", opts),
+ unfulfilled: bootstrapScope.Counter("unfulfilled"),
+ bootstrapStart: bootstrapScope.Counter("start"),
+ bootstrapEnd: bootstrapScope.Counter("end"),
+ snapshotSeriesPersist: snapshotScope.Counter("series-persist"),
shards: databaseNamespaceShardMetrics{
add: shardsScope.Counter("add"),
close: shardsScope.Counter("close"),
@@ -283,6 +291,7 @@ func newDatabaseNamespaceMetrics(scope tally.Scope, samplingRate float64) databa
func newDatabaseNamespace(
metadata namespace.Metadata,
+ namespaceRuntimeOptsMgr namespace.RuntimeOptionsManager,
shardSet sharding.ShardSet,
blockRetriever block.DatabaseBlockRetriever,
increasingIndex increasingIndex,
@@ -299,13 +308,14 @@ func newDatabaseNamespace(
iops := opts.InstrumentOptions()
logger := iops.Logger().With(zap.String("namespace", id.String()))
- iops = iops.SetLogger(logger)
+ iops = iops.
+ SetLogger(logger).
+ SetMetricsScope(iops.MetricsScope().Tagged(map[string]string{
+ "namespace": id.String(),
+ }))
opts = opts.SetInstrumentOptions(iops)
- scope := iops.MetricsScope().SubScope("database").
- Tagged(map[string]string{
- "namespace": id.String(),
- })
+ scope := iops.MetricsScope().SubScope("database")
tickWorkersConcurrency := int(math.Max(1, float64(runtime.NumCPU())/8))
tickWorkers := xsync.NewWorkerPool(tickWorkersConcurrency)
@@ -321,11 +331,12 @@ func newDatabaseNamespace(
}
var (
- index namespaceIndex
+ index NamespaceIndex
err error
)
if metadata.Options().IndexOptions().Enabled() {
- index, err = newNamespaceIndex(metadata, shardSet, opts)
+ index, err = newNamespaceIndex(metadata, namespaceRuntimeOptsMgr,
+ shardSet, opts)
if err != nil {
return nil, err
}
@@ -349,7 +360,7 @@ func newDatabaseNamespace(
reverseIndex: index,
tickWorkers: tickWorkers,
tickWorkersConcurrency: tickWorkersConcurrency,
- metrics: newDatabaseNamespaceMetrics(scope, iops.MetricsSamplingRate()),
+ metrics: newDatabaseNamespaceMetrics(scope, iops.TimerOptions()),
}
sl, err := opts.SchemaRegistry().RegisterListener(id, n)
@@ -361,7 +372,10 @@ func newDatabaseNamespace(
metadata.ID().String(), err)
}
n.schemaListener = sl
- n.initShards(nopts.BootstrapEnabled())
+ n.assignShardSet(shardSet, assignShardSetOptions{
+ needsBootstrap: nopts.BootstrapEnabled(),
+ initialAssignment: true,
+ })
go n.reportStatusLoop(opts.InstrumentOptions().ReportInterval())
return n, nil
@@ -411,6 +425,10 @@ func (n *dbNamespace) Options() namespace.Options {
return n.nopts
}
+func (n *dbNamespace) StorageOptions() Options {
+ return n.opts
+}
+
func (n *dbNamespace) ID() ident.ID {
return n.id
}
@@ -432,7 +450,7 @@ func (n *dbNamespace) Schema() namespace.SchemaDescr {
func (n *dbNamespace) NumSeries() int64 {
var count int64
- for _, shard := range n.GetOwnedShards() {
+ for _, shard := range n.OwnedShards() {
count += shard.NumSeries()
}
return count
@@ -450,6 +468,21 @@ func (n *dbNamespace) Shards() []Shard {
}
func (n *dbNamespace) AssignShardSet(shardSet sharding.ShardSet) {
+ n.assignShardSet(shardSet, assignShardSetOptions{
+ needsBootstrap: n.nopts.BootstrapEnabled(),
+ initialAssignment: false,
+ })
+}
+
+type assignShardSetOptions struct {
+ needsBootstrap bool
+ initialAssignment bool
+}
+
+func (n *dbNamespace) assignShardSet(
+ shardSet sharding.ShardSet,
+ opts assignShardSetOptions,
+) {
var (
incoming = make(map[uint32]struct{}, len(shardSet.All()))
existing []databaseShard
@@ -473,19 +506,35 @@ func (n *dbNamespace) AssignShardSet(shardSet sharding.ShardSet) {
n.shardSet = shardSet
n.shards = make([]databaseShard, n.shardSet.Max()+1)
for _, shard := range n.shardSet.AllIDs() {
- if int(shard) < len(existing) && existing[shard] != nil {
+ // We create shards if it's an initial assignment, or if it's not an initial
+ // assignment and the shard doesn't already exist.
+ if !opts.initialAssignment && int(shard) < len(existing) && existing[shard] != nil {
n.shards[shard] = existing[shard]
- } else {
- bootstrapEnabled := n.nopts.BootstrapEnabled()
- n.shards[shard] = newDatabaseShard(metadata, shard, n.blockRetriever,
- n.namespaceReaderMgr, n.increasingIndex, n.reverseIndex,
- bootstrapEnabled, n.opts, n.seriesOpts)
+ continue
+ }
+
+ // Otherwise it's the initial assignment or there isn't an existing
+ // shard created for this shard ID.
+ n.shards[shard] = newDatabaseShard(metadata, shard, n.blockRetriever,
+ n.namespaceReaderMgr, n.increasingIndex, n.reverseIndex,
+ opts.needsBootstrap, n.opts, n.seriesOpts)
+ // NB(bodu): We only record shard add metrics for shards created in
+ // non-initial assignments.
+ if !opts.initialAssignment {
n.metrics.shards.add.Inc(1)
}
}
+
if idx := n.reverseIndex; idx != nil {
idx.AssignShardSet(shardSet)
}
+ if br := n.blockRetriever; br != nil {
+ br.AssignShardSet(shardSet)
+ }
+ if mgr := n.namespaceReaderMgr; mgr != nil {
+ mgr.assignShardSet(shardSet)
+ }
+
n.Unlock()
n.closeShards(closing, false)
}
@@ -530,7 +579,7 @@ func (n *dbNamespace) Tick(c context.Cancellable, startTime time.Time) error {
n.namespaceReaderMgr.tick()
// Fetch the owned shards.
- shards := n.GetOwnedShards()
+ shards := n.OwnedShards()
if len(shards) == 0 {
return nil
}
@@ -622,21 +671,21 @@ func (n *dbNamespace) Write(
value float64,
unit xtime.Unit,
annotation []byte,
-) (ts.Series, bool, error) {
+) (SeriesWrite, error) {
callStart := n.nowFn()
shard, nsCtx, err := n.shardFor(id)
if err != nil {
n.metrics.write.ReportError(n.nowFn().Sub(callStart))
- return ts.Series{}, false, err
+ return SeriesWrite{}, err
}
opts := series.WriteOptions{
TruncateType: n.opts.TruncateType(),
SchemaDesc: nsCtx.Schema,
}
- series, wasWritten, err := shard.Write(ctx, id, timestamp,
+ seriesWrite, err := shard.Write(ctx, id, timestamp,
value, unit, annotation, opts)
n.metrics.write.ReportSuccessOrError(err, n.nowFn().Sub(callStart))
- return series, wasWritten, err
+ return seriesWrite, err
}
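Callers now receive a single SeriesWrite result instead of the previous (ts.Series, bool, error) triple; a minimal illustrative call site (variable names are assumptions for the sketch):

// Sketch of consuming the new return shape.
seriesWrite, err := ns.Write(ctx, id, timestamp, value, unit, annotation)
if err != nil {
	return err
}
if seriesWrite.WasWritten {
	// Only act on datapoints that were actually written, e.g. count them.
}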
func (n *dbNamespace) WriteTagged(
@@ -647,25 +696,34 @@ func (n *dbNamespace) WriteTagged(
value float64,
unit xtime.Unit,
annotation []byte,
-) (ts.Series, bool, error) {
+) (SeriesWrite, error) {
callStart := n.nowFn()
if n.reverseIndex == nil { // only happens if indexing is disabled.
n.metrics.writeTagged.ReportError(n.nowFn().Sub(callStart))
- return ts.Series{}, false, errNamespaceIndexingDisabled
+ return SeriesWrite{}, errNamespaceIndexingDisabled
}
shard, nsCtx, err := n.shardFor(id)
if err != nil {
n.metrics.writeTagged.ReportError(n.nowFn().Sub(callStart))
- return ts.Series{}, false, err
+ return SeriesWrite{}, err
}
opts := series.WriteOptions{
TruncateType: n.opts.TruncateType(),
SchemaDesc: nsCtx.Schema,
}
- series, wasWritten, err := shard.WriteTagged(ctx, id, tags, timestamp,
+ seriesWrite, err := shard.WriteTagged(ctx, id, tags, timestamp,
value, unit, annotation, opts)
n.metrics.writeTagged.ReportSuccessOrError(err, n.nowFn().Sub(callStart))
- return series, wasWritten, err
+ return seriesWrite, err
+}
+
+func (n *dbNamespace) WritePendingIndexInserts(
+ pending []writes.PendingIndexInsert,
+) error {
+ if n.reverseIndex == nil { // only happens if indexing is disabled.
+ return errNamespaceIndexingDisabled
+ }
+ return n.reverseIndex.WritePending(pending)
}
func (n *dbNamespace) SeriesReadWriteRef(
@@ -698,7 +756,8 @@ func (n *dbNamespace) QueryIDs(
sp.LogFields(
opentracinglog.String("query", query.String()),
opentracinglog.String("namespace", n.ID().String()),
- opentracinglog.Int("limit", opts.Limit),
+ opentracinglog.Int("seriesLimit", opts.SeriesLimit),
+ opentracinglog.Int("docsLimit", opts.DocsLimit),
xopentracing.Time("start", opts.StartInclusive),
xopentracing.Time("end", opts.EndExclusive),
)
@@ -753,12 +812,19 @@ func (n *dbNamespace) AggregateQuery(
return res, err
}
-func (n *dbNamespace) PrepareBootstrap() ([]databaseShard, error) {
+func (n *dbNamespace) PrepareBootstrap(ctx context.Context) ([]databaseShard, error) {
+ ctx, span, sampled := ctx.StartSampledTraceSpan(tracepoint.NSPrepareBootstrap)
+ defer span.Finish()
+
+ if sampled {
+ span.LogFields(opentracinglog.String("namespace", n.id.String()))
+ }
+
var (
wg sync.WaitGroup
multiErrLock sync.Mutex
multiErr xerrors.MultiError
- shards = n.GetOwnedShards()
+ shards = n.OwnedShards()
)
for _, shard := range shards {
shard := shard
@@ -766,7 +832,7 @@ func (n *dbNamespace) PrepareBootstrap() ([]databaseShard, error) {
go func() {
defer wg.Done()
- err := shard.PrepareBootstrap()
+ err := shard.PrepareBootstrap(ctx)
if err != nil {
multiErrLock.Lock()
multiErr = multiErr.Add(err)
@@ -840,8 +906,16 @@ func (n *dbNamespace) FetchBlocksMetadataV2(
}
func (n *dbNamespace) Bootstrap(
+ ctx context.Context,
bootstrapResult bootstrap.NamespaceResult,
) error {
+ ctx, span, sampled := ctx.StartSampledTraceSpan(tracepoint.NSBootstrap)
+ defer span.Finish()
+
+ if sampled {
+ span.LogFields(opentracinglog.String("namespace", n.id.String()))
+ }
+
callStart := n.nowFn()
n.Lock()
@@ -851,6 +925,7 @@ func (n *dbNamespace) Bootstrap(
return errNamespaceIsBootstrapping
}
n.bootstrapState = Bootstrapping
+ nsCtx := n.nsContextWithRLock()
n.Unlock()
n.metrics.bootstrapStart.Inc(1)
@@ -886,7 +961,7 @@ func (n *dbNamespace) Bootstrap(
n.log.Info("bootstrap marking all shards as bootstrapped",
zap.Stringer("namespace", n.id),
zap.Int("numShards", len(bootstrappedShards)))
- for _, shard := range n.GetOwnedShards() {
+ for _, shard := range n.OwnedShards() {
// Make sure it was bootstrapped during this bootstrap run.
shardID := shard.ID()
bootstrapped := false
@@ -916,7 +991,7 @@ func (n *dbNamespace) Bootstrap(
wg.Add(1)
shard := shard
workers.Go(func() {
- err := shard.Bootstrap()
+ err := shard.Bootstrap(ctx, nsCtx)
mutex.Lock()
multiErr = multiErr.Add(err)
@@ -939,7 +1014,7 @@ func (n *dbNamespace) Bootstrap(
bootstrapType string,
unfulfilled result.ShardTimeRanges,
) error {
- shardsUnfulfilled := int64(len(unfulfilled))
+ shardsUnfulfilled := int64(unfulfilled.Len())
n.metrics.unfulfilled.Inc(shardsUnfulfilled)
if shardsUnfulfilled == 0 {
return nil
@@ -1002,7 +1077,7 @@ func (n *dbNamespace) WarmFlush(
}
multiErr := xerrors.NewMultiError()
- shards := n.GetOwnedShards()
+ shards := n.OwnedShards()
for _, shard := range shards {
if !shard.IsBootstrapped() {
n.log.
@@ -1037,7 +1112,7 @@ func (n *dbNamespace) WarmFlush(
// idAndBlockStart is the composite key for the genny map used to keep track of
// dirty series that need to be ColdFlushed.
type idAndBlockStart struct {
- id ident.ID
+ id []byte
blockStart xtime.UnixNano
}
@@ -1070,8 +1145,7 @@ func newColdFlushReuseableResources(opts Options) (coldFlushReuseableResources,
}
return coldFlushReuseableResources{
- // TODO(juchan): consider setting these options.
- dirtySeries: newDirtySeriesMap(dirtySeriesMapOptions{}),
+ dirtySeries: newDirtySeriesMap(),
dirtySeriesToWrite: make(map[xtime.UnixNano]*idList),
// TODO(juchan): set pool options.
idElementPool: newIDElementPool(nil),
@@ -1112,21 +1186,66 @@ func (n *dbNamespace) ColdFlush(flushPersist persist.FlushPreparer) error {
return nil
}
- multiErr := xerrors.NewMultiError()
- shards := n.GetOwnedShards()
+ shards := n.OwnedShards()
resources, err := newColdFlushReuseableResources(n.opts)
if err != nil {
+ n.metrics.flushColdData.ReportError(n.nowFn().Sub(callStart))
return err
}
+
+ // NB(bodu): The in-mem index will lag behind the TSDB in terms of new series writes. For a period of
+ // time between when we rotate out the active cold mutable index segments (happens here) and when
+ // we actually cold flush the data to disk, we will be making writes to the newly active mutable seg.
+ // This means that some series can live both in mem and on disk until the next cold flush,
+ // when they will be evicted from the in-mem index.
+ var onColdFlushDone OnColdFlushDone
+ if n.reverseIndex != nil {
+ onColdFlushDone, err = n.reverseIndex.ColdFlush(shards)
+ if err != nil {
+ n.metrics.flushColdData.ReportError(n.nowFn().Sub(callStart))
+ return err
+ }
+ }
+
+ onColdFlushNs, err := n.opts.OnColdFlush().ColdFlushNamespace(n)
+ if err != nil {
+ n.metrics.flushColdData.ReportError(n.nowFn().Sub(callStart))
+ return err
+ }
+
+ // NB(bodu): Shard cold flushes are deferred so that we can ensure cold flush index data
+ // is persisted before TSDB data, to ensure crash consistency.
+ multiErr := xerrors.NewMultiError()
+ shardColdFlushes := make([]ShardColdFlush, 0, len(shards))
for _, shard := range shards {
- err := shard.ColdFlush(flushPersist, resources, nsCtx)
+ shardColdFlush, err := shard.ColdFlush(flushPersist, resources, nsCtx, onColdFlushNs)
if err != nil {
detailedErr := fmt.Errorf("shard %d failed to compact: %v", shard.ID(), err)
multiErr = multiErr.Add(detailedErr)
- // Continue with remaining shards.
+ continue
+ }
+ shardColdFlushes = append(shardColdFlushes, shardColdFlush)
+ }
+
+ // We go through this error checking process to allow for partially successful flushes.
+ indexColdFlushError := onColdFlushNs.Done()
+ if indexColdFlushError == nil && onColdFlushDone != nil {
+ // Only evict rotated cold mutable index segments if the index cold flush was successful,
+ // or we will lose queryability of data that's still in mem.
+ indexColdFlushError = onColdFlushDone()
+ }
+ if indexColdFlushError == nil {
+ // NB(bodu): We only want to complete data cold flushes if the index cold flush
+ // is successful. If it is, we attempt to write checkpoint files to complete the
+ // cold data flush lifecycle for the successful shards.
+ for _, shardColdFlush := range shardColdFlushes {
+ multiErr = multiErr.Add(shardColdFlush.Done())
}
}
+ multiErr = multiErr.Add(indexColdFlushError)
res := multiErr.FinalError()
n.metrics.flushColdData.ReportSuccessOrError(res, n.nowFn().Sub(callStart))
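To summarize the crash-consistency ordering enforced above (an outline of the code just shown, error handling elided):

// 1. Rotate the active cold mutable index segments (reverseIndex.ColdFlush).
// 2. Cold flush each shard's TSDB data, collecting deferred completions.
// 3. Persist the index cold flush data (onColdFlushNs.Done()).
// 4. Only if step 3 succeeded: evict the rotated segments (onColdFlushDone)
//    and write shard checkpoint files (shardColdFlush.Done()), so index data
//    is always on disk before the TSDB checkpoints that depend on it.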
@@ -1148,8 +1267,8 @@ func (n *dbNamespace) FlushIndex(flush persist.IndexFlush) error {
return nil
}
- shards := n.GetOwnedShards()
- err := n.reverseIndex.Flush(flush, shards)
+ shards := n.OwnedShards()
+ err := n.reverseIndex.WarmFlush(flush, shards)
n.metrics.flushIndex.ReportSuccessOrError(err, n.nowFn().Sub(callStart))
return err
}
@@ -1182,17 +1301,23 @@ func (n *dbNamespace) Snapshot(
return nil
}
- multiErr := xerrors.NewMultiError()
- shards := n.GetOwnedShards()
- for _, shard := range shards {
- err := shard.Snapshot(blockStart, snapshotTime, snapshotPersist, nsCtx)
+ var (
+ seriesPersist int
+ multiErr xerrors.MultiError
+ )
+ for _, shard := range n.OwnedShards() {
+ result, err := shard.Snapshot(blockStart, snapshotTime, snapshotPersist, nsCtx)
if err != nil {
detailedErr := fmt.Errorf("shard %d failed to snapshot: %v", shard.ID(), err)
multiErr = multiErr.Add(detailedErr)
// Continue with remaining shards
}
+
+ seriesPersist += result.SeriesPersist
}
+ n.metrics.snapshotSeriesPersist.Inc(int64(seriesPersist))
+
res := multiErr.FinalError()
n.metrics.snapshot.ReportSuccessOrError(res, n.nowFn().Sub(callStart))
return res
@@ -1257,7 +1382,10 @@ func (n *dbNamespace) Truncate() (int64, error) {
// namespace, which means the memory will be reclaimed the next time GC kicks in and returns the
// reclaimed memory to the OS. In the future, we might investigate whether it's worth returning
// the pooled objects to the pools if the pool is low and needs replenishing.
- n.initShards(false)
+ n.assignShardSet(n.shardSet, assignShardSetOptions{
+ needsBootstrap: false,
+ initialAssignment: true,
+ })
// NB(xichen): possibly also clean up disk files and force a GC here to reclaim memory immediately
return totalNumSeries, nil
@@ -1285,7 +1413,7 @@ func (n *dbNamespace) Repair(
)
multiErr := xerrors.NewMultiError()
- shards := n.GetOwnedShards()
+ shards := n.OwnedShards()
numShards := len(shards)
if numShards > 0 {
throttlePerShard = time.Duration(
@@ -1349,7 +1477,7 @@ func (n *dbNamespace) Repair(
return multiErr.FinalError()
}
-func (n *dbNamespace) GetOwnedShards() []databaseShard {
+func (n *dbNamespace) OwnedShards() []databaseShard {
n.RLock()
shards := n.shardSet.AllIDs()
databaseShards := make([]databaseShard, len(shards))
@@ -1360,7 +1488,7 @@ func (n *dbNamespace) GetOwnedShards() []databaseShard {
return databaseShards
}
-func (n *dbNamespace) GetIndex() (namespaceIndex, error) {
+func (n *dbNamespace) Index() (NamespaceIndex, error) {
n.RLock()
defer n.RUnlock()
if !n.metadata.Options().IndexOptions().Enabled() {
@@ -1421,19 +1549,6 @@ func (n *dbNamespace) readableShardAtWithRLock(shardID uint32) (databaseShard, e
return shard, nil
}
-func (n *dbNamespace) initShards(needBootstrap bool) {
- n.Lock()
- shards := n.shardSet.AllIDs()
- dbShards := make([]databaseShard, n.shardSet.Max()+1)
- for _, shard := range shards {
- dbShards[shard] = newDatabaseShard(n.metadata, shard, n.blockRetriever,
- n.namespaceReaderMgr, n.increasingIndex, n.reverseIndex,
- needBootstrap, n.opts, n.seriesOpts)
- }
- n.shards = dbShards
- n.Unlock()
-}
-
func (n *dbNamespace) Close() error {
n.Lock()
if n.closed {
diff --git a/src/dbnode/storage/namespace_new_map_gen.go b/src/dbnode/storage/namespace_new_map_gen.go
index baa3983598..7cfa5a0b25 100644
--- a/src/dbnode/storage/namespace_new_map_gen.go
+++ b/src/dbnode/storage/namespace_new_map_gen.go
@@ -28,7 +28,7 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/dbnode/storage/namespace_readers.go b/src/dbnode/storage/namespace_readers.go
index 6df5409d93..df92ed0144 100644
--- a/src/dbnode/storage/namespace_readers.go
+++ b/src/dbnode/storage/namespace_readers.go
@@ -26,6 +26,7 @@ import (
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
@@ -72,6 +73,8 @@ type databaseNamespaceReaderManager interface {
put(reader fs.DataFileSetReader) error
+ assignShardSet(shardSet sharding.ShardSet)
+
tick()
close()
@@ -105,6 +108,7 @@ type namespaceReaderManager struct {
closedReaders []cachedReader
openReaders map[cachedOpenReaderKey]cachedReader
+ shardSet sharding.ShardSet
metrics namespaceReaderManagerMetrics
}
@@ -162,6 +166,7 @@ func newNamespaceReaderManager(
bytesPool: opts.BytesPool(),
logger: opts.InstrumentOptions().Logger(),
openReaders: make(map[cachedOpenReaderKey]cachedReader),
+ shardSet: sharding.NewEmptyShardSet(sharding.DefaultHashFn(1)),
metrics: newNamespaceReaderManagerMetrics(namespaceScope),
}
@@ -199,6 +204,19 @@ func (m *namespaceReaderManager) filesetExistsAt(
m.namespace.ID(), shard, blockStart, latestVolume)
}
+func (m *namespaceReaderManager) assignShardSet(shardSet sharding.ShardSet) {
+ m.Lock()
+ defer m.Unlock()
+ m.shardSet = shardSet
+}
+
+func (m *namespaceReaderManager) shardExistsWithLock(shard uint32) bool {
+ _, err := m.shardSet.LookupStateByID(shard)
+ // NB(bodu): LookupStateByID returns ErrInvalidShardID when the shard
+ // does not exist in the shard map, which means the shard is not available.
+ return err == nil
+}
+
type cachedReaderForKeyResult struct {
openReader fs.DataFileSetReader
closedReader fs.DataFileSetReader
@@ -446,7 +464,11 @@ func (m *namespaceReaderManager) tickWithThreshold(threshold int) {
for key, elem := range m.openReaders {
// Mutate the for-loop copy in place before checking the threshold
elem.ticksSinceUsed++
- if elem.ticksSinceUsed >= threshold {
+ if elem.ticksSinceUsed >= threshold ||
+ // Also check whether the shard is still available and remove cached readers for
+ // shards that are no longer available. This keeps cached readers eventually
+ // consistent with shard state.
+ !m.shardExistsWithLock(key.shard) {
// Close before removing ref
if err := elem.reader.Close(); err != nil {
m.logger.Error("error closing reader from reader cache", zap.Error(err))
@@ -454,6 +476,7 @@ func (m *namespaceReaderManager) tickWithThreshold(threshold int) {
delete(m.openReaders, key)
continue
}
+
// Save the mutated copy back to the map
m.openReaders[key] = elem
}
diff --git a/src/dbnode/storage/namespace_test.go b/src/dbnode/storage/namespace_test.go
index fc69e57a8b..6566db69f4 100644
--- a/src/dbnode/storage/namespace_test.go
+++ b/src/dbnode/storage/namespace_test.go
@@ -39,7 +39,6 @@ import (
"github.com/m3db/m3/src/dbnode/storage/repair"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/tracepoint"
- "github.com/m3db/m3/src/dbnode/ts"
xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
xidx "github.com/m3db/m3/src/m3ninx/idx"
"github.com/m3db/m3/src/x/context"
@@ -91,7 +90,9 @@ func newTestNamespaceWithIDOpts(
shardSet, err := sharding.NewShardSet(testShardIDs, hashFn)
require.NoError(t, err)
dopts := DefaultTestOptions().SetRuntimeOptionsManager(runtime.NewOptionsManager())
- ns, err := newDatabaseNamespace(metadata, shardSet, nil, nil, nil, dopts)
+ ns, err := newDatabaseNamespace(metadata,
+ namespace.NewRuntimeOptionsManager(metadata.ID().String()),
+ shardSet, nil, nil, nil, dopts)
require.NoError(t, err)
closer := dopts.RuntimeOptionsManager().Close
return ns.(*dbNamespace), closer
@@ -106,7 +107,9 @@ func newTestNamespaceWithOpts(
hashFn := func(identifier ident.ID) uint32 { return testShardIDs[0].ID() }
shardSet, err := sharding.NewShardSet(testShardIDs, hashFn)
require.NoError(t, err)
- ns, err := newDatabaseNamespace(metadata, shardSet, nil, nil, nil, dopts)
+ ns, err := newDatabaseNamespace(metadata,
+ namespace.NewRuntimeOptionsManager(metadata.ID().String()),
+ shardSet, nil, nil, nil, dopts)
require.NoError(t, err)
closer := dopts.RuntimeOptionsManager().Close
return ns.(*dbNamespace), closer
@@ -114,7 +117,7 @@ func newTestNamespaceWithOpts(
func newTestNamespaceWithIndex(
t *testing.T,
- index namespaceIndex,
+ index NamespaceIndex,
) (*dbNamespace, closerFn) {
ns, closer := newTestNamespace(t)
if index != nil {
@@ -125,7 +128,7 @@ func newTestNamespaceWithIndex(
func newTestNamespaceWithTruncateType(
t *testing.T,
- index namespaceIndex,
+ index NamespaceIndex,
truncateType series.TruncateType,
) (*dbNamespace, closerFn) {
opts := DefaultTestOptions().
@@ -192,11 +195,11 @@ func TestNamespaceWriteShardNotOwned(t *testing.T) {
ns.shards[i] = nil
}
now := time.Now()
- _, wasWritten, err := ns.Write(ctx, ident.StringID("foo"), now, 0.0, xtime.Second, nil)
+ seriesWrite, err := ns.Write(ctx, ident.StringID("foo"), now, 0.0, xtime.Second, nil)
require.Error(t, err)
require.True(t, xerrors.IsRetryableError(err))
require.Equal(t, "not responsible for shard 0", err.Error())
- require.False(t, wasWritten)
+ require.False(t, seriesWrite.WasWritten)
}
func TestNamespaceWriteShardOwned(t *testing.T) {
@@ -221,19 +224,19 @@ func TestNamespaceWriteShardOwned(t *testing.T) {
TruncateType: truncateType,
}
shard.EXPECT().Write(ctx, id, now, val, unit, ant, opts).
- Return(ts.Series{}, true, nil).Times(1)
+ Return(SeriesWrite{WasWritten: true}, nil).Times(1)
shard.EXPECT().Write(ctx, id, now, val, unit, ant, opts).
- Return(ts.Series{}, false, nil).Times(1)
+ Return(SeriesWrite{WasWritten: false}, nil).Times(1)
ns.shards[testShardIDs[0].ID()] = shard
- _, wasWritten, err := ns.Write(ctx, id, now, val, unit, ant)
+ seriesWrite, err := ns.Write(ctx, id, now, val, unit, ant)
require.NoError(t, err)
- require.True(t, wasWritten)
+ require.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = ns.Write(ctx, id, now, val, unit, ant)
+ seriesWrite, err = ns.Write(ctx, id, now, val, unit, ant)
require.NoError(t, err)
- require.False(t, wasWritten)
+ require.False(t, seriesWrite.WasWritten)
}
}
@@ -323,16 +326,25 @@ func TestNamespaceFetchBlocksShardOwned(t *testing.T) {
func TestNamespaceBootstrapBootstrapping(t *testing.T) {
ns, closer := newTestNamespace(t)
defer closer()
+
ns.bootstrapState = Bootstrapping
- require.Equal(t,
- errNamespaceIsBootstrapping, ns.Bootstrap(bootstrap.NamespaceResult{}))
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ err := ns.Bootstrap(ctx, bootstrap.NamespaceResult{})
+ require.Equal(t, errNamespaceIsBootstrapping, err)
}
func TestNamespaceBootstrapDontNeedBootstrap(t *testing.T) {
ns, closer := newTestNamespaceWithIDOpts(t, defaultTestNs1ID,
namespace.NewOptions().SetBootstrapEnabled(false))
defer closer()
- require.NoError(t, ns.Bootstrap(bootstrap.NamespaceResult{}))
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ require.NoError(t, ns.Bootstrap(ctx, bootstrap.NamespaceResult{}))
require.Equal(t, Bootstrapped, ns.bootstrapState)
}
@@ -350,7 +362,7 @@ func TestNamespaceBootstrapAllShards(t *testing.T) {
shard := NewMockdatabaseShard(ctrl)
shard.EXPECT().IsBootstrapped().Return(false)
shard.EXPECT().ID().Return(shardID)
- shard.EXPECT().Bootstrap().Return(errs[i])
+ shard.EXPECT().Bootstrap(gomock.Any(), gomock.Any()).Return(errs[i])
ns.shards[testShardIDs[i].ID()] = shard
shardIDs = append(shardIDs, shardID)
}
@@ -360,7 +372,10 @@ func TestNamespaceBootstrapAllShards(t *testing.T) {
Shards: shardIDs,
}
- require.Equal(t, "foo", ns.Bootstrap(nsResult).Error())
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ require.Equal(t, "foo", ns.Bootstrap(ctx, nsResult).Error())
require.Equal(t, BootstrapNotStarted, ns.bootstrapState)
}
@@ -392,7 +407,7 @@ func TestNamespaceBootstrapOnlyNonBootstrappedShards(t *testing.T) {
shard := NewMockdatabaseShard(ctrl)
shard.EXPECT().IsBootstrapped().Return(false)
shard.EXPECT().ID().Return(testShard.ID())
- shard.EXPECT().Bootstrap().Return(nil)
+ shard.EXPECT().Bootstrap(gomock.Any(), gomock.Any()).Return(nil)
ns.shards[testShard.ID()] = shard
shardIDs = append(shardIDs, testShard.ID())
}
@@ -410,7 +425,10 @@ func TestNamespaceBootstrapOnlyNonBootstrappedShards(t *testing.T) {
Shards: shardIDs,
}
- require.Error(t, ns.Bootstrap(nsResult))
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ require.Error(t, ns.Bootstrap(ctx, nsResult))
require.Equal(t, BootstrapNotStarted, ns.bootstrapState)
}
@@ -543,7 +561,10 @@ func TestNamespaceSnapshotShardError(t *testing.T) {
require.Error(t, testSnapshotWithShardSnapshotErrs(t, shardMethodResults))
}
-func testSnapshotWithShardSnapshotErrs(t *testing.T, shardMethodResults []snapshotTestCase) error {
+func testSnapshotWithShardSnapshotErrs(
+ t *testing.T,
+ shardMethodResults []snapshotTestCase,
+) error {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -570,7 +591,9 @@ func testSnapshotWithShardSnapshotErrs(t *testing.T, shardMethodResults []snapsh
shardID := uint32(i)
shard.EXPECT().ID().Return(uint32(i)).AnyTimes()
if tc.expectSnapshot {
- shard.EXPECT().Snapshot(blockStart, now, gomock.Any(), gomock.Any()).Return(tc.shardSnapshotErr)
+ shard.EXPECT().
+ Snapshot(blockStart, now, gomock.Any(), gomock.Any()).
+ Return(ShardSnapshotResult{}, tc.shardSnapshotErr)
}
ns.shards[testShardIDs[i].ID()] = shard
shardBootstrapStates[shardID] = tc.shardBootstrapStateBeforeTick
@@ -588,6 +611,7 @@ func TestNamespaceTruncate(t *testing.T) {
for _, shard := range testShardIDs {
mockShard := NewMockdatabaseShard(ctrl)
mockShard.EXPECT().NumSeries().Return(int64(shard.ID()))
+ mockShard.EXPECT().ID().Return(shard.ID())
ns.shards[shard.ID()] = mockShard
}
@@ -683,7 +707,9 @@ func TestNamespaceAssignShardSet(t *testing.T) {
dopts = dopts.SetInstrumentOptions(dopts.InstrumentOptions().
SetMetricsScope(scope))
- oNs, err := newDatabaseNamespace(metadata, shardSet, nil, nil, nil, dopts)
+ oNs, err := newDatabaseNamespace(metadata,
+ namespace.NewRuntimeOptionsManager(metadata.ID().String()),
+ shardSet, nil, nil, nil, dopts)
require.NoError(t, err)
ns := oNs.(*dbNamespace)
@@ -756,7 +782,9 @@ func newNeedsFlushNamespace(t *testing.T, shardNumbers []uint32) *dbNamespace {
return at
}))
- ns, err := newDatabaseNamespace(metadata, shardSet, nil, nil, nil, dopts)
+ ns, err := newDatabaseNamespace(metadata,
+ namespace.NewRuntimeOptionsManager(metadata.ID().String()),
+ shardSet, nil, nil, nil, dopts)
require.NoError(t, err)
return ns.(*dbNamespace)
}
@@ -901,7 +929,9 @@ func TestNamespaceNeedsFlushAllSuccess(t *testing.T) {
blockStart := retention.FlushTimeEnd(ropts, at)
- oNs, err := newDatabaseNamespace(metadata, shardSet, nil, nil, nil, dopts)
+ oNs, err := newDatabaseNamespace(metadata,
+ namespace.NewRuntimeOptionsManager(metadata.ID().String()),
+ shardSet, nil, nil, nil, dopts)
require.NoError(t, err)
ns := oNs.(*dbNamespace)
@@ -942,7 +972,9 @@ func TestNamespaceNeedsFlushAnyFailed(t *testing.T) {
blockStart := retention.FlushTimeEnd(ropts, at)
- oNs, err := newDatabaseNamespace(testNs, shardSet, nil, nil, nil, dopts)
+ oNs, err := newDatabaseNamespace(testNs,
+ namespace.NewRuntimeOptionsManager(testNs.ID().String()),
+ shardSet, nil, nil, nil, dopts)
require.NoError(t, err)
ns := oNs.(*dbNamespace)
for _, s := range shards {
@@ -994,7 +1026,9 @@ func TestNamespaceNeedsFlushAnyNotStarted(t *testing.T) {
blockStart := retention.FlushTimeEnd(ropts, at)
- oNs, err := newDatabaseNamespace(testNs, shardSet, nil, nil, nil, dopts)
+ oNs, err := newDatabaseNamespace(testNs,
+ namespace.NewRuntimeOptionsManager(testNs.ID().String()),
+ shardSet, nil, nil, nil, dopts)
require.NoError(t, err)
ns := oNs.(*dbNamespace)
for _, s := range shards {
@@ -1042,7 +1076,7 @@ func TestNamespaceCloseWillCloseShard(t *testing.T) {
require.NoError(t, ns.Close())
// Check the namespace no long owns any shards
- require.Empty(t, ns.GetOwnedShards())
+ require.Empty(t, ns.OwnedShards())
}
func TestNamespaceCloseDoesNotLeak(t *testing.T) {
@@ -1070,7 +1104,7 @@ func TestNamespaceCloseDoesNotLeak(t *testing.T) {
require.NoError(t, ns.Close())
// Check the namespace no long owns any shards
- require.Empty(t, ns.GetOwnedShards())
+ require.Empty(t, ns.OwnedShards())
}
func TestNamespaceIndexInsert(t *testing.T) {
@@ -1079,7 +1113,7 @@ func TestNamespaceIndexInsert(t *testing.T) {
truncateTypes := []series.TruncateType{series.TypeBlock, series.TypeNone}
for _, truncateType := range truncateTypes {
- idx := NewMocknamespaceIndex(ctrl)
+ idx := NewMockNamespaceIndex(ctrl)
ns, closer := newTestNamespaceWithTruncateType(t, idx, truncateType)
ns.reverseIndex = idx
@@ -1093,22 +1127,26 @@ func TestNamespaceIndexInsert(t *testing.T) {
opts := series.WriteOptions{
TruncateType: truncateType,
}
- shard.EXPECT().WriteTagged(ctx, ident.NewIDMatcher("a"), ident.EmptyTagIterator,
- now, 1.0, xtime.Second, nil, opts).Return(ts.Series{}, true, nil)
- shard.EXPECT().WriteTagged(ctx, ident.NewIDMatcher("a"), ident.EmptyTagIterator,
- now, 1.0, xtime.Second, nil, opts).Return(ts.Series{}, false, nil)
+ shard.EXPECT().
+ WriteTagged(ctx, ident.NewIDMatcher("a"), ident.EmptyTagIterator,
+ now, 1.0, xtime.Second, nil, opts).
+ Return(SeriesWrite{WasWritten: true}, nil)
+ shard.EXPECT().
+ WriteTagged(ctx, ident.NewIDMatcher("a"), ident.EmptyTagIterator,
+ now, 1.0, xtime.Second, nil, opts).
+ Return(SeriesWrite{WasWritten: false}, nil)
ns.shards[testShardIDs[0].ID()] = shard
- _, wasWritten, err := ns.WriteTagged(ctx, ident.StringID("a"),
+ seriesWrite, err := ns.WriteTagged(ctx, ident.StringID("a"),
ident.EmptyTagIterator, now, 1.0, xtime.Second, nil)
require.NoError(t, err)
- require.True(t, wasWritten)
+ require.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = ns.WriteTagged(ctx, ident.StringID("a"),
+ seriesWrite, err = ns.WriteTagged(ctx, ident.StringID("a"),
ident.EmptyTagIterator, now, 1.0, xtime.Second, nil)
require.NoError(t, err)
- require.False(t, wasWritten)
+ require.False(t, seriesWrite.WasWritten)
shard.EXPECT().Close()
idx.EXPECT().Close().Return(nil)
@@ -1120,7 +1158,7 @@ func TestNamespaceIndexQuery(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- idx := NewMocknamespaceIndex(ctrl)
+ idx := NewMockNamespaceIndex(ctrl)
idx.EXPECT().BootstrapsDone().Return(uint(1))
ns, closer := newTestNamespaceWithIndex(t, idx)
@@ -1154,7 +1192,7 @@ func TestNamespaceAggregateQuery(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- idx := NewMocknamespaceIndex(ctrl)
+ idx := NewMockNamespaceIndex(ctrl)
idx.EXPECT().BootstrapsDone().Return(uint(1))
ns, closer := newTestNamespaceWithIndex(t, idx)
@@ -1178,18 +1216,26 @@ func TestNamespaceTicksIndex(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- idx := NewMocknamespaceIndex(ctrl)
+ idx := NewMockNamespaceIndex(ctrl)
ns, closer := newTestNamespaceWithIndex(t, idx)
defer closer()
+
+ ns.RLock()
+ nsCtx := ns.nsContextWithRLock()
+ ns.RUnlock()
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
for _, s := range ns.shards {
if s != nil {
- s.Bootstrap()
+ s.Bootstrap(ctx, nsCtx)
}
}
- ctx := context.NewCancellable()
- idx.EXPECT().Tick(ctx, gomock.Any()).Return(namespaceIndexTickResult{}, nil)
- err := ns.Tick(ctx, time.Now())
+ cancel := context.NewCancellable()
+ idx.EXPECT().Tick(cancel, gomock.Any()).Return(namespaceIndexTickResult{}, nil)
+ err := ns.Tick(cancel, time.Now())
require.NoError(t, err)
}
diff --git a/src/dbnode/storage/options.go b/src/dbnode/storage/options.go
index eab0895836..22e2bcfe3a 100644
--- a/src/dbnode/storage/options.go
+++ b/src/dbnode/storage/options.go
@@ -42,7 +42,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/repair"
"github.com/m3db/m3/src/dbnode/storage/series"
- "github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
"github.com/m3db/m3/src/x/context"
@@ -75,6 +75,8 @@ const (
// defaultNumLoadedBytesLimit is the default limit (2GiB) for the number of outstanding loaded bytes that
// the memory tracker will allow.
defaultNumLoadedBytesLimit = 2 << 30
+
+ defaultMediatorTickInterval = 5 * time.Second
)
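This default backs the MediatorTickInterval option read in newMediator above; a hypothetical override (assuming a matching SetMediatorTickInterval setter on Options, which the getter implies but this excerpt does not show):

// Sketch: shorten the mediator tick interval, e.g. in tests.
opts := storage.NewOptions().SetMediatorTickInterval(100 * time.Millisecond)
_ = opts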
var (
@@ -93,6 +95,7 @@ var (
errIndexOptionsNotSet = errors.New("index enabled but index options are not set")
errPersistManagerNotSet = errors.New("persist manager is not set")
errBlockLeaserNotSet = errors.New("block leaser is not set")
+ errOnColdFlushNotSet = errors.New("on cold flush is not set, requires at least a no-op implementation")
)
// NewSeriesOptionsFromOptions creates a new set of database series options from provided options.
@@ -112,52 +115,57 @@ func NewSeriesOptionsFromOptions(opts Options, ropts retention.Options) series.O
SetMultiReaderIteratorPool(opts.MultiReaderIteratorPool()).
SetIdentifierPool(opts.IdentifierPool()).
SetBufferBucketPool(opts.BufferBucketPool()).
- SetBufferBucketVersionsPool(opts.BufferBucketVersionsPool())
+ SetBufferBucketVersionsPool(opts.BufferBucketVersionsPool()).
+ SetRuntimeOptionsManager(opts.RuntimeOptionsManager())
}
type options struct {
- clockOpts clock.Options
- instrumentOpts instrument.Options
- nsRegistryInitializer namespace.Initializer
- blockOpts block.Options
- commitLogOpts commitlog.Options
- runtimeOptsMgr m3dbruntime.OptionsManager
- errWindowForLoad time.Duration
- errThresholdForLoad int64
- indexingEnabled bool
- repairEnabled bool
- truncateType series.TruncateType
- transformOptions series.WriteTransformOptions
- indexOpts index.Options
- repairOpts repair.Options
- newEncoderFn encoding.NewEncoderFn
- newDecoderFn encoding.NewDecoderFn
- bootstrapProcessProvider bootstrap.ProcessProvider
- persistManager persist.Manager
- blockRetrieverManager block.DatabaseBlockRetrieverManager
- poolOpts pool.ObjectPoolOptions
- contextPool context.Pool
- seriesCachePolicy series.CachePolicy
- seriesOpts series.Options
- seriesPool series.DatabaseSeriesPool
- bytesPool pool.CheckedBytesPool
- encoderPool encoding.EncoderPool
- segmentReaderPool xio.SegmentReaderPool
- readerIteratorPool encoding.ReaderIteratorPool
- multiReaderIteratorPool encoding.MultiReaderIteratorPool
- identifierPool ident.Pool
- fetchBlockMetadataResultsPool block.FetchBlockMetadataResultsPool
- fetchBlocksMetadataResultsPool block.FetchBlocksMetadataResultsPool
- queryIDsWorkerPool xsync.WorkerPool
- writeBatchPool *ts.WriteBatchPool
- bufferBucketPool *series.BufferBucketPool
- bufferBucketVersionsPool *series.BufferBucketVersionsPool
- retrieveRequestPool fs.RetrieveRequestPool
- checkedBytesWrapperPool xpool.CheckedBytesWrapperPool
- schemaReg namespace.SchemaRegistry
- blockLeaseManager block.LeaseManager
- memoryTracker MemoryTracker
- mmapReporter mmap.Reporter
+ clockOpts clock.Options
+ instrumentOpts instrument.Options
+ nsRegistryInitializer namespace.Initializer
+ blockOpts block.Options
+ commitLogOpts commitlog.Options
+ runtimeOptsMgr m3dbruntime.OptionsManager
+ errWindowForLoad time.Duration
+ errThresholdForLoad int64
+ indexingEnabled bool
+ repairEnabled bool
+ truncateType series.TruncateType
+ transformOptions series.WriteTransformOptions
+ indexOpts index.Options
+ repairOpts repair.Options
+ newEncoderFn encoding.NewEncoderFn
+ newDecoderFn encoding.NewDecoderFn
+ bootstrapProcessProvider bootstrap.ProcessProvider
+ persistManager persist.Manager
+ blockRetrieverManager block.DatabaseBlockRetrieverManager
+ poolOpts pool.ObjectPoolOptions
+ contextPool context.Pool
+ seriesCachePolicy series.CachePolicy
+ seriesOpts series.Options
+ seriesPool series.DatabaseSeriesPool
+ bytesPool pool.CheckedBytesPool
+ encoderPool encoding.EncoderPool
+ segmentReaderPool xio.SegmentReaderPool
+ readerIteratorPool encoding.ReaderIteratorPool
+ multiReaderIteratorPool encoding.MultiReaderIteratorPool
+ identifierPool ident.Pool
+ fetchBlockMetadataResultsPool block.FetchBlockMetadataResultsPool
+ fetchBlocksMetadataResultsPool block.FetchBlocksMetadataResultsPool
+ queryIDsWorkerPool xsync.WorkerPool
+ writeBatchPool *writes.WriteBatchPool
+ bufferBucketPool *series.BufferBucketPool
+ bufferBucketVersionsPool *series.BufferBucketVersionsPool
+ retrieveRequestPool fs.RetrieveRequestPool
+ checkedBytesWrapperPool xpool.CheckedBytesWrapperPool
+ schemaReg namespace.SchemaRegistry
+ blockLeaseManager block.LeaseManager
+ onColdFlush OnColdFlush
+ memoryTracker MemoryTracker
+ mmapReporter mmap.Reporter
+ doNotIndexWithFieldsMap map[string]string
+ namespaceRuntimeOptsMgrRegistry namespace.RuntimeOptionsManagerRegistry
+ mediatorTickInterval time.Duration
}
// NewOptions creates a new set of storage options with defaults
@@ -176,7 +184,7 @@ func newOptions(poolOpts pool.ObjectPoolOptions) Options {
queryIDsWorkerPool := xsync.NewWorkerPool(int(math.Ceil(float64(runtime.NumCPU()) / 2)))
queryIDsWorkerPool.Init()
- writeBatchPool := ts.NewWriteBatchPool(poolOpts, nil, nil)
+ writeBatchPool := writes.NewWriteBatchPool(poolOpts, nil, nil)
writeBatchPool.Init()
segmentReaderPool := xio.NewSegmentReaderPool(poolOpts)
@@ -218,16 +226,19 @@ func newOptions(poolOpts pool.ObjectPoolOptions) Options {
TagsPoolOptions: poolOpts,
TagsIteratorPoolOptions: poolOpts,
}),
- fetchBlockMetadataResultsPool: block.NewFetchBlockMetadataResultsPool(poolOpts, 0),
- fetchBlocksMetadataResultsPool: block.NewFetchBlocksMetadataResultsPool(poolOpts, 0),
- queryIDsWorkerPool: queryIDsWorkerPool,
- writeBatchPool: writeBatchPool,
- bufferBucketVersionsPool: series.NewBufferBucketVersionsPool(poolOpts),
- bufferBucketPool: series.NewBufferBucketPool(poolOpts),
- retrieveRequestPool: retrieveRequestPool,
- checkedBytesWrapperPool: bytesWrapperPool,
- schemaReg: namespace.NewSchemaRegistry(false, nil),
- memoryTracker: NewMemoryTracker(NewMemoryTrackerOptions(defaultNumLoadedBytesLimit)),
+ fetchBlockMetadataResultsPool: block.NewFetchBlockMetadataResultsPool(poolOpts, 0),
+ fetchBlocksMetadataResultsPool: block.NewFetchBlocksMetadataResultsPool(poolOpts, 0),
+ queryIDsWorkerPool: queryIDsWorkerPool,
+ writeBatchPool: writeBatchPool,
+ bufferBucketVersionsPool: series.NewBufferBucketVersionsPool(poolOpts),
+ bufferBucketPool: series.NewBufferBucketPool(poolOpts),
+ retrieveRequestPool: retrieveRequestPool,
+ checkedBytesWrapperPool: bytesWrapperPool,
+ schemaReg: namespace.NewSchemaRegistry(false, nil),
+ onColdFlush: &noOpColdFlush{},
+ memoryTracker: NewMemoryTracker(NewMemoryTrackerOptions(defaultNumLoadedBytesLimit)),
+ namespaceRuntimeOptsMgrRegistry: namespace.NewRuntimeOptionsManagerRegistry(),
+ mediatorTickInterval: defaultMediatorTickInterval,
}
return o.SetEncodingM3TSZPooled()
}
@@ -281,6 +292,10 @@ func (o *options) Validate() error {
return errBlockLeaserNotSet
}
+ if o.onColdFlush == nil {
+ return errOnColdFlushNotSet
+ }
+
return nil
}
@@ -666,13 +681,13 @@ func (o *options) QueryIDsWorkerPool() xsync.WorkerPool {
return o.queryIDsWorkerPool
}
-func (o *options) SetWriteBatchPool(value *ts.WriteBatchPool) Options {
+func (o *options) SetWriteBatchPool(value *writes.WriteBatchPool) Options {
opts := *o
opts.writeBatchPool = value
return &opts
}
-func (o *options) WriteBatchPool() *ts.WriteBatchPool {
+func (o *options) WriteBatchPool() *writes.WriteBatchPool {
return o.writeBatchPool
}
@@ -736,6 +751,16 @@ func (o *options) BlockLeaseManager() block.LeaseManager {
return o.blockLeaseManager
}
+func (o *options) SetOnColdFlush(value OnColdFlush) Options {
+ opts := *o
+ opts.onColdFlush = value
+ return &opts
+}
+
+func (o *options) OnColdFlush() OnColdFlush {
+ return o.onColdFlush
+}
+
func (o *options) SetMemoryTracker(memTracker MemoryTracker) Options {
opts := *o
opts.memoryTracker = memTracker
@@ -755,3 +780,41 @@ func (o *options) SetMmapReporter(mmapReporter mmap.Reporter) Options {
func (o *options) MmapReporter() mmap.Reporter {
return o.mmapReporter
}
+
+func (o *options) SetDoNotIndexWithFieldsMap(value map[string]string) Options {
+ opts := *o
+ opts.doNotIndexWithFieldsMap = value
+ return &opts
+}
+
+func (o *options) DoNotIndexWithFieldsMap() map[string]string {
+ return o.doNotIndexWithFieldsMap
+}
+
+func (o *options) SetNamespaceRuntimeOptionsManagerRegistry(
+ value namespace.RuntimeOptionsManagerRegistry,
+) Options {
+ opts := *o
+ opts.namespaceRuntimeOptsMgrRegistry = value
+ return &opts
+}
+
+func (o *options) NamespaceRuntimeOptionsManagerRegistry() namespace.RuntimeOptionsManagerRegistry {
+ return o.namespaceRuntimeOptsMgrRegistry
+}
+
+func (o *options) SetMediatorTickInterval(value time.Duration) Options {
+ opts := *o
+ opts.mediatorTickInterval = value
+ return &opts
+}
+
+func (o *options) MediatorTickInterval() time.Duration {
+ return o.mediatorTickInterval
+}
+
+type noOpColdFlush struct{}
+
+func (n *noOpColdFlush) ColdFlushNamespace(ns Namespace) (OnColdFlushNamespace, error) {
+ return &persist.NoOpColdFlushNamespace{}, nil
+}
diff --git a/src/dbnode/storage/repair.go b/src/dbnode/storage/repair.go
index 37327567a9..202dbbe2f6 100644
--- a/src/dbnode/storage/repair.go
+++ b/src/dbnode/storage/repair.go
@@ -56,7 +56,12 @@ var (
errRepairInProgress = errors.New("repair already in progress")
)
-type recordFn func(namespace ident.ID, shard databaseShard, diffRes repair.MetadataComparisonResult)
+type recordFn func(
+ origin topology.Host,
+ namespace ident.ID,
+ shard databaseShard,
+ diffRes repair.MetadataComparisonResult,
+)
// TODO(rartoul): See if we can find a way to guard against too much metadata.
type shardRepairer struct {
@@ -206,6 +211,10 @@ func (r shardRepairer) Repair(
seriesWithChecksumMismatches = metadataRes.ChecksumDifferences.Series()
)
+ // Shard repair can fail due to transient network errors because of the significant amount of data fetched
+ // from peers, so collect and emit metadata comparison metrics before fetching blocks from peers to repair.
+ r.recordFn(origin, nsCtx.ID, shard, metadataRes)
+
originID := origin.ID()
for _, e := range seriesWithChecksumMismatches.Iter() {
for blockStart, replicaMetadataBlocks := range e.Value().Metadata.Blocks() {
@@ -294,8 +303,6 @@ func (r shardRepairer) Repair(
return repair.MetadataComparisonResult{}, err
}
- r.recordFn(nsCtx.ID, shard, metadataRes)
-
return metadataRes, nil
}
@@ -355,6 +362,7 @@ func (r shardRepairer) loadDataIntoShard(shard databaseShard, data result.ShardR
}
func (r shardRepairer) recordDifferences(
+ origin topology.Host,
namespace ident.ID,
shard databaseShard,
diffRes repair.MetadataComparisonResult,
@@ -377,11 +385,69 @@ func (r shardRepairer) recordDifferences(
sizeDiffScope.Counter("series").Inc(diffRes.SizeDifferences.NumSeries())
sizeDiffScope.Counter("blocks").Inc(diffRes.SizeDifferences.NumBlocks())
+ absoluteBlockSizeDiff, blockSizeDiffAsPercentage := r.computeMaximumBlockSizeDifference(origin, diffRes)
+ sizeDiffScope.Gauge("max-block-size-diff").Update(float64(absoluteBlockSizeDiff))
+ sizeDiffScope.Gauge("max-block-size-diff-as-percentage").Update(blockSizeDiffAsPercentage)
+
// Record checksum differences.
checksumDiffScope.Counter("series").Inc(diffRes.ChecksumDifferences.NumSeries())
checksumDiffScope.Counter("blocks").Inc(diffRes.ChecksumDifferences.NumBlocks())
}
+// computeMaximumBlockSizeDifference returns a metric that represents the maximum divergence of a shard from any
+// of its peers. A positive divergence means the origin shard has more data than its peer, and a negative divergence
+// means the origin shard has less data than its peer. Since sizes for all the blocks in the retention window are not
+// readily available, the exact divergence of a shard from its peers cannot be calculated, so this method settles for
+// returning the maximum divergence of any single block from any of its peers. The divergence (as a percentage) of the
+// shard is upper bounded by the per-block divergence, so this metric can be used to monitor the severity of divergence.
+func (r shardRepairer) computeMaximumBlockSizeDifference(
+ origin topology.Host,
+ diffRes repair.MetadataComparisonResult,
+) (int64, float64) {
+ var (
+ maxBlockSizeDiffAsRatio float64
+ maxBlockSizeDiff int64
+ )
+ // Iterate over all the series which differ in size between origin and a peer.
+ for _, entry := range diffRes.SizeDifferences.Series().Iter() {
+ series := entry.Value()
+ replicaBlocksMetadata := diffRes.SizeDifferences.GetOrAdd(series.ID)
+ // Iterate over all the time ranges which had a mismatched series between origin and a peer.
+ for _, replicasMetadata := range replicaBlocksMetadata.Blocks() {
+ var (
+ // Setting the minimum origin block size to 1 so that percentages relative to the origin block size can be
+ // calculated without worrying about divide-by-zero errors. Exact percentages are not required, so setting a
+ // non-zero size for an empty block is acceptable.
+ originBlockSize int64 = 1
+ // Represents maximum size difference of a block with one of its peers.
+ maxPeerBlockSizeDiff int64
+ )
+ // Record the block size on the origin.
+ for _, replicaMetadata := range replicasMetadata.Metadata() {
+ if replicaMetadata.Host.ID() == origin.ID() && replicaMetadata.Size > 0 {
+ originBlockSize = replicaMetadata.Size
+ break
+ }
+ }
+ // Fetch the maximum block size difference of origin with any of its peers.
+ for _, replicaMetadata := range replicasMetadata.Metadata() {
+ if replicaMetadata.Host.ID() != origin.ID() {
+ blockSizeDiff := originBlockSize - replicaMetadata.Size
+ if math.Abs(float64(blockSizeDiff)) > math.Abs(float64(maxPeerBlockSizeDiff)) {
+ maxPeerBlockSizeDiff = blockSizeDiff
+ }
+ }
+ }
+ // Record the divergence (as a percentage) of the origin block that has diverged the most from its peers.
+ if math.Abs(float64(maxPeerBlockSizeDiff)) > math.Abs(float64(maxBlockSizeDiff)) {
+ maxBlockSizeDiff = maxPeerBlockSizeDiff
+ maxBlockSizeDiffAsRatio = float64(maxPeerBlockSizeDiff) / float64(originBlockSize)
+ }
+ }
+ }
+ return maxBlockSizeDiff, maxBlockSizeDiffAsRatio * 100
+}
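
Reviewer note: the nested loops above are easiest to follow as a standalone computation. Below is a minimal, self-contained sketch of the same divergence logic under assumed names (`replicaSize` and `maxBlockSizeDiff` are illustrative, not the repair package's API): per block, diff the origin size against every peer, keep the diff with the largest magnitude, and report it in bytes and as a percentage of the origin size.

```go
package main

import (
	"fmt"
	"math"
)

// replicaSize is an illustrative stand-in for block.ReplicaMetadata:
// one host's size for one block.
type replicaSize struct {
	hostID string
	size   int64
}

func maxBlockSizeDiff(originID string, blocks [][]replicaSize) (int64, float64) {
	var (
		maxDiff  int64
		maxRatio float64
	)
	for _, replicas := range blocks {
		// Minimum origin size of 1 avoids divide-by-zero for empty origin blocks.
		originSize := int64(1)
		for _, r := range replicas {
			if r.hostID == originID && r.size > 0 {
				originSize = r.size
				break
			}
		}
		// Largest-magnitude difference between the origin and any single peer.
		var maxPeerDiff int64
		for _, r := range replicas {
			if r.hostID == originID {
				continue
			}
			if d := originSize - r.size; math.Abs(float64(d)) > math.Abs(float64(maxPeerDiff)) {
				maxPeerDiff = d
			}
		}
		// Keep the most-diverged block seen so far across the shard.
		if math.Abs(float64(maxPeerDiff)) > math.Abs(float64(maxDiff)) {
			maxDiff = maxPeerDiff
			maxRatio = float64(maxPeerDiff) / float64(originSize)
		}
	}
	return maxDiff, maxRatio * 100
}

func main() {
	blocks := [][]replicaSize{
		{{"origin", 3423}, {"peer1", 8463}, {"peer2", 8463}}, // origin is 5040 bytes behind
		{{"origin", 987}, {"peer1", 578}},                    // origin is 409 bytes ahead
	}
	diff, pct := maxBlockSizeDiff("origin", blocks)
	fmt.Printf("max diff: %d bytes (%.1f%%)\n", diff, pct) // -5040 bytes (-147.2%)
}
```

This mirrors the repair test further down, where the emitted gauge is `sizes[0]-sizes[2]` for the first block because its divergence dominates the second block's.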
+
type repairFn func() error
type sleepFn func(d time.Duration)
@@ -563,7 +629,7 @@ func (r *dbRepairer) Repair() error {
}()
multiErr := xerrors.NewMultiError()
- namespaces, err := r.database.GetOwnedNamespaces()
+ namespaces, err := r.database.OwnedNamespaces()
if err != nil {
return err
}
@@ -606,8 +672,9 @@ func (r *dbRepairer) Repair() error {
if err := r.repairNamespaceBlockstart(n, blockStart); err != nil {
multiErr = multiErr.Add(err)
+ } else {
+ hasRepairedABlockStart = true
}
- hasRepairedABlockStart = true
return true
})
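
Reviewer note: the behavioral fix above is subtle, so here is a minimal sketch (`repairAll` and its arguments are illustrative names, not the repairer's API) of the corrected bookkeeping: a block start only counts as repaired when the repair returns nil, so a namespace whose most recent range fails still has its older ranges retried rather than being treated as done.

```go
package main

import (
	"errors"
	"fmt"
)

// repairAll is an illustrative stand-in for the repairer's per-namespace loop.
func repairAll(ranges []string, repair func(string) error) (repairedAny bool, errs []error) {
	for _, r := range ranges {
		if err := repair(r); err != nil {
			errs = append(errs, err) // record the failure, keep iterating
		} else {
			repairedAny = true // only a successful repair counts
		}
	}
	return repairedAny, errs
}

func main() {
	repairedAny, errs := repairAll([]string{"t1", "t0"}, func(r string) error {
		if r == "t1" {
			return errors.New("poison range")
		}
		return nil
	})
	fmt.Println(repairedAny, errs) // true [poison range]
}
```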
diff --git a/src/dbnode/storage/repair/new_map_gen.go b/src/dbnode/storage/repair/new_map_gen.go
index 2454ef9e13..72304592e6 100644
--- a/src/dbnode/storage/repair/new_map_gen.go
+++ b/src/dbnode/storage/repair/new_map_gen.go
@@ -28,7 +28,7 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/dbnode/storage/repair_test.go b/src/dbnode/storage/repair_test.go
index fce5974335..8cc69c9605 100644
--- a/src/dbnode/storage/repair_test.go
+++ b/src/dbnode/storage/repair_test.go
@@ -21,6 +21,7 @@
package storage
import (
+ "errors"
"sync"
"testing"
"time"
@@ -269,7 +270,8 @@ func testDatabaseShardRepairerRepair(t *testing.T, withLimit bool) {
databaseShardRepairer := newShardRepairer(opts, rpOpts)
repairer := databaseShardRepairer.(shardRepairer)
- repairer.recordFn = func(nsID ident.ID, shard databaseShard, diffRes repair.MetadataComparisonResult) {
+ repairer.recordFn = func(origin topology.Host, nsID ident.ID, shard databaseShard,
+ diffRes repair.MetadataComparisonResult) {
resNamespace = nsID
resShard = shard
resDiff = diffRes
@@ -369,11 +371,12 @@ func TestDatabaseShardRepairerRepairMultiSession(t *testing.T) {
copts = opts.ClockOptions()
iopts = opts.InstrumentOptions()
rtopts = defaultTestRetentionOpts
+ scope = tally.NewTestScope("", nil)
)
opts = opts.
SetClockOptions(copts.SetNowFn(nowFn)).
- SetInstrumentOptions(iopts.SetMetricsScope(tally.NoopScope))
+ SetInstrumentOptions(iopts.SetMetricsScope(scope))
var (
namespaceID = ident.StringID("testNamespace")
@@ -386,7 +389,7 @@ func TestDatabaseShardRepairerRepairMultiSession(t *testing.T) {
IncludeLastRead: false,
}
- sizes = []int64{1, 2, 3, 4}
+ sizes = []int64{3423, 987, 8463, 578}
checksums = []uint32{4, 5, 6, 7}
lastRead = now.Add(-time.Minute)
shardID = uint32(0)
@@ -422,10 +425,12 @@ func TestDatabaseShardRepairerRepairMultiSession(t *testing.T) {
inputBlocks := []block.ReplicaMetadata{
{
- Metadata: block.NewMetadata(ident.StringID("foo"), ident.Tags{}, now.Add(30*time.Minute), sizes[0], &checksums[0], lastRead),
+ // Peer block size sizes[2] is different from origin block size sizes[0].
+ Metadata: block.NewMetadata(ident.StringID("foo"), ident.Tags{}, now.Add(30*time.Minute), sizes[2], &checksums[0], lastRead),
},
{
- Metadata: block.NewMetadata(ident.StringID("foo"), ident.Tags{}, now.Add(time.Hour), sizes[0], &checksums[1], lastRead),
+ // Peer block size sizes[3] is different from origin block size sizes[1].
+ Metadata: block.NewMetadata(ident.StringID("foo"), ident.Tags{}, now.Add(time.Hour), sizes[3], &checksums[1], lastRead),
},
{
// Mismatch checksum so should trigger repair of this series.
@@ -498,29 +503,16 @@ func TestDatabaseShardRepairerRepairMultiSession(t *testing.T) {
Return(peerBlocksIter, nil)
}
- var (
- resNamespace ident.ID
- resShard databaseShard
- resDiff repair.MetadataComparisonResult
- )
-
databaseShardRepairer := newShardRepairer(opts, rpOpts)
repairer := databaseShardRepairer.(shardRepairer)
- repairer.recordFn = func(nsID ident.ID, shard databaseShard, diffRes repair.MetadataComparisonResult) {
- resNamespace = nsID
- resShard = shard
- resDiff = diffRes
- }
var (
ctx = context.NewContext()
nsCtx = namespace.Context{ID: namespaceID}
)
- require.NoError(t, err)
- repairer.Repair(ctx, nsCtx, nsMeta, repairTimeRange, shard)
+ resDiff, err := repairer.Repair(ctx, nsCtx, nsMeta, repairTimeRange, shard)
- require.Equal(t, namespaceID, resNamespace)
- require.Equal(t, resShard, shard)
+ require.NoError(t, err)
require.Equal(t, int64(2), resDiff.NumSeries)
require.Equal(t, int64(3), resDiff.NumBlocks)
@@ -546,7 +538,19 @@ func TestDatabaseShardRepairerRepairMultiSession(t *testing.T) {
series, exists = sizeDiffSeries.Get(ident.StringID("foo"))
require.True(t, exists)
blocks = series.Metadata.Blocks()
- require.Equal(t, 1, len(blocks))
+ require.Equal(t, 2, len(blocks))
+ // Validate first block
+ currBlock, exists = blocks[xtime.ToUnixNano(now.Add(30*time.Minute))]
+ require.True(t, exists)
+ require.Equal(t, now.Add(30*time.Minute), currBlock.Start())
+ expected = []block.ReplicaMetadata{
+ // Size difference for series "foo".
+ {Host: origin, Metadata: block.NewMetadata(ident.StringID("foo"), ident.Tags{}, now.Add(30*time.Minute), sizes[0], &checksums[0], lastRead)},
+ {Host: hosts[0], Metadata: inputBlocks[0].Metadata},
+ {Host: hosts[1], Metadata: inputBlocks[0].Metadata},
+ }
+ require.Equal(t, expected, currBlock.Metadata())
+ // Validate second block
currBlock, exists = blocks[xtime.ToUnixNano(now.Add(time.Hour))]
require.True(t, exists)
require.Equal(t, now.Add(time.Hour), currBlock.Start())
@@ -557,10 +561,27 @@ func TestDatabaseShardRepairerRepairMultiSession(t *testing.T) {
{Host: hosts[1], Metadata: inputBlocks[1].Metadata},
}
require.Equal(t, expected, currBlock.Metadata())
+
+ // Validate the expected metrics were emitted
+ scopeSnapshot := scope.Snapshot()
+ countersSnapshot := scopeSnapshot.Counters()
+ gaugesSnapshot := scopeSnapshot.Gauges()
+ require.Equal(t, int64(2),
+ countersSnapshot["repair.series+namespace=testNamespace,resultType=total,shard=0"].Value())
+ require.Equal(t, int64(3),
+ countersSnapshot["repair.blocks+namespace=testNamespace,resultType=total,shard=0"].Value())
+ // Validate that the first block's divergence is emitted instead of the second block's, because the first
+ // block has diverged more from its peers than the second block has.
+ scopeTags := map[string]string{"namespace": "testNamespace", "resultType": "sizeDiff", "shard": "0"}
+ require.Equal(t, float64(sizes[0]-sizes[2]),
+ gaugesSnapshot[tally.KeyForPrefixedStringMap("repair.max-block-size-diff", scopeTags)].Value())
+ require.Equal(t, float64(100*(sizes[0]-sizes[2]))/float64(sizes[0]),
+ gaugesSnapshot[tally.KeyForPrefixedStringMap("repair.max-block-size-diff-as-percentage", scopeTags)].Value())
}
type expectedRepair struct {
- repairRange xtime.Range
+ expectedRepairRange xtime.Range
+ mockRepairResult error
}
func TestDatabaseRepairPrioritizationLogic(t *testing.T) {
@@ -594,9 +615,13 @@ func TestDatabaseRepairPrioritizationLogic(t *testing.T) {
expectedNS2Repair expectedRepair
}{
{
- title: "repairs most recent block if no repair state",
- expectedNS1Repair: expectedRepair{xtime.Range{Start: flushTimeEnd, End: flushTimeEnd.Add(blockSize)}},
- expectedNS2Repair: expectedRepair{xtime.Range{Start: flushTimeEnd, End: flushTimeEnd.Add(blockSize)}},
+ title: "repairs most recent block if no repair state",
+ expectedNS1Repair: expectedRepair{
+ expectedRepairRange: xtime.Range{Start: flushTimeEnd, End: flushTimeEnd.Add(blockSize)},
+ },
+ expectedNS2Repair: expectedRepair{
+ expectedRepairRange: xtime.Range{Start: flushTimeEnd, End: flushTimeEnd.Add(blockSize)},
+ },
},
{
title: "repairs next unrepaired block in reverse order if some (but not all) blocks have been repaired",
@@ -614,8 +639,12 @@ func TestDatabaseRepairPrioritizationLogic(t *testing.T) {
},
},
},
- expectedNS1Repair: expectedRepair{xtime.Range{Start: flushTimeStart, End: flushTimeStart.Add(blockSize)}},
- expectedNS2Repair: expectedRepair{xtime.Range{Start: flushTimeStart, End: flushTimeStart.Add(blockSize)}},
+ expectedNS1Repair: expectedRepair{
+ expectedRepairRange: xtime.Range{Start: flushTimeStart, End: flushTimeStart.Add(blockSize)},
+ },
+ expectedNS2Repair: expectedRepair{
+ expectedRepairRange: xtime.Range{Start: flushTimeStart, End: flushTimeStart.Add(blockSize)},
+ },
},
{
title: "repairs least recently repaired block if all blocks have been repaired",
@@ -641,8 +670,12 @@ func TestDatabaseRepairPrioritizationLogic(t *testing.T) {
},
},
},
- expectedNS1Repair: expectedRepair{xtime.Range{Start: flushTimeStart, End: flushTimeStart.Add(blockSize)}},
- expectedNS2Repair: expectedRepair{xtime.Range{Start: flushTimeStart, End: flushTimeStart.Add(blockSize)}},
+ expectedNS1Repair: expectedRepair{
+ expectedRepairRange: xtime.Range{Start: flushTimeStart, End: flushTimeStart.Add(blockSize)},
+ },
+ expectedNS2Repair: expectedRepair{
+ expectedRepairRange: xtime.Range{Start: flushTimeStart, End: flushTimeStart.Add(blockSize)},
+ },
},
}
@@ -675,11 +708,135 @@ func TestDatabaseRepairPrioritizationLogic(t *testing.T) {
ns1.EXPECT().ID().Return(ident.StringID("ns1")).AnyTimes()
ns2.EXPECT().ID().Return(ident.StringID("ns2")).AnyTimes()
- ns1.EXPECT().Repair(gomock.Any(), tc.expectedNS1Repair.repairRange)
- ns2.EXPECT().Repair(gomock.Any(), tc.expectedNS2Repair.repairRange)
+ ns1.EXPECT().Repair(gomock.Any(), tc.expectedNS1Repair.expectedRepairRange)
+ ns2.EXPECT().Repair(gomock.Any(), tc.expectedNS2Repair.expectedRepairRange)
- mockDatabase.EXPECT().GetOwnedNamespaces().Return(namespaces, nil)
+ mockDatabase.EXPECT().OwnedNamespaces().Return(namespaces, nil)
require.Nil(t, repairer.Repair())
})
}
}
+
+// The database repairer repairs blocks in decreasing time ranges for each namespace. This test verifies that if the
+// repairer fails to repair a time range of a namespace, it still attempts to repair that namespace's earlier corrupt
+// time ranges instead of skipping them.
+func TestDatabaseRepairSkipsPoisonShard(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ var (
+ rOpts = retention.NewOptions().
+ SetRetentionPeriod(retention.NewOptions().BlockSize() * 2)
+ nsOpts = namespace.NewOptions().
+ SetRetentionOptions(rOpts)
+ blockSize = rOpts.BlockSize()
+
+ // Set current time such that the previous block is flushable.
+ now = time.Now().Truncate(blockSize).Add(rOpts.BufferPast()).Add(time.Second)
+
+ flushTimeStart = retention.FlushTimeStart(rOpts, now)
+ flushTimeEnd = retention.FlushTimeEnd(rOpts, now)
+
+ flushTimeEndNano = xtime.ToUnixNano(flushTimeEnd)
+ )
+ require.NoError(t, nsOpts.Validate())
+ // Ensure only two flushable blocks in retention to make test logic simpler.
+ require.Equal(t, blockSize, flushTimeEnd.Sub(flushTimeStart))
+
+ testCases := []struct {
+ title string
+ repairState repairStatesByNs
+ expectedNS1Repairs []expectedRepair
+ expectedNS2Repairs []expectedRepair
+ }{
+ {
+ // Test that a corrupt ns1 time range (flushTimeEnd, flushTimeEnd + blockSize) does not prevent past time
+ // ranges (flushTimeStart, flushTimeStart + blockSize) from being repaired. Also test that the least recently
+ // repaired policy is honored even when repairing one of the time ranges (flushTimeStart, flushTimeStart +
+ // blockSize) on ns2 fails.
+ title: "attempts to keep repairing time ranges before poison time ranges",
+ repairState: repairStatesByNs{
+ "ns2": namespaceRepairStateByTime{
+ flushTimeEndNano: repairState{
+ Status: repairSuccess,
+ LastAttempt: time.Time{},
+ },
+ },
+ },
+ expectedNS1Repairs: []expectedRepair{
+ {
+ xtime.Range{Start: flushTimeEnd, End: flushTimeEnd.Add(blockSize)},
+ errors.New("ns1 repair error"),
+ },
+ {
+ xtime.Range{Start: flushTimeStart, End: flushTimeStart.Add(blockSize)},
+ nil,
+ },
+ },
+ expectedNS2Repairs: []expectedRepair{
+ {
+ xtime.Range{Start: flushTimeStart, End: flushTimeStart.Add(blockSize)},
+ errors.New("ns2 repair error"),
+ },
+ {
+ xtime.Range{Start: flushTimeEnd, End: flushTimeEnd.Add(blockSize)},
+ nil,
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.title, func(t *testing.T) {
+ opts := DefaultTestOptions().SetRepairOptions(testRepairOptions(ctrl))
+ mockDatabase := NewMockdatabase(ctrl)
+
+ databaseRepairer, err := newDatabaseRepairer(mockDatabase, opts)
+ require.NoError(t, err)
+ repairer := databaseRepairer.(*dbRepairer)
+ repairer.nowFn = func() time.Time {
+ return now
+ }
+ if tc.repairState == nil {
+ tc.repairState = repairStatesByNs{}
+ }
+ repairer.repairStatesByNs = tc.repairState
+
+ mockDatabase.EXPECT().IsBootstrapped().Return(true)
+
+ var (
+ ns1 = NewMockdatabaseNamespace(ctrl)
+ ns2 = NewMockdatabaseNamespace(ctrl)
+ namespaces = []databaseNamespace{ns1, ns2}
+ )
+ ns1.EXPECT().Options().Return(nsOpts).AnyTimes()
+ ns2.EXPECT().Options().Return(nsOpts).AnyTimes()
+
+ ns1.EXPECT().ID().Return(ident.StringID("ns1")).AnyTimes()
+ ns2.EXPECT().ID().Return(ident.StringID("ns2")).AnyTimes()
+
+ // Set up expected ns1 repair invocations for each repaired time range.
+ var ns1RepairExpectations = make([]*gomock.Call, len(tc.expectedNS1Repairs))
+ for i, ns1Repair := range tc.expectedNS1Repairs {
+ ns1RepairExpectations[i] = ns1.EXPECT().
+ Repair(gomock.Any(), ns1Repair.expectedRepairRange).
+ Return(ns1Repair.mockRepairResult)
+ }
+ gomock.InOrder(ns1RepairExpectations...)
+
+ // Set up expected ns2 repair invocations for each repaired time range.
+ var ns2RepairExpectations = make([]*gomock.Call, len(tc.expectedNS2Repairs))
+ for i, ns2Repair := range tc.expectedNS2Repairs {
+ ns2RepairExpectations[i] = ns2.EXPECT().
+ Repair(gomock.Any(), ns2Repair.expectedRepairRange).
+ Return(ns2Repair.mockRepairResult)
+ }
+ gomock.InOrder(ns2RepairExpectations...)
+
+ mockDatabase.EXPECT().OwnedNamespaces().Return(namespaces, nil)
+
+ require.NotNil(t, repairer.Repair())
+ })
+ }
+}
diff --git a/src/dbnode/storage/series/README.md b/src/dbnode/storage/series/README.md
new file mode 100644
index 0000000000..d1d2f73230
--- /dev/null
+++ b/src/dbnode/storage/series/README.md
@@ -0,0 +1,23 @@
+# series
+
+Series related documentation.
+
+## Series flush lifecycle
+
+Warm/cold writes end up in versioned buckets based on write type (`ColdWrite` or `WarmWrite`). When flushes occur, we fetch in-mem data from all write-type-specific buckets to persist.
+
+For warm flushes, we write all warm written buckets to disk and mark the state of the block as `WarmRetrievable`.
+
+For cold flushes, we merge this data w/ data that's already on disk in `fs/merger.go` and write to disk. Once finished, we update the `ColdVersionRetrievable` to the cold version we just wrote to disk.
+
+Data is only evicted from memory during a `Tick()`. This evicts either cold buckets up to flush state `ColdVersionRetrievable`, or warm buckets that are marked as `WarmRetrievable` (i.e. warm blocks that have already been warm flushed to disk); the sketch below illustrates the eviction rule.
+
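Reviewer note: a minimal, self-contained sketch of the eviction rule described above, under assumed names (`bucket`, `blockState`, and `evictable` are illustrative, not the series package's API):

```go
package main

import "fmt"

type writeType int

const (
	warmWrite writeType = iota
	coldWrite
)

// bucket and blockState are illustrative stand-ins for the real series types.
type bucket struct {
	writeType writeType
	version   int // 0 means still writable
}

type blockState struct {
	warmRetrievable        bool
	coldVersionRetrievable int
}

func evictable(b bucket, st blockState) bool {
	const writableVersion = 0
	if b.version == writableVersion {
		return false // still accepting writes, never evicted
	}
	if b.writeType == coldWrite {
		// Cold buckets are evicted once their version has been cold flushed.
		return b.version <= st.coldVersionRetrievable
	}
	// Warm buckets are evicted once the block is warm flushed and retrievable.
	return st.warmRetrievable
}

func main() {
	st := blockState{warmRetrievable: true, coldVersionRetrievable: 2}
	fmt.Println(evictable(bucket{coldWrite, 2}, st)) // true: cold version on disk
	fmt.Println(evictable(bucket{coldWrite, 3}, st)) // false: not yet flushed
	fmt.Println(evictable(bucket{warmWrite, 1}, st)) // true: warm flush completed
}
```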
+## Snapshotting/Bootstrap
+
+Snapshots work by merging all buckets for a series buffer, regardless of write type, into streams and persisting them to disk. Snapshots are read by the commitlog bootstrapper, and snapshotted series data is loaded into `BufferBucket.loadedBlocks`. Attempts to call `series.LoadBlock()` for a `WarmWrite` block will return an error if the block already exists on disk.
+
+Series snapshots persist writes in both warm & cold buckets. During a flush, we persist snapshot files w/ a commit log ID. This ID is later used during the async cleanup process to delete rotated commit logs.
+
+## Repair
+
+Shard repairs load data as cold writes into series buffer buckets.
diff --git a/src/dbnode/storage/series/buffer.go b/src/dbnode/storage/series/buffer.go
index b777c863c5..9f3f9b9802 100644
--- a/src/dbnode/storage/series/buffer.go
+++ b/src/dbnode/storage/series/buffer.go
@@ -21,6 +21,7 @@
package series
import (
+ "bytes"
"errors"
"fmt"
"sort"
@@ -28,12 +29,10 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/clock"
- "github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/storage/block"
- m3dberrors "github.com/m3db/m3/src/dbnode/storage/errors"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/x/context"
@@ -55,6 +54,7 @@ const (
var (
timeZero time.Time
errIncompleteMerge = errors.New("bucket merge did not result in only one encoder")
+ errTooManyEncoders = xerrors.NewInvalidParamsError(errors.New("too many encoders per block"))
)
const (
@@ -75,29 +75,33 @@ const (
)
type databaseBuffer interface {
+ MoveTo(
+ buffer databaseBuffer,
+ nsCtx namespace.Context,
+ ) error
+
Write(
ctx context.Context,
+ id ident.ID,
timestamp time.Time,
value float64,
unit xtime.Unit,
annotation []byte,
wOpts WriteOptions,
- ) (bool, error)
+ ) (bool, WriteType, error)
Snapshot(
ctx context.Context,
blockStart time.Time,
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
persistFn persist.DataFn,
nsCtx namespace.Context,
- ) error
+ ) (SnapshotResult, error)
WarmFlush(
ctx context.Context,
blockStart time.Time,
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
persistFn persist.DataFn,
nsCtx namespace.Context,
) (FlushOutcome, error)
@@ -113,7 +117,7 @@ type databaseBuffer interface {
start time.Time,
version int,
nsCtx namespace.Context,
- ) ([]xio.BlockReader, error)
+ ) (block.FetchBlockResult, error)
FetchBlocks(
ctx context.Context,
@@ -129,6 +133,8 @@ type databaseBuffer interface {
IsEmpty() bool
+ IsEmptyAtBlockStart(time.Time) bool
+
ColdFlushBlockStarts(blockStates map[xtime.UnixNano]BlockState) OptimizedTimes
Stats() bufferStats
@@ -141,7 +147,6 @@ type databaseBuffer interface {
}
type databaseBufferResetOptions struct {
- ID ident.ID
BlockRetriever QueryableBlockRetriever
Options Options
}
@@ -214,7 +219,6 @@ func (t *OptimizedTimes) ForEach(fn func(t xtime.UnixNano)) {
}
type dbBuffer struct {
- id ident.ID
opts Options
nowFn clock.NowFn
@@ -243,7 +247,6 @@ func newDatabaseBuffer() databaseBuffer {
}
func (b *dbBuffer) Reset(opts databaseBufferResetOptions) {
- b.id = opts.ID
b.opts = opts.Options
b.nowFn = opts.Options.ClockOptions().NowFn()
b.bucketPool = opts.Options.BufferBucketPool()
@@ -251,30 +254,65 @@ func (b *dbBuffer) Reset(opts databaseBufferResetOptions) {
b.blockRetriever = opts.BlockRetriever
}
+func (b *dbBuffer) MoveTo(
+ buffer databaseBuffer,
+ nsCtx namespace.Context,
+) error {
+ blockSize := b.opts.RetentionOptions().BlockSize()
+ for _, buckets := range b.bucketsMap {
+ for _, bucket := range buckets.buckets {
+ // Load any existing blocks.
+ for _, block := range bucket.loadedBlocks {
+ // Load block.
+ buffer.Load(block, bucket.writeType)
+ }
+
+ // Load encoders.
+ for _, elem := range bucket.encoders {
+ if elem.encoder.Len() == 0 {
+ // No data.
+ continue
+ }
+ // Take ownership of the encoder.
+ segment := elem.encoder.Discard()
+ // Create block and load into new buffer.
+ block := b.opts.DatabaseBlockOptions().DatabaseBlockPool().Get()
+ block.Reset(bucket.start, blockSize, segment, nsCtx)
+ // Load block.
+ buffer.Load(block, bucket.writeType)
+ }
+ }
+ }
+
+ return nil
+}
+
func (b *dbBuffer) Write(
ctx context.Context,
+ id ident.ID,
timestamp time.Time,
value float64,
unit xtime.Unit,
annotation []byte,
wOpts WriteOptions,
-) (bool, error) {
+) (bool, WriteType, error) {
var (
ropts = b.opts.RetentionOptions()
bufferPast = ropts.BufferPast()
bufferFuture = ropts.BufferFuture()
now = b.nowFn()
- pastLimit = now.Add(-1 * bufferPast)
- futureLimit = now.Add(bufferFuture)
+ pastLimit = now.Add(-1 * bufferPast).Truncate(time.Second)
+ futureLimit = now.Add(bufferFuture).Truncate(time.Second)
blockSize = ropts.BlockSize()
blockStart = timestamp.Truncate(blockSize)
writeType WriteType
)
+
switch {
case wOpts.BootstrapWrite:
exists, err := b.blockRetriever.IsBlockRetrievable(blockStart)
if err != nil {
- return false, err
+ return false, writeType, err
}
// Bootstrap writes are allowed to be outside of time boundaries
// and determined as cold or warm writes depending on whether
@@ -285,14 +323,14 @@ func (b *dbBuffer) Write(
writeType = ColdWrite
}
- case !pastLimit.Before(timestamp):
+ case timestamp.Before(pastLimit):
writeType = ColdWrite
if !b.opts.ColdWritesEnabled() {
- return false, xerrors.NewInvalidParamsError(
+ return false, writeType, xerrors.NewInvalidParamsError(
fmt.Errorf("datapoint too far in past: "+
"id=%s, off_by=%s, timestamp=%s, past_limit=%s, "+
"timestamp_unix_nanos=%d, past_limit_unix_nanos=%d",
- b.id.Bytes(), pastLimit.Sub(timestamp).String(),
+ id.Bytes(), pastLimit.Sub(timestamp).String(),
timestamp.Format(errTimestampFormat),
pastLimit.Format(errTimestampFormat),
timestamp.UnixNano(), pastLimit.UnixNano()))
@@ -301,11 +339,11 @@ func (b *dbBuffer) Write(
case !futureLimit.After(timestamp):
writeType = ColdWrite
if !b.opts.ColdWritesEnabled() {
- return false, xerrors.NewInvalidParamsError(
+ return false, writeType, xerrors.NewInvalidParamsError(
fmt.Errorf("datapoint too far in future: "+
"id=%s, off_by=%s, timestamp=%s, future_limit=%s, "+
"timestamp_unix_nanos=%d, future_limit_unix_nanos=%d",
- b.id.Bytes(), timestamp.Sub(futureLimit).String(),
+ id.Bytes(), timestamp.Sub(futureLimit).String(),
timestamp.Format(errTimestampFormat),
futureLimit.Format(errTimestampFormat),
timestamp.UnixNano(), futureLimit.UnixNano()))
@@ -327,18 +365,33 @@ func (b *dbBuffer) Write(
if wOpts.SkipOutOfRetention {
// Allow for datapoint to be skipped since caller does not
// want writes out of retention to fail.
- return false, nil
+ return false, writeType, nil
}
- return false, m3dberrors.ErrTooPast
+ return false, writeType, xerrors.NewInvalidParamsError(
+ fmt.Errorf("datapoint too far in past and out of retention: "+
+ "id=%s, off_by=%s, timestamp=%s, retention_past_limit=%s, "+
+ "timestamp_unix_nanos=%d, retention_past_limit_unix_nanos=%d",
+ id.Bytes(), retentionLimit.Sub(timestamp).String(),
+ timestamp.Format(errTimestampFormat),
+ retentionLimit.Format(errTimestampFormat),
+ timestamp.UnixNano(), retentionLimit.UnixNano()))
}
- if !now.Add(ropts.FutureRetentionPeriod()).Add(blockSize).After(timestamp) {
+ futureRetentionLimit := now.Add(ropts.FutureRetentionPeriod())
+ if !futureRetentionLimit.After(timestamp) {
if wOpts.SkipOutOfRetention {
// Allow for datapoint to be skipped since caller does not
// want writes out of retention to fail.
- return false, nil
+ return false, writeType, nil
}
- return false, m3dberrors.ErrTooFuture
+ return false, writeType, xerrors.NewInvalidParamsError(
+ fmt.Errorf("datapoint too far in future and out of retention: "+
+ "id=%s, off_by=%s, timestamp=%s, retention_future_limit=%s, "+
+ "timestamp_unix_nanos=%d, retention_future_limit_unix_nanos=%d",
+ id.Bytes(), timestamp.Sub(futureRetentionLimit).String(),
+ timestamp.Format(errTimestampFormat),
+ futureRetentionLimit.Format(errTimestampFormat),
+ timestamp.UnixNano(), futureRetentionLimit.UnixNano()))
}
b.opts.Stats().IncColdWrites()
@@ -355,7 +408,8 @@ func (b *dbBuffer) Write(
value = wOpts.TransformOptions.ForceValue
}
- return buckets.write(timestamp, value, unit, annotation, writeType, wOpts.SchemaDesc)
+ ok, err := buckets.write(timestamp, value, unit, annotation, writeType, wOpts.SchemaDesc)
+ return ok, writeType, err
}
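
Reviewer note: the border semantics changed in this hunk: limits are now truncated to the second so the cold/warm border is stable across calls, a write on the past border itself is still warm (`timestamp.Before(pastLimit)` classifies cold), and a write at the future limit is already cold (`!futureLimit.After(timestamp)`). A minimal sketch with illustrative names:

```go
package main

import (
	"fmt"
	"time"
)

// classify sketches the warm/cold decision above; names are illustrative.
// Limits are truncated to the second so the border is stable, a write on
// the past border itself is still warm, and a write at the future limit
// is already cold.
func classify(now, ts time.Time, bufferPast, bufferFuture time.Duration) string {
	pastLimit := now.Add(-bufferPast).Truncate(time.Second)
	futureLimit := now.Add(bufferFuture).Truncate(time.Second)
	switch {
	case ts.Before(pastLimit):
		return "cold"
	case !futureLimit.After(ts):
		return "cold"
	default:
		return "warm"
	}
}

func main() {
	now := time.Now().Truncate(time.Second)
	past, future := 10*time.Minute, 2*time.Minute
	fmt.Println(classify(now, now.Add(-past), past, future))             // warm: on the past border
	fmt.Println(classify(now, now.Add(-past-time.Second), past, future)) // cold: beyond the border
	fmt.Println(classify(now, now.Add(future), past, future))            // cold: at the future limit
}
```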
func (b *dbBuffer) IsEmpty() bool {
@@ -365,6 +419,14 @@ func (b *dbBuffer) IsEmpty() bool {
return len(b.bucketsMap) == 0
}
+func (b *dbBuffer) IsEmptyAtBlockStart(start time.Time) bool {
+ bv, exists := b.bucketVersionsAt(start)
+ if !exists {
+ return true
+ }
+ return bv.streamsLen() == 0
+}
+
func (b *dbBuffer) ColdFlushBlockStarts(blockStates map[xtime.UnixNano]BlockState) OptimizedTimes {
var times OptimizedTimes
@@ -445,6 +507,8 @@ func (b *dbBuffer) Tick(blockStates ShardBlockStateSnapshot, nsCtx namespace.Con
}
}
+ buckets.recordActiveEncoders()
+
// Once we've evicted all eligible buckets, we merge duplicate encoders
// in the remaining ones to try and reclaim memory.
merges, err := buckets.merge(WarmWrite, nsCtx)
@@ -474,14 +538,18 @@ func (b *dbBuffer) Load(bl block.DatabaseBlock, writeType WriteType) {
func (b *dbBuffer) Snapshot(
ctx context.Context,
blockStart time.Time,
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
persistFn persist.DataFn,
nsCtx namespace.Context,
-) error {
+) (SnapshotResult, error) {
+ var (
+ start = b.nowFn()
+ result SnapshotResult
+ )
+
buckets, exists := b.bucketVersionsAt(blockStart)
if !exists {
- return nil
+ return result, nil
}
// Snapshot must take both cold and warm writes because cold flushes don't
@@ -489,13 +557,22 @@ func (b *dbBuffer) Snapshot(
// warm flush has happened).
streams, err := buckets.mergeToStreams(ctx, streamsOptions{filterWriteType: false, nsCtx: nsCtx})
if err != nil {
- return err
+ return result, err
}
- numStreams := len(streams)
- var mergedStream xio.SegmentReader
- if numStreams == 1 {
- mergedStream = streams[0]
+ afterMergeByBucket := b.nowFn()
+ result.Stats.TimeMergeByBucket = afterMergeByBucket.Sub(start)
+
+ var (
+ numStreams = len(streams)
+ mergeAcrossBuckets = numStreams != 1
+ segment ts.Segment
+ )
+ if !mergeAcrossBuckets {
+ segment, err = streams[0].Segment()
+ if err != nil {
+ return result, err
+ }
} else {
// We may need to merge again here because the regular merge method does
// not merge warm and cold buckets or buckets that have different versions.
@@ -508,8 +585,11 @@ func (b *dbBuffer) Snapshot(
encoder := bopts.EncoderPool().Get()
encoder.Reset(blockStart, bopts.DatabaseBlockAllocSize(), nsCtx.Schema)
iter := b.opts.MultiReaderIteratorPool().Get()
+ var encoderClosed bool
defer func() {
- encoder.Close()
+ if !encoderClosed {
+ encoder.Close()
+ }
iter.Close()
}()
iter.Reset(sr, blockStart, b.opts.RetentionOptions().BlockSize(), nsCtx.Schema)
@@ -517,40 +597,45 @@ func (b *dbBuffer) Snapshot(
for iter.Next() {
dp, unit, annotation := iter.Current()
if err := encoder.Encode(dp, unit, annotation); err != nil {
- return err
+ return result, err
}
}
if err := iter.Err(); err != nil {
- return err
+ return result, err
}
- var ok bool
- mergedStream, ok = encoder.Stream(ctx)
- if !ok {
- // Don't write out series with no data.
- return nil
- }
+ segment = encoder.Discard()
+ defer segment.Finalize()
+ encoderClosed = true
}
- segment, err := mergedStream.Segment()
- if err != nil {
- return err
- }
+ afterMergeAcrossBuckets := b.nowFn()
+ result.Stats.TimeMergeAcrossBuckets = afterMergeAcrossBuckets.Sub(afterMergeByBucket)
if segment.Len() == 0 {
// Don't write out series with no data.
- return nil
+ return result, nil
+ }
+
+ checksum := segment.CalculateChecksum()
+
+ afterChecksum := b.nowFn()
+ result.Stats.TimeChecksum = afterChecksum.Sub(afterMergeAcrossBuckets)
+
+ if err := persistFn(metadata, segment, checksum); err != nil {
+ return result, err
}
- checksum := digest.SegmentChecksum(segment)
- return persistFn(id, tags, segment, checksum)
+ result.Stats.TimePersist = b.nowFn().Sub(afterChecksum)
+
+ result.Persist = true
+ return result, nil
}
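
Reviewer note: the stage timings added to the snapshot result follow a simple capture-between-stages pattern: take `nowFn()` after each stage and store the delta. A self-contained sketch (struct, field, and function names are assumptions, not the real `SnapshotResult` API):

```go
package main

import (
	"fmt"
	"time"
)

// snapshotStats mirrors the idea of per-stage snapshot timings; names assumed.
type snapshotStats struct {
	timeMerge    time.Duration
	timeChecksum time.Duration
	timePersist  time.Duration
}

// timeStages captures nowFn() between stages and stores the deltas.
func timeStages(nowFn func() time.Time, merge, checksum, persist func()) snapshotStats {
	var s snapshotStats
	start := nowFn()
	merge()
	afterMerge := nowFn()
	s.timeMerge = afterMerge.Sub(start)
	checksum()
	afterChecksum := nowFn()
	s.timeChecksum = afterChecksum.Sub(afterMerge)
	persist()
	s.timePersist = nowFn().Sub(afterChecksum)
	return s
}

func main() {
	slow := func() { time.Sleep(time.Millisecond) }
	fmt.Printf("%+v\n", timeStages(time.Now, slow, slow, slow))
}
```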
func (b *dbBuffer) WarmFlush(
ctx context.Context,
blockStart time.Time,
- id ident.ID,
- tags ident.Tags,
+ metadata persist.Metadata,
persistFn persist.DataFn,
nsCtx namespace.Context,
) (FlushOutcome, error) {
@@ -603,8 +688,8 @@ func (b *dbBuffer) WarmFlush(
return FlushOutcomeBlockDoesNotExist, nil
}
- checksum := digest.SegmentChecksum(segment)
- err = persistFn(id, tags, segment, checksum)
+ checksum := segment.CalculateChecksum()
+ err = persistFn(metadata, segment, checksum)
if err != nil {
return FlushOutcomeErr, err
}
@@ -668,7 +753,7 @@ func (b *dbBuffer) FetchBlocksForColdFlush(
start time.Time,
version int,
nsCtx namespace.Context,
-) ([]xio.BlockReader, error) {
+) (block.FetchBlockResult, error) {
res := b.fetchBlocks(ctx, []time.Time{start},
streamsOptions{filterWriteType: true, writeType: ColdWrite, nsCtx: nsCtx})
if len(res) == 0 {
@@ -676,27 +761,36 @@ func (b *dbBuffer) FetchBlocksForColdFlush(
// which blocks have cold data that have not yet been flushed.
// If we don't get data here, it means that it has since fallen out of
// retention and has been evicted.
- return nil, nil
+ return block.FetchBlockResult{}, nil
}
if len(res) != 1 {
// Must be only one result if anything at all, since fetchBlocks returns
// one result per block start.
- return nil, fmt.Errorf("fetchBlocks did not return just one block for block start %s", start)
+ return block.FetchBlockResult{}, fmt.Errorf("fetchBlocks did not return just one block for block start %s", start)
}
- blocks := res[0].Blocks
+ result := res[0]
buckets, exists := b.bucketVersionsAt(start)
if !exists {
- return nil, fmt.Errorf("buckets do not exist with block start %s", start)
+ return block.FetchBlockResult{}, fmt.Errorf("buckets do not exist with block start %s", start)
}
if bucket, exists := buckets.writableBucket(ColdWrite); exists {
+ // Update the version of the writable bucket (effectively making it not
+ // writable). This marks the bucket as having had a flush attempted,
+ // although the data is only actually persisted to disk successfully at
+ // the shard level once every series has completed the flush process.
+ // The tick following a successful flush to disk will remove this bucket
+ // from memory.
bucket.version = version
- } else {
- return nil, fmt.Errorf("writable bucket does not exist with block start %s", start)
}
+ // No-op if the writable bucket doesn't exist.
+ // This function should only be called for blocks that we know need to be
+ // cold flushed. However, buckets whose cold flush was attempted but
+ // failed still need to be cold flushed; such buckets will have a
+ // non-writable version.
- return blocks, nil
+ return result, nil
}
func (b *dbBuffer) FetchBlocks(ctx context.Context, starts []time.Time, nsCtx namespace.Context) []block.FetchBlockResult {
@@ -716,8 +810,15 @@ func (b *dbBuffer) fetchBlocks(
continue
}
- if streams := buckets.streams(ctx, sOpts); len(streams) > 0 {
- res = append(res, block.NewFetchBlockResult(start, streams, nil))
+ streams := buckets.streams(ctx, sOpts)
+ if len(streams) > 0 {
+ result := block.NewFetchBlockResult(
+ start,
+ streams,
+ nil,
+ )
+ result.FirstWrite = buckets.firstWrite(sOpts)
+ res = append(res, result)
}
}
@@ -935,14 +1036,30 @@ func (b *BufferBucketVersions) resetTo(
func (b *BufferBucketVersions) streams(ctx context.Context, opts streamsOptions) []xio.BlockReader {
var res []xio.BlockReader
for _, bucket := range b.buckets {
- if !opts.filterWriteType || bucket.writeType == opts.writeType {
- res = append(res, bucket.streams(ctx)...)
+ if opts.filterWriteType && bucket.writeType != opts.writeType {
+ continue
}
+ res = append(res, bucket.streams(ctx)...)
}
return res
}
+func (b *BufferBucketVersions) firstWrite(opts streamsOptions) time.Time {
+ var res time.Time
+ for _, bucket := range b.buckets {
+ if opts.filterWriteType && bucket.writeType != opts.writeType {
+ continue
+ }
+ // Get the earliest valid first write time.
+ if res.IsZero() ||
+ (bucket.firstWrite.Before(res) && !bucket.firstWrite.IsZero()) {
+ res = bucket.firstWrite
+ }
+ }
+ return res
+}
+
func (b *BufferBucketVersions) streamsLen() int {
res := 0
for _, bucket := range b.buckets {
@@ -1048,21 +1165,32 @@ func (b *BufferBucketVersions) mergeToStreams(ctx context.Context, opts streamsO
res := make([]xio.SegmentReader, 0, len(buckets))
for _, bucket := range buckets {
- if !opts.filterWriteType || bucket.writeType == opts.writeType {
- stream, ok, err := bucket.mergeToStream(ctx, opts.nsCtx)
- if err != nil {
- return nil, err
- }
- if !ok {
- continue
- }
- res = append(res, stream)
+ if opts.filterWriteType && bucket.writeType != opts.writeType {
+ continue
+ }
+ stream, ok, err := bucket.mergeToStream(ctx, opts.nsCtx)
+ if err != nil {
+ return nil, err
+ }
+ if !ok {
+ continue
}
+ res = append(res, stream)
}
return res, nil
}
+func (b *BufferBucketVersions) recordActiveEncoders() {
+ var numActiveEncoders int
+ for _, bucket := range b.buckets {
+ if bucket.version == writableBucketVersion {
+ numActiveEncoders += len(bucket.encoders)
+ }
+ }
+ b.opts.Stats().RecordEncodersPerBlock(numActiveEncoders)
+}
+
type streamsOptions struct {
filterWriteType bool
writeType WriteType
@@ -1079,6 +1207,7 @@ type BufferBucket struct {
loadedBlocks []block.DatabaseBlock
version int
writeType WriteType
+ firstWrite time.Time
}
type inOrderEncoder struct {
@@ -1105,6 +1234,7 @@ func (b *BufferBucket) resetTo(
// We would only ever create a bucket for it to be writable.
b.version = writableBucketVersion
b.writeType = writeType
+ b.firstWrite = time.Time{}
}
func (b *BufferBucket) reset() {
@@ -1120,8 +1250,9 @@ func (b *BufferBucket) write(
schema namespace.SchemaDescr,
) (bool, error) {
datapoint := ts.Datapoint{
- Timestamp: timestamp,
- Value: value,
+ Timestamp: timestamp,
+ TimestampNanos: xtime.ToUnixNano(timestamp),
+ Value: value,
}
// Find the correct encoder to write to
@@ -1129,11 +1260,16 @@ func (b *BufferBucket) write(
for i := range b.encoders {
lastWriteAt := b.encoders[i].lastWriteAt
if timestamp.Equal(lastWriteAt) {
- last, err := b.encoders[i].encoder.LastEncoded()
+ lastDatapoint, err := b.encoders[i].encoder.LastEncoded()
+ if err != nil {
+ return false, err
+ }
+ lastAnnotation, err := b.encoders[i].encoder.LastAnnotation()
if err != nil {
return false, err
}
- if last.Value == value {
+
+ if lastDatapoint.Value == value && bytes.Equal(lastAnnotation, annotation) {
// No-op since matches the current value. Propagates up to callers that
// no value was written.
return false, nil
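
Reviewer note: this hunk tightens the duplicate-write check so a rewrite at the same timestamp is a no-op only when both the value and the annotation match what was last encoded; a changed annotation now forces an upsert too. A minimal sketch of just the predicate:

```go
package main

import (
	"bytes"
	"fmt"
)

// isDuplicateWrite mirrors the check above: a rewrite at the same timestamp
// is a no-op only when both the value and the annotation match.
func isDuplicateWrite(lastValue, value float64, lastAnnotation, annotation []byte) bool {
	return lastValue == value && bytes.Equal(lastAnnotation, annotation)
}

func main() {
	fmt.Println(isDuplicateWrite(1.0, 1.0, []byte("a"), []byte("a"))) // true: no-op
	fmt.Println(isDuplicateWrite(1.0, 1.0, []byte("a"), []byte("b"))) // false: upsert
}
```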
@@ -1147,17 +1283,31 @@ func (b *BufferBucket) write(
}
}
+ var err error
+ defer func() {
+ nowFn := b.opts.ClockOptions().NowFn()
+ if err == nil && b.firstWrite.IsZero() {
+ b.firstWrite = nowFn()
+ }
+ }()
+
// Upsert/last-write-wins semantics.
// NB(r): We push datapoints with the same timestamp but differing
// value into a new encoder later in the stack of in order encoders
// since an encoder is immutable.
// The encoders pushed later will surface their values first.
if idx != -1 {
- err := b.writeToEncoderIndex(idx, datapoint, unit, annotation, schema)
+ err = b.writeToEncoderIndex(idx, datapoint, unit, annotation, schema)
return err == nil, err
}
- // Need a new encoder, we didn't find an encoder to write to
+ // Need a new encoder since we didn't find an existing one to write to.
+ maxEncoders := b.opts.RuntimeOptionsManager().Get().EncodersPerBlockLimit()
+ if maxEncoders != 0 && len(b.encoders) >= int(maxEncoders) {
+ b.opts.Stats().IncEncoderLimitWriteRejected()
+ return false, errTooManyEncoders
+ }
+
b.opts.Stats().IncCreatedEncoders()
bopts := b.opts.DatabaseBlockOptions()
blockSize := b.opts.RetentionOptions().BlockSize()
@@ -1172,7 +1322,7 @@ func (b *BufferBucket) write(
})
idx = len(b.encoders) - 1
- err := b.writeToEncoderIndex(idx, datapoint, unit, annotation, schema)
+ err = b.writeToEncoderIndex(idx, datapoint, unit, annotation, schema)
if err != nil {
encoder.Close()
b.encoders = b.encoders[:idx]
@@ -1253,7 +1403,7 @@ func (b *BufferBucket) checksumIfSingleStream(ctx context.Context) (*uint32, err
return nil, nil
}
- checksum := digest.SegmentChecksum(segment)
+ checksum := segment.CalculateChecksum()
return &checksum, nil
}
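
Reviewer note: the encoder cap added in `write` above is a runtime-tunable guard against unbounded encoder growth within a block. A minimal sketch of the limit semantics (a limit of 0 means unlimited; names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

var errTooManyEncoders = errors.New("too many encoders per block")

// canAddEncoder mirrors the guard above; a limit of 0 means unlimited.
func canAddEncoder(current, limit int) error {
	if limit != 0 && current >= limit {
		return errTooManyEncoders
	}
	return nil
}

func main() {
	fmt.Println(canAddEncoder(3, 0)) // <nil>: no limit configured
	fmt.Println(canAddEncoder(3, 3)) // too many encoders per block
}
```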
diff --git a/src/dbnode/storage/series/buffer_mock.go b/src/dbnode/storage/series/buffer_mock.go
index dfa5d636df..ef7666df64 100644
--- a/src/dbnode/storage/series/buffer_mock.go
+++ b/src/dbnode/storage/series/buffer_mock.go
@@ -62,48 +62,64 @@ func (m *MockdatabaseBuffer) EXPECT() *MockdatabaseBufferMockRecorder {
return m.recorder
}
+// MoveTo mocks base method
+func (m *MockdatabaseBuffer) MoveTo(buffer databaseBuffer, nsCtx namespace.Context) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MoveTo", buffer, nsCtx)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// MoveTo indicates an expected call of MoveTo
+func (mr *MockdatabaseBufferMockRecorder) MoveTo(buffer, nsCtx interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveTo", reflect.TypeOf((*MockdatabaseBuffer)(nil).MoveTo), buffer, nsCtx)
+}
+
// Write mocks base method
-func (m *MockdatabaseBuffer) Write(ctx context.Context, timestamp time.Time, value float64, unit time0.Unit, annotation []byte, wOpts WriteOptions) (bool, error) {
+func (m *MockdatabaseBuffer) Write(ctx context.Context, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte, wOpts WriteOptions) (bool, WriteType, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Write", ctx, timestamp, value, unit, annotation, wOpts)
+ ret := m.ctrl.Call(m, "Write", ctx, id, timestamp, value, unit, annotation, wOpts)
ret0, _ := ret[0].(bool)
- ret1, _ := ret[1].(error)
- return ret0, ret1
+ ret1, _ := ret[1].(WriteType)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
}
// Write indicates an expected call of Write
-func (mr *MockdatabaseBufferMockRecorder) Write(ctx, timestamp, value, unit, annotation, wOpts interface{}) *gomock.Call {
+func (mr *MockdatabaseBufferMockRecorder) Write(ctx, id, timestamp, value, unit, annotation, wOpts interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockdatabaseBuffer)(nil).Write), ctx, timestamp, value, unit, annotation, wOpts)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockdatabaseBuffer)(nil).Write), ctx, id, timestamp, value, unit, annotation, wOpts)
}
// Snapshot mocks base method
-func (m *MockdatabaseBuffer) Snapshot(ctx context.Context, blockStart time.Time, id ident.ID, tags ident.Tags, persistFn persist.DataFn, nsCtx namespace.Context) error {
+func (m *MockdatabaseBuffer) Snapshot(ctx context.Context, blockStart time.Time, metadata persist.Metadata, persistFn persist.DataFn, nsCtx namespace.Context) (SnapshotResult, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Snapshot", ctx, blockStart, id, tags, persistFn, nsCtx)
- ret0, _ := ret[0].(error)
- return ret0
+ ret := m.ctrl.Call(m, "Snapshot", ctx, blockStart, metadata, persistFn, nsCtx)
+ ret0, _ := ret[0].(SnapshotResult)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
// Snapshot indicates an expected call of Snapshot
-func (mr *MockdatabaseBufferMockRecorder) Snapshot(ctx, blockStart, id, tags, persistFn, nsCtx interface{}) *gomock.Call {
+func (mr *MockdatabaseBufferMockRecorder) Snapshot(ctx, blockStart, metadata, persistFn, nsCtx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Snapshot", reflect.TypeOf((*MockdatabaseBuffer)(nil).Snapshot), ctx, blockStart, id, tags, persistFn, nsCtx)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Snapshot", reflect.TypeOf((*MockdatabaseBuffer)(nil).Snapshot), ctx, blockStart, metadata, persistFn, nsCtx)
}
// WarmFlush mocks base method
-func (m *MockdatabaseBuffer) WarmFlush(ctx context.Context, blockStart time.Time, id ident.ID, tags ident.Tags, persistFn persist.DataFn, nsCtx namespace.Context) (FlushOutcome, error) {
+func (m *MockdatabaseBuffer) WarmFlush(ctx context.Context, blockStart time.Time, metadata persist.Metadata, persistFn persist.DataFn, nsCtx namespace.Context) (FlushOutcome, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "WarmFlush", ctx, blockStart, id, tags, persistFn, nsCtx)
+ ret := m.ctrl.Call(m, "WarmFlush", ctx, blockStart, metadata, persistFn, nsCtx)
ret0, _ := ret[0].(FlushOutcome)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// WarmFlush indicates an expected call of WarmFlush
-func (mr *MockdatabaseBufferMockRecorder) WarmFlush(ctx, blockStart, id, tags, persistFn, nsCtx interface{}) *gomock.Call {
+func (mr *MockdatabaseBufferMockRecorder) WarmFlush(ctx, blockStart, metadata, persistFn, nsCtx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WarmFlush", reflect.TypeOf((*MockdatabaseBuffer)(nil).WarmFlush), ctx, blockStart, id, tags, persistFn, nsCtx)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WarmFlush", reflect.TypeOf((*MockdatabaseBuffer)(nil).WarmFlush), ctx, blockStart, metadata, persistFn, nsCtx)
}
// ReadEncoded mocks base method
@@ -122,10 +138,10 @@ func (mr *MockdatabaseBufferMockRecorder) ReadEncoded(ctx, start, end, nsCtx int
}
// FetchBlocksForColdFlush mocks base method
-func (m *MockdatabaseBuffer) FetchBlocksForColdFlush(ctx context.Context, start time.Time, version int, nsCtx namespace.Context) ([]xio.BlockReader, error) {
+func (m *MockdatabaseBuffer) FetchBlocksForColdFlush(ctx context.Context, start time.Time, version int, nsCtx namespace.Context) (block.FetchBlockResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FetchBlocksForColdFlush", ctx, start, version, nsCtx)
- ret0, _ := ret[0].([]xio.BlockReader)
+ ret0, _ := ret[0].(block.FetchBlockResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -179,6 +195,20 @@ func (mr *MockdatabaseBufferMockRecorder) IsEmpty() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsEmpty", reflect.TypeOf((*MockdatabaseBuffer)(nil).IsEmpty))
}
+// IsEmptyAtBlockStart mocks base method
+func (m *MockdatabaseBuffer) IsEmptyAtBlockStart(arg0 time.Time) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsEmptyAtBlockStart", arg0)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsEmptyAtBlockStart indicates an expected call of IsEmptyAtBlockStart
+func (mr *MockdatabaseBufferMockRecorder) IsEmptyAtBlockStart(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsEmptyAtBlockStart", reflect.TypeOf((*MockdatabaseBuffer)(nil).IsEmptyAtBlockStart), arg0)
+}
+
// ColdFlushBlockStarts mocks base method
func (m *MockdatabaseBuffer) ColdFlushBlockStarts(blockStates map[time0.UnixNano]BlockState) OptimizedTimes {
m.ctrl.T.Helper()
diff --git a/src/dbnode/storage/series/buffer_proto_test.go b/src/dbnode/storage/series/buffer_proto_test.go
index 926c8bd0bb..a67e1145c5 100644
--- a/src/dbnode/storage/series/buffer_proto_test.go
+++ b/src/dbnode/storage/series/buffer_proto_test.go
@@ -24,6 +24,7 @@ import (
"testing"
"time"
+ m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/testdata/prototest"
"github.com/m3db/m3/src/x/ident"
@@ -49,7 +50,8 @@ func newBufferTestProtoOptions(t *testing.T) Options {
SetEncoderPool(prototest.ProtoPools.EncoderPool).
SetMultiReaderIteratorPool(prototest.ProtoPools.MultiReaderIterPool).
SetBufferBucketPool(bufferBucketPool).
- SetBufferBucketVersionsPool(bufferBucketVersionsPool)
+ SetBufferBucketVersionsPool(bufferBucketVersionsPool).
+ SetRuntimeOptionsManager(m3dbruntime.NewOptionsManager())
opts = opts.
SetRetentionOptions(opts.RetentionOptions().
SetBlockSize(2 * time.Minute).
diff --git a/src/dbnode/storage/series/buffer_test.go b/src/dbnode/storage/series/buffer_test.go
index 95484c3de5..61141c8bae 100644
--- a/src/dbnode/storage/series/buffer_test.go
+++ b/src/dbnode/storage/series/buffer_test.go
@@ -29,9 +29,12 @@ import (
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
+ "github.com/m3db/m3/src/dbnode/persist"
+ m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
@@ -44,6 +47,10 @@ import (
"github.com/stretchr/testify/require"
)
+var (
+ testID = ident.StringID("foo")
+)
+
func newBufferTestOptions() Options {
encoderPool := encoding.NewEncoderPool(nil)
multiReaderIteratorPool := encoding.NewMultiReaderIteratorPool(nil)
@@ -64,7 +71,8 @@ func newBufferTestOptions() Options {
SetEncoderPool(encoderPool).
SetMultiReaderIteratorPool(multiReaderIteratorPool).
SetBufferBucketPool(bufferBucketPool).
- SetBufferBucketVersionsPool(bufferBucketVersionsPool)
+ SetBufferBucketVersionsPool(bufferBucketVersionsPool).
+ SetRuntimeOptionsManager(m3dbruntime.NewOptionsManager())
opts = opts.
SetRetentionOptions(opts.RetentionOptions().
SetBlockSize(2 * time.Minute).
@@ -77,14 +85,37 @@ func newBufferTestOptions() Options {
}
// Writes to the buffer, verifying that there is no error and that the value was written.
-func verifyWriteToBuffer(t *testing.T, buffer databaseBuffer,
- v DecodedTestValue, schema namespace.SchemaDescr) {
+func verifyWriteToBufferSuccess(
+ t *testing.T,
+ id ident.ID,
+ buffer databaseBuffer,
+ v DecodedTestValue,
+ schema namespace.SchemaDescr,
+) {
+ verifyWriteToBuffer(t, id, buffer, v, schema, true, false)
+}
+
+func verifyWriteToBuffer(
+ t *testing.T,
+ id ident.ID,
+ buffer databaseBuffer,
+ v DecodedTestValue,
+ schema namespace.SchemaDescr,
+ expectWritten bool,
+ expectErr bool,
+) {
ctx := context.NewContext()
- wasWritten, err := buffer.Write(ctx, v.Timestamp, v.Value, v.Unit,
+ defer ctx.Close()
+
+ wasWritten, _, err := buffer.Write(ctx, id, v.Timestamp, v.Value, v.Unit,
v.Annotation, WriteOptions{SchemaDesc: schema})
- require.NoError(t, err)
- require.True(t, wasWritten)
- ctx.Close()
+
+ if expectErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+ require.Equal(t, expectWritten, wasWritten)
}
func TestBufferWriteTooFuture(t *testing.T) {
@@ -96,13 +127,12 @@ func TestBufferWriteTooFuture(t *testing.T) {
}))
buffer := newDatabaseBuffer().(*dbBuffer)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
ctx := context.NewContext()
defer ctx.Close()
- wasWritten, err := buffer.Write(ctx, curr.Add(rops.BufferFuture()), 1,
+ wasWritten, _, err := buffer.Write(ctx, testID, curr.Add(rops.BufferFuture()), 1,
xtime.Second, nil, WriteOptions{})
assert.False(t, wasWritten)
assert.Error(t, err)
@@ -122,12 +152,14 @@ func TestBufferWriteTooPast(t *testing.T) {
}))
buffer := newDatabaseBuffer().(*dbBuffer)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
ctx := context.NewContext()
defer ctx.Close()
- wasWritten, err := buffer.Write(ctx, curr.Add(-1*rops.BufferPast()), 1, xtime.Second,
+ // Writes are inclusive on the buffer-past start border, so a write must land
+ // strictly before that inclusive border to be a cold write. To test this we
+ // write one second further into the past.
+ wasWritten, _, err := buffer.Write(ctx, testID,
+ curr.Add(-1*rops.BufferPast()-time.Second), 1, xtime.Second,
nil, WriteOptions{})
assert.False(t, wasWritten)
assert.Error(t, err)
@@ -138,6 +170,68 @@ func TestBufferWriteTooPast(t *testing.T) {
assert.True(t, strings.Contains(err.Error(), "past_limit="))
}
+func maxDuration(a, b time.Duration) time.Duration {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func TestBufferWriteColdTooFutureRetention(t *testing.T) {
+ opts := newBufferTestOptions().SetColdWritesEnabled(true)
+ rops := opts.RetentionOptions()
+ curr := time.Now().Truncate(rops.BlockSize())
+ opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
+ return curr
+ }))
+ buffer := newDatabaseBuffer().(*dbBuffer)
+ buffer.Reset(databaseBufferResetOptions{
+ Options: opts,
+ })
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ futureRetention := time.Second +
+ maxDuration(rops.BufferFuture(), rops.FutureRetentionPeriod())
+ wasWritten, _, err := buffer.Write(ctx,
+ testID, curr.Add(futureRetention), 1, xtime.Second, nil, WriteOptions{})
+ assert.False(t, wasWritten)
+ assert.Error(t, err)
+ assert.True(t, xerrors.IsInvalidParams(err))
+ assert.True(t, strings.Contains(err.Error(), "datapoint too far in future and out of retention"))
+ assert.True(t, strings.Contains(err.Error(), "id=foo"))
+ assert.True(t, strings.Contains(err.Error(), "timestamp="))
+ assert.True(t, strings.Contains(err.Error(), "retention_future_limit="))
+}
+
+func TestBufferWriteColdTooPastRetention(t *testing.T) {
+ opts := newBufferTestOptions().SetColdWritesEnabled(true)
+ rops := opts.RetentionOptions()
+ curr := time.Now().Truncate(rops.BlockSize())
+ opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
+ return curr
+ }))
+ buffer := newDatabaseBuffer().(*dbBuffer)
+ buffer.Reset(databaseBufferResetOptions{
+ Options: opts,
+ })
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ pastRetention := time.Second +
+ maxDuration(rops.BufferPast(), rops.RetentionPeriod())
+ wasWritten, _, err := buffer.Write(ctx, testID,
+ curr.Add(-pastRetention), 1, xtime.Second,
+ nil, WriteOptions{})
+ assert.False(t, wasWritten)
+ assert.Error(t, err)
+ assert.True(t, xerrors.IsInvalidParams(err))
+ assert.True(t, strings.Contains(err.Error(), "datapoint too far in past and out of retention"))
+ assert.True(t, strings.Contains(err.Error(), "id=foo"))
+ assert.True(t, strings.Contains(err.Error(), "timestamp="))
+ assert.True(t, strings.Contains(err.Error(), "retention_past_limit="))
+}
+
func TestBufferWriteError(t *testing.T) {
var (
opts = newBufferTestOptions()
@@ -150,13 +244,13 @@ func TestBufferWriteError(t *testing.T) {
return curr
}))
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
defer ctx.Close()
timeUnitNotExist := xtime.Unit(127)
- wasWritten, err := buffer.Write(ctx, curr, 1, timeUnitNotExist, nil, WriteOptions{})
+ wasWritten, _, err := buffer.Write(ctx, testID,
+ curr, 1, timeUnitNotExist, nil, WriteOptions{})
require.False(t, wasWritten)
require.Error(t, err)
}
@@ -174,7 +268,6 @@ func testBufferWriteRead(t *testing.T, opts Options, setAnn setAnnotation) {
}))
buffer := newDatabaseBuffer().(*dbBuffer)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
@@ -190,7 +283,7 @@ func testBufferWriteRead(t *testing.T, opts Options, setAnn setAnnotation) {
}
for _, v := range data {
- verifyWriteToBuffer(t, buffer, v, nsCtx.Schema)
+ verifyWriteToBufferSuccess(t, testID, buffer, v, nsCtx.Schema)
}
ctx := context.NewContext()
@@ -213,7 +306,6 @@ func TestBufferReadOnlyMatchingBuckets(t *testing.T) {
}))
buffer := newDatabaseBuffer().(*dbBuffer)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
@@ -224,7 +316,7 @@ func TestBufferReadOnlyMatchingBuckets(t *testing.T) {
for _, v := range data {
curr = v.Timestamp
- verifyWriteToBuffer(t, buffer, v, nil)
+ verifyWriteToBufferSuccess(t, testID, buffer, v, nil)
}
ctx := context.NewContext()
@@ -256,7 +348,6 @@ func TestBufferWriteOutOfOrder(t *testing.T) {
}))
buffer := newDatabaseBuffer().(*dbBuffer)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
@@ -270,7 +361,7 @@ func TestBufferWriteOutOfOrder(t *testing.T) {
if v.Timestamp.After(curr) {
curr = v.Timestamp
}
- verifyWriteToBuffer(t, buffer, v, nil)
+ verifyWriteToBufferSuccess(t, testID, buffer, v, nil)
}
buckets, ok := buffer.bucketVersionsAt(start)
@@ -334,6 +425,7 @@ func newTestBufferBucketWithCustomData(
) (*BufferBucket, []DecodedTestValue) {
b := &BufferBucket{opts: opts}
b.resetTo(bd.start, bd.writeType, opts)
+ b.firstWrite = opts.ClockOptions().NowFn()()
data := bd.data
// Empty all existing encoders.
@@ -386,9 +478,10 @@ func newTestBufferBucketVersionsWithCustomData(
) (*BufferBucketVersions, []DecodedTestValue) {
newBucket, vals := newTestBufferBucketWithCustomData(t, bd, opts, setAnn)
return &BufferBucketVersions{
- buckets: []*BufferBucket{newBucket},
- start: newBucket.start,
- opts: opts,
+ buckets: []*BufferBucket{newBucket},
+ start: newBucket.start,
+ opts: opts,
+ bucketPool: opts.BufferBucketPool(),
}, vals
}
@@ -400,7 +493,6 @@ func newTestBufferWithCustomData(
) (*dbBuffer, map[xtime.UnixNano][]DecodedTestValue) {
buffer := newDatabaseBuffer().(*dbBuffer)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
expectedMap := make(map[xtime.UnixNano][]DecodedTestValue)
@@ -626,7 +718,6 @@ func TestIndexedBufferWriteOnlyWritesSinglePoint(t *testing.T) {
}))
buffer := newDatabaseBuffer().(*dbBuffer)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
@@ -646,7 +737,8 @@ func TestIndexedBufferWriteOnlyWritesSinglePoint(t *testing.T) {
ForceValue: forceValue,
},
}
- wasWritten, err := buffer.Write(ctx, v.Timestamp, v.Value, v.Unit,
+ wasWritten, _, err := buffer.Write(ctx, testID,
+ v.Timestamp, v.Value, v.Unit,
v.Annotation, writeOpts)
require.NoError(t, err)
expectedWrite := i == 0
@@ -680,7 +772,6 @@ func testBufferFetchBlocks(t *testing.T, opts Options, setAnn setAnnotation) {
buffer := newDatabaseBuffer().(*dbBuffer)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
buffer.bucketsMap[xtime.ToUnixNano(b.start)] = b
@@ -765,7 +856,6 @@ func TestBufferFetchBlocksOneResultPerBlock(t *testing.T) {
buffer := newDatabaseBuffer().(*dbBuffer)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
buffer.bucketsMap[xtime.ToUnixNano(b.start)] = b
@@ -792,7 +882,6 @@ func TestBufferFetchBlocksMetadata(t *testing.T) {
buffer := newDatabaseBuffer().(*dbBuffer)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
buffer.bucketsMap[xtime.ToUnixNano(b.start)] = b
@@ -843,7 +932,6 @@ func TestBufferTickReordersOutOfOrderBuffers(t *testing.T) {
}))
buffer := newDatabaseBuffer().(*dbBuffer)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
@@ -860,7 +948,7 @@ func TestBufferTickReordersOutOfOrderBuffers(t *testing.T) {
for _, v := range data {
curr = v.Timestamp
- verifyWriteToBuffer(t, buffer, v, nil)
+ verifyWriteToBufferSuccess(t, testID, buffer, v, nil)
}
var encoders []encoding.Encoder
@@ -934,7 +1022,6 @@ func TestBufferRemoveBucket(t *testing.T) {
}))
buffer := newDatabaseBuffer().(*dbBuffer)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
@@ -950,7 +1037,7 @@ func TestBufferRemoveBucket(t *testing.T) {
for _, v := range data {
curr = v.Timestamp
- verifyWriteToBuffer(t, buffer, v, nil)
+ verifyWriteToBufferSuccess(t, testID, buffer, v, nil)
}
buckets, exists := buffer.bucketVersionsAt(start)
@@ -1032,7 +1119,6 @@ func testBufferWithEmptyEncoder(t *testing.T, testSnapshot bool) {
return curr
}))
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
@@ -1040,7 +1126,8 @@ func testBufferWithEmptyEncoder(t *testing.T, testSnapshot bool) {
ctx := context.NewContext()
defer ctx.Close()
- wasWritten, err := buffer.Write(ctx, curr, 1, xtime.Second, nil, WriteOptions{})
+ wasWritten, _, err := buffer.Write(ctx, testID,
+ curr, 1, xtime.Second, nil, WriteOptions{})
require.NoError(t, err)
require.True(t, wasWritten)
@@ -1064,21 +1151,26 @@ func testBufferWithEmptyEncoder(t *testing.T, testSnapshot bool) {
}
require.Equal(t, 1, len(encoders))
- assertPersistDataFn := func(id ident.ID, tags ident.Tags, segment ts.Segment, checlsum uint32) error {
+ assertPersistDataFn := func(persist.Metadata, ts.Segment, uint32) error {
t.Fatal("persist fn should not have been called")
return nil
}
+ metadata := persist.NewMetadata(doc.Document{
+ ID: []byte("some-id"),
+ })
+
if testSnapshot {
ctx = context.NewContext()
defer ctx.Close()
- err = buffer.Snapshot(ctx, start, ident.StringID("some-id"), ident.Tags{}, assertPersistDataFn, namespace.Context{})
+
+ _, err = buffer.Snapshot(ctx, start, metadata, assertPersistDataFn, namespace.Context{})
assert.NoError(t, err)
} else {
ctx = context.NewContext()
defer ctx.Close()
_, err = buffer.WarmFlush(
- ctx, start, ident.StringID("some-id"), ident.Tags{}, assertPersistDataFn, namespace.Context{})
+ ctx, start, metadata, assertPersistDataFn, namespace.Context{})
require.NoError(t, err)
}
}
@@ -1106,7 +1198,6 @@ func testBufferSnapshot(t *testing.T, opts Options, setAnn setAnnotation) {
defer ctx.Close()
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
@@ -1132,7 +1223,7 @@ func testBufferSnapshot(t *testing.T, opts Options, setAnn setAnnotation) {
// Perform the writes.
for _, v := range data {
curr = v.Timestamp
- verifyWriteToBuffer(t, buffer, v, nsCtx.Schema)
+ verifyWriteToBufferSuccess(t, testID, buffer, v, nsCtx.Schema)
}
// Verify internal state.
@@ -1154,7 +1245,7 @@ func testBufferSnapshot(t *testing.T, opts Options, setAnn setAnnotation) {
assert.Equal(t, 2, len(encoders))
- assertPersistDataFn := func(id ident.ID, tags ident.Tags, segment ts.Segment, checlsum uint32) error {
+ assertPersistDataFn := func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
// Check we got the right results.
expectedData := data[:len(data)-1] // -1 because we don't expect the last datapoint.
expectedCopy := make([]DecodedTestValue, len(expectedData))
@@ -1171,7 +1262,11 @@ func testBufferSnapshot(t *testing.T, opts Options, setAnn setAnnotation) {
}
// Perform a snapshot.
- err := buffer.Snapshot(ctx, start, ident.StringID("some-id"), ident.Tags{}, assertPersistDataFn, nsCtx)
+ metadata := persist.NewMetadata(doc.Document{
+ ID: []byte("some-id"),
+ })
+
+ _, err := buffer.Snapshot(ctx, start, metadata, assertPersistDataFn, nsCtx)
assert.NoError(t, err)
// Check internal state to make sure the merge happened and was persisted.
@@ -1209,7 +1304,6 @@ func TestBufferSnapshotWithColdWrites(t *testing.T) {
return curr
}))
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
@@ -1232,7 +1326,7 @@ func TestBufferSnapshotWithColdWrites(t *testing.T) {
for _, v := range warmData {
// Set curr so that every write is a warm write.
curr = v.Timestamp
- verifyWriteToBuffer(t, buffer, v, nsCtx.Schema)
+ verifyWriteToBufferSuccess(t, testID, buffer, v, nsCtx.Schema)
}
// Also add cold writes to the buffer to verify that Snapshot will capture
@@ -1255,7 +1349,7 @@ func TestBufferSnapshotWithColdWrites(t *testing.T) {
// Perform cold writes.
for _, v := range coldData {
- verifyWriteToBuffer(t, buffer, v, nsCtx.Schema)
+ verifyWriteToBufferSuccess(t, testID, buffer, v, nsCtx.Schema)
}
// Verify internal state.
@@ -1295,7 +1389,7 @@ func TestBufferSnapshotWithColdWrites(t *testing.T) {
}
assert.Equal(t, 2, len(coldEncoders))
- assertPersistDataFn := func(id ident.ID, tags ident.Tags, segment ts.Segment, checlsum uint32) error {
+ assertPersistDataFn := func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
// Check we got the right results.
// `len(warmData)-1` because we don't expect the last warm datapoint
// since it's for a different block.
@@ -1315,7 +1409,11 @@ func TestBufferSnapshotWithColdWrites(t *testing.T) {
}
// Perform a snapshot.
- err := buffer.Snapshot(ctx, start, ident.StringID("some-id"), ident.Tags{}, assertPersistDataFn, nsCtx)
+ metadata := persist.NewMetadata(doc.Document{
+ ID: []byte("some-id"),
+ })
+
+ _, err := buffer.Snapshot(ctx, start, metadata, assertPersistDataFn, nsCtx)
require.NoError(t, err)
// Check internal state of warm bucket to make sure the merge happened and
@@ -1577,7 +1675,13 @@ func TestColdFlushBlockStarts(t *testing.T) {
}
func TestFetchBlocksForColdFlush(t *testing.T) {
- opts := newBufferTestOptions()
+ now := time.Now()
+ opts := newBufferTestOptions().SetColdWritesEnabled(true)
+ opts = opts.SetClockOptions(
+ opts.ClockOptions().SetNowFn(func() time.Time {
+ return now
+ }),
+ )
rops := opts.RetentionOptions()
blockSize := rops.BlockSize()
blockStart4 := time.Now().Truncate(blockSize)
@@ -1599,19 +1703,6 @@ func TestFetchBlocksForColdFlush(t *testing.T) {
},
},
},
- blockData{
- start: blockStart2,
- writeType: ColdWrite,
- data: [][]DecodedTestValue{
- {
- {blockStart2.Add(secs(2)), 4, xtime.Second, nil},
- {blockStart2.Add(secs(5)), 5, xtime.Second, nil},
- {blockStart2.Add(secs(11)), 6, xtime.Second, nil},
- {blockStart2.Add(secs(15)), 7, xtime.Second, nil},
- {blockStart2.Add(secs(40)), 8, xtime.Second, nil},
- },
- },
- },
blockData{
start: blockStart3,
writeType: ColdWrite,
@@ -1641,27 +1732,44 @@ func TestFetchBlocksForColdFlush(t *testing.T) {
ctx := context.NewContext()
defer ctx.Close()
nsCtx := namespace.Context{Schema: testSchemaDesc}
- reader, err := buffer.FetchBlocksForColdFlush(ctx, blockStart1, 4, nsCtx)
+ result, err := buffer.FetchBlocksForColdFlush(ctx, blockStart1, 4, nsCtx)
assert.NoError(t, err)
// Verify that we got the correct data and that the version is correctly set.
- requireReaderValuesEqual(t, expected[blockStartNano1], [][]xio.BlockReader{reader}, opts, nsCtx)
+ requireReaderValuesEqual(t, expected[blockStartNano1], [][]xio.BlockReader{result.Blocks}, opts, nsCtx)
assert.Equal(t, 4, buffer.bucketsMap[blockStartNano1].buckets[0].version)
+ assert.Equal(t, now, result.FirstWrite)
- // Try to fetch from block1 again, which should result in error since we
- // just fetched, which would mark those buckets as not dirty.
- _, err = buffer.FetchBlocksForColdFlush(ctx, blockStart1, 9, nsCtx)
- assert.Error(t, err)
+ // Try to fetch from block1 again. This should not be an error, because we
+ // may need to re-fetch blocks whose buckets failed to flush fully on a
+ // previous attempt.
+ result, err = buffer.FetchBlocksForColdFlush(ctx, blockStart1, 9, nsCtx)
+ assert.NoError(t, err)
+ assert.Equal(t, now, result.FirstWrite)
+
+ // Verify that writing to a cold block updates the first write time. No data in blockStart2 yet.
+ result, err = buffer.FetchBlocksForColdFlush(ctx, blockStart2, 1, nsCtx)
+ assert.NoError(t, err)
+ requireReaderValuesEqual(t, []DecodedTestValue{}, [][]xio.BlockReader{result.Blocks}, opts, nsCtx)
+ assert.Equal(t, time.Time{}, result.FirstWrite)
+ wasWritten, _, err := buffer.Write(ctx, testID, blockStart2, 1,
+ xtime.Second, nil, WriteOptions{})
+ assert.NoError(t, err)
+ assert.True(t, wasWritten)
+ result, err = buffer.FetchBlocksForColdFlush(ctx, blockStart2, 1, nsCtx)
+ assert.NoError(t, err)
+ assert.Equal(t, now, result.FirstWrite)
- reader, err = buffer.FetchBlocksForColdFlush(ctx, blockStart3, 1, nsCtx)
+ result, err = buffer.FetchBlocksForColdFlush(ctx, blockStart3, 1, nsCtx)
assert.NoError(t, err)
- requireReaderValuesEqual(t, expected[blockStartNano3], [][]xio.BlockReader{reader}, opts, nsCtx)
+ requireReaderValuesEqual(t, expected[blockStartNano3], [][]xio.BlockReader{result.Blocks}, opts, nsCtx)
assert.Equal(t, 1, buffer.bucketsMap[blockStartNano3].buckets[0].version)
+ assert.Equal(t, now, result.FirstWrite)
// Try to fetch from a block that only has warm buckets. It has no data
// but is not an error.
- reader, err = buffer.FetchBlocksForColdFlush(ctx, blockStart4, 1, nsCtx)
+ result, err = buffer.FetchBlocksForColdFlush(ctx, blockStart4, 1, nsCtx)
assert.NoError(t, err)
- requireReaderValuesEqual(t, []DecodedTestValue{}, [][]xio.BlockReader{reader}, opts, nsCtx)
+ requireReaderValuesEqual(t, []DecodedTestValue{}, [][]xio.BlockReader{result.Blocks}, opts, nsCtx)
+ assert.Equal(t, time.Time{}, result.FirstWrite)
}
// TestBufferLoadWarmWrite tests the Load method, ensuring that blocks are successfully loaded into
@@ -1675,7 +1783,6 @@ func TestBufferLoadWarmWrite(t *testing.T) {
nsCtx = namespace.Context{}
)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
encoded, err := buffer.ReadEncoded(context.NewContext(), curr, curr.Add(blockSize), nsCtx)
@@ -1709,7 +1816,6 @@ func TestBufferLoadColdWrite(t *testing.T) {
nsCtx = namespace.Context{}
)
buffer.Reset(databaseBufferResetOptions{
- ID: ident.StringID("foo"),
Options: opts,
})
encoded, err := buffer.ReadEncoded(context.NewContext(), curr, curr.Add(blockSize), nsCtx)
@@ -1731,3 +1837,365 @@ func TestBufferLoadColdWrite(t *testing.T) {
coldFlushBlockStarts := buffer.ColdFlushBlockStarts(nil)
require.Equal(t, 1, coldFlushBlockStarts.Len())
}
+
+func TestUpsertProto(t *testing.T) {
+ opts := newBufferTestOptions()
+ rops := opts.RetentionOptions()
+ curr := time.Now().Truncate(rops.BlockSize())
+ opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
+ return curr
+ }))
+ var nsCtx namespace.Context
+
+ tests := []struct {
+ desc string
+ writes []writeAttempt
+ expectedData []DecodedTestValue
+ }{
+ {
+ desc: "Upsert proto",
+ writes: []writeAttempt{
+ {
+ data: DecodedTestValue{curr, 0, xtime.Second, []byte("one")},
+ expectWritten: true,
+ expectErr: false,
+ },
+ {
+ data: DecodedTestValue{curr, 0, xtime.Second, []byte("two")},
+ expectWritten: true,
+ expectErr: false,
+ },
+ },
+ expectedData: []DecodedTestValue{
+ {curr, 0, xtime.Second, []byte("two")},
+ },
+ },
+ {
+ desc: "Duplicate proto",
+ writes: []writeAttempt{
+ {
+ data: DecodedTestValue{curr, 0, xtime.Second, []byte("one")},
+ expectWritten: true,
+ expectErr: false,
+ },
+ {
+ data: DecodedTestValue{curr, 0, xtime.Second, []byte("one")},
+ // A write with the same timestamp, value, and annotation as the
+ // previous datapoint is a duplicate and should not be written.
+ expectWritten: false,
+ expectErr: false,
+ },
+ },
+ expectedData: []DecodedTestValue{
+ {curr, 0, xtime.Second, []byte("one")},
+ },
+ },
+ {
+ desc: "Two datapoints different proto",
+ writes: []writeAttempt{
+ {
+ data: DecodedTestValue{curr, 0, xtime.Second, []byte("one")},
+ expectWritten: true,
+ expectErr: false,
+ },
+ {
+ data: DecodedTestValue{curr.Add(time.Second), 0, xtime.Second, []byte("two")},
+ expectWritten: true,
+ expectErr: false,
+ },
+ },
+ expectedData: []DecodedTestValue{
+ {curr, 0, xtime.Second, []byte("one")},
+ {curr.Add(time.Second), 0, xtime.Second, []byte("two")},
+ },
+ },
+ {
+ desc: "Two datapoints same proto",
+ writes: []writeAttempt{
+ {
+ data: DecodedTestValue{curr, 0, xtime.Second, []byte("one")},
+ expectWritten: true,
+ expectErr: false,
+ },
+ {
+ data: DecodedTestValue{curr.Add(time.Second), 0, xtime.Second, []byte("one")},
+ expectWritten: true,
+ expectErr: false,
+ },
+ },
+ expectedData: []DecodedTestValue{
+ {curr, 0, xtime.Second, []byte("one")},
+ // This is special-cased in the proto encoder: when two
+ // consecutive values are identical, it encodes that nothing
+ // has changed instead of re-encoding the blob again.
+ {curr.Add(time.Second), 0, xtime.Second, nil},
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.desc, func(t *testing.T) {
+ buffer := newDatabaseBuffer().(*dbBuffer)
+ buffer.Reset(databaseBufferResetOptions{
+ Options: opts,
+ })
+
+ for _, write := range test.writes {
+ verifyWriteToBuffer(t, testID, buffer, write.data, nsCtx.Schema,
+ write.expectWritten, write.expectErr)
+ }
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ results, err := buffer.ReadEncoded(ctx, timeZero, timeDistantFuture, nsCtx)
+ assert.NoError(t, err)
+ assert.NotNil(t, results)
+
+ requireReaderValuesEqual(t, test.expectedData, results, opts, nsCtx)
+ })
+ }
+}
+
+type writeAttempt struct {
+ data DecodedTestValue
+ expectWritten bool
+ expectErr bool
+}
+
+func TestEncoderLimit(t *testing.T) {
+ type writeTimeOffset struct {
+ timeOffset int
+ expectTooManyEncodersErr bool
+ }
+
+ tests := []struct {
+ desc string
+ encodersPerBlockLimit int
+ writes []writeTimeOffset
+ }{
+ {
+ desc: "one encoder, no limit",
+ encodersPerBlockLimit: 0, // 0 means no limit.
+ writes: []writeTimeOffset{
+ // Writes are in order, so just one encoder.
+ {
+ timeOffset: 1,
+ expectTooManyEncodersErr: false,
+ },
+ {
+ timeOffset: 2,
+ expectTooManyEncodersErr: false,
+ },
+ {
+ timeOffset: 3,
+ expectTooManyEncodersErr: false,
+ },
+ {
+ timeOffset: 4,
+ expectTooManyEncodersErr: false,
+ },
+ },
+ },
+ {
+ desc: "many encoders, no limit",
+ encodersPerBlockLimit: 0, // 0 means no limit.
+ writes: []writeTimeOffset{
+ // Writes are in reverse chronological order, so every write
+ // requires a new encoder.
+ {
+ timeOffset: 9,
+ expectTooManyEncodersErr: false,
+ },
+ {
+ timeOffset: 8,
+ expectTooManyEncodersErr: false,
+ },
+ {
+ timeOffset: 7,
+ expectTooManyEncodersErr: false,
+ },
+ {
+ timeOffset: 6,
+ expectTooManyEncodersErr: false,
+ },
+ {
+ timeOffset: 5,
+ expectTooManyEncodersErr: false,
+ },
+ {
+ timeOffset: 4,
+ expectTooManyEncodersErr: false,
+ },
+ {
+ timeOffset: 3,
+ expectTooManyEncodersErr: false,
+ },
+ {
+ timeOffset: 2,
+ expectTooManyEncodersErr: false,
+ },
+ },
+ },
+ {
+ desc: "within limit",
+ encodersPerBlockLimit: 3,
+ writes: []writeTimeOffset{
+ // First encoder created.
+ {
+ timeOffset: 3,
+ expectTooManyEncodersErr: false,
+ },
+ // Second encoder created.
+ {
+ timeOffset: 2,
+ expectTooManyEncodersErr: false,
+ },
+ // Third encoder created.
+ {
+ timeOffset: 1,
+ expectTooManyEncodersErr: false,
+ },
+ },
+ },
+ {
+ desc: "within limit, many writes",
+ encodersPerBlockLimit: 2,
+ writes: []writeTimeOffset{
+ // First encoder created.
+ {
+ timeOffset: 10,
+ expectTooManyEncodersErr: false,
+ },
+ // Goes in first encoder.
+ {
+ timeOffset: 11,
+ expectTooManyEncodersErr: false,
+ },
+ // Goes in first encoder.
+ {
+ timeOffset: 12,
+ expectTooManyEncodersErr: false,
+ },
+ // Second encoder created.
+ {
+ timeOffset: 1,
+ expectTooManyEncodersErr: false,
+ },
+ // Goes in second encoder.
+ {
+ timeOffset: 2,
+ expectTooManyEncodersErr: false,
+ },
+ // Goes in first encoder.
+ {
+ timeOffset: 13,
+ expectTooManyEncodersErr: false,
+ },
+ // Goes in second encoder.
+ {
+ timeOffset: 3,
+ expectTooManyEncodersErr: false,
+ },
+ },
+ },
+ {
+ desc: "too many encoders",
+ encodersPerBlockLimit: 3,
+ writes: []writeTimeOffset{
+ // First encoder created.
+ {
+ timeOffset: 5,
+ expectTooManyEncodersErr: false,
+ },
+ // Second encoder created.
+ {
+ timeOffset: 4,
+ expectTooManyEncodersErr: false,
+ },
+ // Third encoder created.
+ {
+ timeOffset: 3,
+ expectTooManyEncodersErr: false,
+ },
+ // Requires fourth encoder, which is past the limit.
+ {
+ timeOffset: 2,
+ expectTooManyEncodersErr: true,
+ },
+ },
+ },
+ {
+ desc: "too many encoders, more writes",
+ encodersPerBlockLimit: 2,
+ writes: []writeTimeOffset{
+ // First encoder created.
+ {
+ timeOffset: 10,
+ expectTooManyEncodersErr: false,
+ },
+ // Second encoder created.
+ {
+ timeOffset: 2,
+ expectTooManyEncodersErr: false,
+ },
+ // Goes in second encoder.
+ {
+ timeOffset: 3,
+ expectTooManyEncodersErr: false,
+ },
+ // Goes in first encoder.
+ {
+ timeOffset: 11,
+ expectTooManyEncodersErr: false,
+ },
+ // Requires third encoder, which is past the limit.
+ {
+ timeOffset: 1,
+ expectTooManyEncodersErr: true,
+ },
+ // Goes in second encoder.
+ {
+ timeOffset: 4,
+ expectTooManyEncodersErr: false,
+ },
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.desc, func(t *testing.T) {
+ opts := newBufferTestOptions()
+ rops := opts.RetentionOptions()
+ curr := time.Now().Truncate(rops.BlockSize())
+ opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(func() time.Time {
+ return curr
+ }))
+ runtimeOptsMgr := opts.RuntimeOptionsManager()
+ newRuntimeOpts := runtimeOptsMgr.Get().
+ SetEncodersPerBlockLimit(test.encodersPerBlockLimit)
+ runtimeOptsMgr.Update(newRuntimeOpts)
+ buffer := newDatabaseBuffer().(*dbBuffer)
+ buffer.Reset(databaseBufferResetOptions{Options: opts})
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ for i, write := range test.writes {
+ wasWritten, writeType, err := buffer.Write(ctx, testID,
+ curr.Add(time.Duration(write.timeOffset)*time.Millisecond),
+ float64(i), xtime.Millisecond, nil, WriteOptions{})
+
+ if write.expectTooManyEncodersErr {
+ assert.Error(t, err)
+ assert.True(t, xerrors.IsInvalidParams(err))
+ assert.Equal(t, errTooManyEncoders, err)
+ } else {
+ assert.NoError(t, err)
+ assert.True(t, wasWritten)
+ assert.Equal(t, WarmWrite, writeType)
+ }
+ }
+ })
+ }
+}
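Editor's sketch (not part of the diff): the runtime-options flow that TestEncoderLimit drives above, in isolation. Assumes the series package context and its newBufferTestOptions helper; imports elided.

// Cap each block at 2 encoders via the runtime options manager; 0 means no
// limit. This sketch updates the manager before Reset, as the test does.
opts := newBufferTestOptions()
runtimeOptsMgr := opts.RuntimeOptionsManager()
runtimeOptsMgr.Update(runtimeOptsMgr.Get().SetEncodersPerBlockLimit(2))

buffer := newDatabaseBuffer()
buffer.Reset(databaseBufferResetOptions{Options: opts})
// From here, an out-of-order write that would need a 3rd encoder within a
// single block is rejected with errTooManyEncoders, as the table cases show.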
diff --git a/src/dbnode/storage/series/lookup/entry.go b/src/dbnode/storage/series/lookup/entry.go
index 1dd242d1fe..3e4ea9df5d 100644
--- a/src/dbnode/storage/series/lookup/entry.go
+++ b/src/dbnode/storage/series/lookup/entry.go
@@ -151,7 +151,8 @@ func (entry *Entry) OnIndexSuccess(blockStartNanos xtime.UnixNano) {
entry.reverseIndex.Unlock()
}
-// OnIndexFinalize marks any attempt for the given block start is finished.
+// OnIndexFinalize marks any attempt for the given block start as finished
+// and decrements the entry ref count.
func (entry *Entry) OnIndexFinalize(blockStartNanos xtime.UnixNano) {
entry.reverseIndex.Lock()
entry.reverseIndex.setAttemptWithWLock(blockStartNanos, false)
@@ -213,7 +214,7 @@ func (s *entryIndexState) setSuccessWithWLock(t xtime.UnixNano) {
for i := range s.states {
if s.states[i].blockStart.Equal(t) {
s.states[i].success = true
- break
+ return
}
}
diff --git a/src/dbnode/storage/series/lookup/lookup_mock.go b/src/dbnode/storage/series/lookup/lookup_mock.go
index 66de4c3624..ebcdf5f5a2 100644
--- a/src/dbnode/storage/series/lookup/lookup_mock.go
+++ b/src/dbnode/storage/series/lookup/lookup_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/dbnode/storage/series/lookup (interfaces: OnReleaseReadWriteRef)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/dbnode/storage/series/options.go b/src/dbnode/storage/series/options.go
index 9a8c9dbec1..682b432e28 100644
--- a/src/dbnode/storage/series/options.go
+++ b/src/dbnode/storage/series/options.go
@@ -24,6 +24,7 @@ import (
"github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/retention"
+ m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
@@ -46,6 +47,7 @@ type options struct {
coldWritesEnabled bool
bufferBucketPool *BufferBucketPool
bufferBucketVersionsPool *BufferBucketVersionsPool
+ runtimeOptsMgr m3dbruntime.OptionsManager
}
// NewOptions creates new database series options
@@ -218,3 +220,13 @@ func (o *options) SetBufferBucketPool(value *BufferBucketPool) Options {
func (o *options) BufferBucketPool() *BufferBucketPool {
return o.bufferBucketPool
}
+
+func (o *options) SetRuntimeOptionsManager(value m3dbruntime.OptionsManager) Options {
+ opts := *o
+ opts.runtimeOptsMgr = value
+ return &opts
+}
+
+func (o *options) RuntimeOptionsManager() m3dbruntime.OptionsManager {
+ return o.runtimeOptsMgr
+}
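The new setter above follows the copy-on-write pattern used throughout this options type: copy the receiver, mutate the copy, and return it, so callers holding the old Options value never see it change underneath them. A standalone sketch of just that pattern (hypothetical names, runnable as-is):

package main

import "fmt"

// options demonstrates the copy-on-write setter shape; the field is invented.
type options struct {
	limit int
}

// SetLimit returns a modified copy, leaving the receiver untouched.
func (o *options) SetLimit(value int) *options {
	opts := *o // shallow copy
	opts.limit = value
	return &opts
}

func main() {
	base := &options{limit: 1}
	derived := base.SetLimit(5)
	fmt.Println(base.limit, derived.limit) // 1 5: base is unchanged
}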
diff --git a/src/dbnode/storage/series/series.go b/src/dbnode/storage/series/series.go
index 79c78e447d..a08718cf88 100644
--- a/src/dbnode/storage/series/series.go
+++ b/src/dbnode/storage/series/series.go
@@ -30,6 +30,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -42,17 +43,14 @@ import (
var (
// ErrSeriesAllDatapointsExpired is returned on tick when all datapoints are expired
ErrSeriesAllDatapointsExpired = errors.New("series datapoints are all expired")
- // errSeriesMatchUniqueIndexFailed is returned when MatchUniqueIndex is
- // specified for a write but the value does not match the current series
- // unique index.
- errSeriesMatchUniqueIndexFailed = errors.New("series write failed due to unique index not matched")
- // errSeriesMatchUniqueIndexInvalid is returned when MatchUniqueIndex is
- // specified for a write but the current series unique index is invalid.
- errSeriesMatchUniqueIndexInvalid = errors.New("series write failed due to unique index being invalid")
errSeriesAlreadyBootstrapped = errors.New("series is already bootstrapped")
errSeriesNotBootstrapped = errors.New("series is not yet bootstrapped")
errBlockStateSnapshotNotBootstrapped = errors.New("block state snapshot is not bootstrapped")
+
+ // bootstrapWriteID is a placeholder ID used for writes to a timeseries
+ // that is being bootstrapped and does not have access to its real TS ID.
+ bootstrapWriteID = ident.StringID("bootstrap_timeseries")
)
type dbSeries struct {
@@ -60,13 +58,19 @@ type dbSeries struct {
opts Options
// NB(r): One should audit all places that access the
- // series ID before changing ownership semantics (e.g.
+ // series metadata before changing ownership semantics (e.g.
// pooling the ID rather than releasing it to the GC on
// calling series.Reset()).
+ // Note: The bytes that back "id ident.ID" are the same bytes
+ // that are behind the ID in "metadata doc.Document"; the only
+ // reason we keep an ident.ID on the series is that many existing
+ // callsites require the ID as an ident.ID.
id ident.ID
- tags ident.Tags
+ metadata doc.Document
uniqueIndex uint64
+ bootstrap dbSeriesBootstrap
+
buffer databaseBuffer
cachedBlocks block.DatabaseSeriesBlocks
blockRetriever QueryableBlockRetriever
@@ -75,6 +79,14 @@ type dbSeries struct {
pool DatabaseSeriesPool
}
+type dbSeriesBootstrap struct {
+ sync.Mutex
+
+ // buffer should be nil unless this series
+ // has taken bootstrap writes.
+ buffer databaseBuffer
+}
+
// NewDatabaseSeries creates a new database series.
func NewDatabaseSeries(opts DatabaseSeriesOptions) DatabaseSeries {
s := newDatabaseSeries()
@@ -111,11 +123,11 @@ func (s *dbSeries) ID() ident.ID {
return id
}
-func (s *dbSeries) Tags() ident.Tags {
+func (s *dbSeries) Metadata() doc.Document {
s.RLock()
- tags := s.tags
+ metadata := s.metadata
s.RUnlock()
- return tags
+ return metadata
}
func (s *dbSeries) UniqueIndex() uint64 {
@@ -144,10 +156,21 @@ func (s *dbSeries) Tick(blockStates ShardBlockStateSnapshot, nsCtx namespace.Con
s.Unlock()
- if update.ActiveBlocks == 0 {
- return r, ErrSeriesAllDatapointsExpired
+ if update.ActiveBlocks > 0 {
+ return r, nil
+ }
+
+ // Check if there are any bootstrap writes that haven't been merged yet.
+ s.bootstrap.Lock()
+ unmergedBootstrapDatapoints := s.bootstrap.buffer != nil
+ s.bootstrap.Unlock()
+
+ if unmergedBootstrapDatapoints {
+ return r, nil
}
- return r, nil
+
+ // Everything expired.
+ return r, ErrSeriesAllDatapointsExpired
}
type updateBlocksResult struct {
@@ -274,6 +297,13 @@ func (s *dbSeries) IsEmpty() bool {
return false
}
+func (s *dbSeries) IsBufferEmptyAtBlockStart(blockStart time.Time) bool {
+ s.RLock()
+ bufferEmpty := s.buffer.IsEmptyAtBlockStart(blockStart)
+ s.RUnlock()
+ return bufferEmpty
+}
+
func (s *dbSeries) NumActiveBlocks() int {
s.RLock()
value := s.cachedBlocks.Len() + s.buffer.Stats().wiredBlocks
@@ -288,26 +318,76 @@ func (s *dbSeries) Write(
unit xtime.Unit,
annotation []byte,
wOpts WriteOptions,
-) (bool, error) {
+) (bool, WriteType, error) {
+ if wOpts.BootstrapWrite {
+ // NB(r): If this is a bootstrap write we store this in a
+ // side buffer so that we don't need to take the series lock
+ // and contend with normal writes that are flowing into the DB
+ // while bootstrapping which can significantly interrupt
+ // write latency and cause entire DB to stall/degrade in performance.
+ return s.bootstrapWrite(ctx, timestamp, value, unit, annotation, wOpts)
+ }
+
s.Lock()
- matchUniqueIndex := wOpts.MatchUniqueIndex
- if matchUniqueIndex {
- if s.uniqueIndex == 0 {
- return false, errSeriesMatchUniqueIndexInvalid
+ written, writeType, err := s.buffer.Write(ctx, s.id, timestamp, value,
+ unit, annotation, wOpts)
+ s.Unlock()
+
+ return written, writeType, err
+}
+
+func (s *dbSeries) bootstrapWrite(
+ ctx context.Context,
+ timestamp time.Time,
+ value float64,
+ unit xtime.Unit,
+ annotation []byte,
+ wOpts WriteOptions,
+) (bool, WriteType, error) {
+ s.bootstrap.Lock()
+ defer s.bootstrap.Unlock()
+
+ if s.bootstrap.buffer == nil {
+ // Temporarily release the bootstrap lock: bufferResetOpts takes the
+ // series lock, and Bootstrap() acquires the series lock before the
+ // bootstrap lock, so holding both here in the reverse order could
+ // deadlock.
+ s.bootstrap.Unlock()
+
+ // Get reset opts.
+ resetOpts, err := s.bufferResetOpts()
+
+ // Re-lock bootstrap lock.
+ s.bootstrap.Lock()
+
+ if err != nil {
+ // Abort if failed to get buffer opts.
+ var writeType WriteType
+ return false, writeType, err
}
- if s.uniqueIndex != wOpts.MatchUniqueIndexValue {
- // NB(r): Match unique index allows for a caller to
- // reliably take a reference to a series and call Write(...)
- // later while keeping a direct reference to the series
- // while the shard and namespace continues to own and manage
- // the lifecycle of the series.
- return false, errSeriesMatchUniqueIndexFailed
+
+ // If buffer still nil then set it.
+ if s.bootstrap.buffer == nil {
+ s.bootstrap.buffer = newDatabaseBuffer()
+ s.bootstrap.buffer.Reset(resetOpts)
}
}
- wasWritten, err := s.buffer.Write(ctx, timestamp, value, unit, annotation, wOpts)
- s.Unlock()
- return wasWritten, err
+ return s.bootstrap.buffer.Write(ctx, bootstrapWriteID, timestamp,
+ value, unit, annotation, wOpts)
+}
+
+func (s *dbSeries) bufferResetOpts() (databaseBufferResetOptions, error) {
+ // Grab series lock.
+ s.RLock()
+ defer s.RUnlock()
+
+ if s.id == nil {
+ // Not active, expired series.
+ return databaseBufferResetOptions{}, ErrSeriesAllDatapointsExpired
+ }
+
+ return databaseBufferResetOptions{
+ BlockRetriever: s.blockRetriever,
+ Options: s.opts,
+ }, nil
}
func (s *dbSeries) ReadEncoded(
@@ -327,14 +407,14 @@ func (s *dbSeries) FetchBlocksForColdFlush(
start time.Time,
version int,
nsCtx namespace.Context,
-) ([]xio.BlockReader, error) {
+) (block.FetchBlockResult, error) {
// This needs a write lock because the version on underlying buckets need
// to be modified.
s.Lock()
- br, err := s.buffer.FetchBlocksForColdFlush(ctx, start, version, nsCtx)
+ result, err := s.buffer.FetchBlocksForColdFlush(ctx, start, version, nsCtx)
s.Unlock()
- return br, err
+ return result, err
}
func (s *dbSeries) FetchBlocks(
@@ -379,7 +459,7 @@ func (s *dbSeries) FetchBlocksMetadata(
// NB(r): Since ID and Tags are garbage collected we can safely
// return refs.
tagsIter := s.opts.IdentifierPool().TagsIterator()
- tagsIter.Reset(s.tags)
+ tagsIter.ResetFields(s.metadata.Fields)
return block.NewFetchBlocksMetadataResult(s.id, tagsIter, res), nil
}
@@ -528,7 +608,8 @@ func (s *dbSeries) WarmFlush(
// Need a write lock because the buffer WarmFlush method mutates
// state (by performing a pro-active merge).
s.Lock()
- outcome, err := s.buffer.WarmFlush(ctx, blockStart, s.id, s.tags, persistFn, nsCtx)
+ outcome, err := s.buffer.WarmFlush(ctx, blockStart,
+ persist.NewMetadata(s.metadata), persistFn, nsCtx)
s.Unlock()
return outcome, err
}
@@ -538,13 +619,14 @@ func (s *dbSeries) Snapshot(
blockStart time.Time,
persistFn persist.DataFn,
nsCtx namespace.Context,
-) error {
+) (SnapshotResult, error) {
// Need a write lock because the buffer Snapshot method mutates
// state (by performing a pro-active merge).
s.Lock()
- defer s.Unlock()
-
- return s.buffer.Snapshot(ctx, blockStart, s.id, s.tags, persistFn, nsCtx)
+ result, err := s.buffer.Snapshot(ctx, blockStart,
+ persist.NewMetadata(s.metadata), persistFn, nsCtx)
+ s.Unlock()
+ return result, err
}
func (s *dbSeries) ColdFlushBlockStarts(blockStates BootstrappedBlockStateSnapshot) OptimizedTimes {
@@ -554,13 +636,43 @@ func (s *dbSeries) ColdFlushBlockStarts(blockStates BootstrappedBlockStateSnapsh
return s.buffer.ColdFlushBlockStarts(blockStates.Snapshot)
}
+func (s *dbSeries) Bootstrap(nsCtx namespace.Context) error {
+ // NB(r): Need to hold the lock the whole time since
+ // this needs to be consistent view for a tick to see.
+ s.Lock()
+ defer s.Unlock()
+
+ s.bootstrap.Lock()
+ bootstrapBuffer := s.bootstrap.buffer
+ s.bootstrap.buffer = nil
+ s.bootstrap.Unlock()
+
+ if bootstrapBuffer == nil {
+ return nil
+ }
+
+ // NB(r): Now that bootstrap is done we need to move the bootstrap
+ // writes to the normal series buffer to make them visible to the DB.
+ // These bootstrap writes were stored in a side buffer so that we
+ // didn't need to take the series lock and contend with normal writes
+ // flowing into the DB while bootstrapping, which can significantly
+ // degrade write latency and cause the entire DB to stall.
+ return bootstrapBuffer.MoveTo(s.buffer, nsCtx)
+}
+
func (s *dbSeries) Close() {
+ s.bootstrap.Lock()
+ if s.bootstrap.buffer != nil {
+ s.bootstrap.buffer = nil
+ }
+ s.bootstrap.Unlock()
+
s.Lock()
defer s.Unlock()
// See Reset() for why these aren't finalized.
s.id = nil
- s.tags = ident.Tags{}
+ s.metadata = doc.Document{}
s.uniqueIndex = 0
switch s.opts.CachePolicy() {
@@ -604,11 +716,10 @@ func (s *dbSeries) Reset(opts DatabaseSeriesOptions) {
// The same goes for the series tags.
s.Lock()
s.id = opts.ID
- s.tags = opts.Tags
+ s.metadata = opts.Metadata
s.uniqueIndex = opts.UniqueIndex
s.cachedBlocks.Reset()
s.buffer.Reset(databaseBufferResetOptions{
- ID: opts.ID,
BlockRetriever: opts.BlockRetriever,
Options: opts.Options,
})
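bootstrapWrite above lazily initializes the side buffer with a drop-the-lock, compute, re-lock, re-check sequence. A standalone sketch of that shape (all names hypothetical; only the locking structure mirrors the code above):

package main

import "sync"

type series struct {
	bootstrapMu sync.Mutex
	buf         []float64
}

// resetOpts stands in for bufferResetOpts, which takes the series lock and
// so must not run while bootstrapMu is held (to avoid lock-order inversion).
func (s *series) resetOpts() int { return 0 }

func (s *series) bootstrapWrite(v float64) {
	s.bootstrapMu.Lock()
	defer s.bootstrapMu.Unlock()

	if s.buf == nil {
		// Drop the lock while computing options; a concurrent writer may
		// initialize the buffer in the meantime.
		s.bootstrapMu.Unlock()
		_ = s.resetOpts()
		s.bootstrapMu.Lock()

		// Re-check before initializing: only the first writer wins.
		if s.buf == nil {
			s.buf = make([]float64, 0, 8)
		}
	}
	s.buf = append(s.buf, v)
}

func main() {
	s := &series{}
	s.bootstrapWrite(42)
}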
diff --git a/src/dbnode/storage/series/series_mock.go b/src/dbnode/storage/series/series_mock.go
index 7f127bb417..0050ae6801 100644
--- a/src/dbnode/storage/series/series_mock.go
+++ b/src/dbnode/storage/series/series_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/dbnode/storage/series (interfaces: DatabaseSeries,QueryableBlockRetriever)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -33,6 +33,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
time0 "github.com/m3db/m3/src/x/time"
@@ -63,6 +64,20 @@ func (m *MockDatabaseSeries) EXPECT() *MockDatabaseSeriesMockRecorder {
return m.recorder
}
+// Bootstrap mocks base method
+func (m *MockDatabaseSeries) Bootstrap(arg0 namespace.Context) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Bootstrap", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Bootstrap indicates an expected call of Bootstrap
+func (mr *MockDatabaseSeriesMockRecorder) Bootstrap(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrap", reflect.TypeOf((*MockDatabaseSeries)(nil).Bootstrap), arg0)
+}
+
// Close mocks base method
func (m *MockDatabaseSeries) Close() {
m.ctrl.T.Helper()
@@ -105,10 +120,10 @@ func (mr *MockDatabaseSeriesMockRecorder) FetchBlocks(arg0, arg1, arg2 interface
}
// FetchBlocksForColdFlush mocks base method
-func (m *MockDatabaseSeries) FetchBlocksForColdFlush(arg0 context.Context, arg1 time.Time, arg2 int, arg3 namespace.Context) ([]xio.BlockReader, error) {
+func (m *MockDatabaseSeries) FetchBlocksForColdFlush(arg0 context.Context, arg1 time.Time, arg2 int, arg3 namespace.Context) (block.FetchBlockResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FetchBlocksForColdFlush", arg0, arg1, arg2, arg3)
- ret0, _ := ret[0].([]xio.BlockReader)
+ ret0, _ := ret[0].(block.FetchBlockResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -148,6 +163,20 @@ func (mr *MockDatabaseSeriesMockRecorder) ID() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockDatabaseSeries)(nil).ID))
}
+// IsBufferEmptyAtBlockStart mocks base method
+func (m *MockDatabaseSeries) IsBufferEmptyAtBlockStart(arg0 time.Time) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsBufferEmptyAtBlockStart", arg0)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsBufferEmptyAtBlockStart indicates an expected call of IsBufferEmptyAtBlockStart
+func (mr *MockDatabaseSeriesMockRecorder) IsBufferEmptyAtBlockStart(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsBufferEmptyAtBlockStart", reflect.TypeOf((*MockDatabaseSeries)(nil).IsBufferEmptyAtBlockStart), arg0)
+}
+
// IsEmpty mocks base method
func (m *MockDatabaseSeries) IsEmpty() bool {
m.ctrl.T.Helper()
@@ -176,6 +205,20 @@ func (mr *MockDatabaseSeriesMockRecorder) LoadBlock(arg0, arg1 interface{}) *gom
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadBlock", reflect.TypeOf((*MockDatabaseSeries)(nil).LoadBlock), arg0, arg1)
}
+// Metadata mocks base method
+func (m *MockDatabaseSeries) Metadata() doc.Document {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Metadata")
+ ret0, _ := ret[0].(doc.Document)
+ return ret0
+}
+
+// Metadata indicates an expected call of Metadata
+func (mr *MockDatabaseSeriesMockRecorder) Metadata() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Metadata", reflect.TypeOf((*MockDatabaseSeries)(nil).Metadata))
+}
+
// NumActiveBlocks mocks base method
func (m *MockDatabaseSeries) NumActiveBlocks() int {
m.ctrl.T.Helper()
@@ -242,11 +285,12 @@ func (mr *MockDatabaseSeriesMockRecorder) Reset(arg0 interface{}) *gomock.Call {
}
// Snapshot mocks base method
-func (m *MockDatabaseSeries) Snapshot(arg0 context.Context, arg1 time.Time, arg2 persist.DataFn, arg3 namespace.Context) error {
+func (m *MockDatabaseSeries) Snapshot(arg0 context.Context, arg1 time.Time, arg2 persist.DataFn, arg3 namespace.Context) (SnapshotResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Snapshot", arg0, arg1, arg2, arg3)
- ret0, _ := ret[0].(error)
- return ret0
+ ret0, _ := ret[0].(SnapshotResult)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
// Snapshot indicates an expected call of Snapshot
@@ -255,20 +299,6 @@ func (mr *MockDatabaseSeriesMockRecorder) Snapshot(arg0, arg1, arg2, arg3 interf
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Snapshot", reflect.TypeOf((*MockDatabaseSeries)(nil).Snapshot), arg0, arg1, arg2, arg3)
}
-// Tags mocks base method
-func (m *MockDatabaseSeries) Tags() ident.Tags {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Tags")
- ret0, _ := ret[0].(ident.Tags)
- return ret0
-}
-
-// Tags indicates an expected call of Tags
-func (mr *MockDatabaseSeriesMockRecorder) Tags() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tags", reflect.TypeOf((*MockDatabaseSeries)(nil).Tags))
-}
-
// Tick mocks base method
func (m *MockDatabaseSeries) Tick(arg0 ShardBlockStateSnapshot, arg1 namespace.Context) (TickResult, error) {
m.ctrl.T.Helper()
@@ -314,12 +344,13 @@ func (mr *MockDatabaseSeriesMockRecorder) WarmFlush(arg0, arg1, arg2, arg3 inter
}
// Write mocks base method
-func (m *MockDatabaseSeries) Write(arg0 context.Context, arg1 time.Time, arg2 float64, arg3 time0.Unit, arg4 []byte, arg5 WriteOptions) (bool, error) {
+func (m *MockDatabaseSeries) Write(arg0 context.Context, arg1 time.Time, arg2 float64, arg3 time0.Unit, arg4 []byte, arg5 WriteOptions) (bool, WriteType, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Write", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(bool)
- ret1, _ := ret[1].(error)
- return ret0, ret1
+ ret1, _ := ret[1].(WriteType)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
}
// Write indicates an expected call of Write
diff --git a/src/dbnode/storage/series/series_parallel_test.go b/src/dbnode/storage/series/series_parallel_test.go
index 72b288ee72..4bc69e0565 100644
--- a/src/dbnode/storage/series/series_parallel_test.go
+++ b/src/dbnode/storage/series/series_parallel_test.go
@@ -77,7 +77,7 @@ func TestSeriesWriteReadParallel(t *testing.T) {
wg.Add(1)
go func() {
for i := 0; i < numStepsPerWorker; i++ {
- wasWritten, err := series.Write(
+ wasWritten, _, err := series.Write(
ctx, curr.Add(time.Duration(i)*time.Nanosecond), float64(i), xtime.Second, nil, WriteOptions{})
if err != nil {
panic(err)
diff --git a/src/dbnode/storage/series/series_test.go b/src/dbnode/storage/series/series_test.go
index 10585be171..7d9d918f5a 100644
--- a/src/dbnode/storage/series/series_test.go
+++ b/src/dbnode/storage/series/series_test.go
@@ -30,8 +30,11 @@ import (
"github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
+ "github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/retention"
+ m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/block"
+ "github.com/m3db/m3/src/dbnode/storage/index/convert"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/x/context"
@@ -65,7 +68,8 @@ func newSeriesTestOptions() Options {
SetEncoderPool(encoderPool).
SetMultiReaderIteratorPool(multiReaderIteratorPool).
SetBufferBucketPool(bufferBucketPool).
- SetBufferBucketVersionsPool(bufferBucketVersionsPool)
+ SetBufferBucketVersionsPool(bufferBucketVersionsPool).
+ SetRuntimeOptionsManager(m3dbruntime.NewOptionsManager())
opts = opts.
SetRetentionOptions(opts.
RetentionOptions().
@@ -92,7 +96,7 @@ func TestSeriesEmpty(t *testing.T) {
// Writes to series, verifying no error and that further writes should happen.
func verifyWriteToSeries(t *testing.T, series *dbSeries, v DecodedTestValue) {
ctx := context.NewContext()
- wasWritten, err := series.Write(ctx, v.Timestamp, v.Value,
+ wasWritten, _, err := series.Write(ctx, v.Timestamp, v.Value,
v.Unit, v.Annotation, WriteOptions{})
require.NoError(t, err)
require.True(t, wasWritten)
@@ -189,7 +193,7 @@ func TestSeriesSamePointDoesNotWrite(t *testing.T) {
for i, v := range data {
curr = v.Timestamp
ctx := context.NewContext()
- wasWritten, err := series.Write(ctx, v.Timestamp, v.Value, v.Unit, v.Annotation, WriteOptions{})
+ wasWritten, _, err := series.Write(ctx, v.Timestamp, v.Value, v.Unit, v.Annotation, WriteOptions{})
require.NoError(t, err)
if i == 0 || i == len(data)-1 {
require.True(t, wasWritten)
@@ -514,7 +518,6 @@ func TestSeriesFlush(t *testing.T) {
AnyTimes()
series := NewDatabaseSeries(DatabaseSeriesOptions{
- ID: ident.StringID("foo"),
BlockRetriever: blockRetriever,
Options: opts,
}).(*dbSeries)
@@ -523,12 +526,12 @@ func TestSeriesFlush(t *testing.T) {
assert.NoError(t, err)
ctx := context.NewContext()
- series.buffer.Write(ctx, curr, 1234, xtime.Second, nil, WriteOptions{})
+ series.buffer.Write(ctx, testID, curr, 1234, xtime.Second, nil, WriteOptions{})
ctx.BlockingClose()
inputs := []error{errors.New("some error"), nil}
for _, input := range inputs {
- persistFn := func(_ ident.ID, _ ident.Tags, _ ts.Segment, _ uint32) error {
+ persistFn := func(_ persist.Metadata, _ ts.Segment, _ uint32) error {
return input
}
ctx := context.NewContext()
@@ -1148,10 +1151,13 @@ func TestSeriesOutOfOrderWritesAndRotate(t *testing.T) {
expected []ts.Datapoint
)
+ metadata, err := convert.FromSeriesIDAndTags(id, tags)
+ require.NoError(t, err)
+
series := NewDatabaseSeries(DatabaseSeriesOptions{
- ID: id,
- Tags: tags,
- Options: opts,
+ ID: id,
+ Metadata: metadata,
+ Options: opts,
}).(*dbSeries)
for iter := 0; iter < numBlocks; iter++ {
@@ -1159,10 +1165,10 @@ func TestSeriesOutOfOrderWritesAndRotate(t *testing.T) {
value := startValue
for i := 0; i < numPoints; i++ {
- wasWritten, err := series.Write(ctx, start, value, xtime.Second, nil, WriteOptions{})
+ wasWritten, _, err := series.Write(ctx, start, value, xtime.Second, nil, WriteOptions{})
require.NoError(t, err)
assert.True(t, wasWritten)
- expected = append(expected, ts.Datapoint{Timestamp: start, Value: value})
+ expected = append(expected, ts.Datapoint{Timestamp: start, TimestampNanos: xtime.ToUnixNano(start), Value: value})
start = start.Add(10 * time.Second)
value = value + 1.0
}
@@ -1171,7 +1177,7 @@ func TestSeriesOutOfOrderWritesAndRotate(t *testing.T) {
start = now
value = startValue
for i := 0; i < numPoints/2; i++ {
- wasWritten, err := series.Write(ctx, start, value, xtime.Second, nil, WriteOptions{})
+ wasWritten, _, err := series.Write(ctx, start, value, xtime.Second, nil, WriteOptions{})
require.NoError(t, err)
assert.True(t, wasWritten)
start = start.Add(10 * time.Second)
@@ -1191,8 +1197,8 @@ func TestSeriesOutOfOrderWritesAndRotate(t *testing.T) {
ID: id,
Namespace: nsID,
Tags: ident.NewTagsIterator(tags),
- StartInclusive: qStart,
- EndExclusive: qEnd,
+ StartInclusive: xtime.ToUnixNano(qStart),
+ EndExclusive: xtime.ToUnixNano(qEnd),
Replicas: []encoding.MultiReaderIterator{multiIt},
}, nil)
defer it.Close()
@@ -1252,15 +1258,15 @@ func TestSeriesWriteReadFromTheSameBucket(t *testing.T) {
ctx := context.NewContext()
defer ctx.Close()
- wasWritten, err := series.Write(ctx, curr.Add(-3*time.Minute),
+ wasWritten, _, err := series.Write(ctx, curr.Add(-3*time.Minute),
1, xtime.Second, nil, WriteOptions{})
assert.NoError(t, err)
assert.True(t, wasWritten)
- wasWritten, err = series.Write(ctx, curr.Add(-2*time.Minute),
+ wasWritten, _, err = series.Write(ctx, curr.Add(-2*time.Minute),
2, xtime.Second, nil, WriteOptions{})
assert.NoError(t, err)
assert.True(t, wasWritten)
- wasWritten, err = series.Write(ctx, curr.Add(-1*time.Minute),
+ wasWritten, _, err = series.Write(ctx, curr.Add(-1*time.Minute),
3, xtime.Second, nil, WriteOptions{})
assert.NoError(t, err)
assert.True(t, wasWritten)
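Editor's sketch (not part of the diff): the construction path the updated test exercises, building doc.Document metadata from an ID and tags via the convert helper and handing it to NewDatabaseSeries. Assumes the series package context and an opts from newSeriesTestOptions; imports elided.

id := ident.StringID("foo")
tags := ident.NewTags(ident.StringTag("app", "m3db"))

// Index the ID and tags into the doc.Document the series now carries.
metadata, err := convert.FromSeriesIDAndTags(id, tags)
if err != nil {
	panic(err)
}

series := NewDatabaseSeries(DatabaseSeriesOptions{
	ID:       id,
	Metadata: metadata,
	Options:  opts,
})
_ = series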
diff --git a/src/dbnode/storage/series/types.go b/src/dbnode/storage/series/types.go
index 1c3508dfe8..17f1f829b9 100644
--- a/src/dbnode/storage/series/types.go
+++ b/src/dbnode/storage/series/types.go
@@ -28,8 +28,10 @@ import (
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/retention"
+ "github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -41,7 +43,7 @@ import (
// DatabaseSeriesOptions is a set of options for creating a database series.
type DatabaseSeriesOptions struct {
ID ident.ID
- Tags ident.Tags
+ Metadata doc.Document
UniqueIndex uint64
BlockRetriever QueryableBlockRetriever
OnRetrieveBlock block.OnRetrieveBlock
@@ -57,8 +59,8 @@ type DatabaseSeries interface {
// ID returns the ID of the series.
ID() ident.ID
- // Tags return the tags of the series.
- Tags() ident.Tags
+ // Metadata returns the metadata of the series.
+ Metadata() doc.Document
// UniqueIndex is the unique index for the series (for this current
// process, unless the time series expires).
@@ -75,7 +77,7 @@ type DatabaseSeries interface {
unit xtime.Unit,
annotation []byte,
wOpts WriteOptions,
- ) (bool, error)
+ ) (bool, WriteType, error)
// ReadEncoded reads encoded blocks.
ReadEncoded(
@@ -99,7 +101,7 @@ type DatabaseSeries interface {
start time.Time,
version int,
nsCtx namespace.Context,
- ) ([]xio.BlockReader, error)
+ ) (block.FetchBlockResult, error)
// FetchBlocksMetadata returns the blocks metadata.
FetchBlocksMetadata(
@@ -108,9 +110,13 @@ type DatabaseSeries interface {
opts FetchBlocksMetadataOptions,
) (block.FetchBlocksMetadataResult, error)
- // IsEmpty returns whether series is empty.
+ // IsEmpty returns whether the series is empty (includes both cached blocks and in-mem buffer data).
IsEmpty() bool
+ // IsBufferEmptyAtBlockStart returns whether the series buffer is empty at block start
+ // (only checks for in-mem buffer data).
+ IsBufferEmptyAtBlockStart(time.Time) bool
+
// NumActiveBlocks returns the number of active blocks the series currently holds.
NumActiveBlocks() int
@@ -135,11 +141,15 @@ type DatabaseSeries interface {
blockStart time.Time,
persistFn persist.DataFn,
nsCtx namespace.Context,
- ) error
+ ) (SnapshotResult, error)
// ColdFlushBlockStarts returns the block starts that need cold flushes.
ColdFlushBlockStarts(blockStates BootstrappedBlockStateSnapshot) OptimizedTimes
+ // Bootstrap moves any bootstrapped data into the buffer so the
+ // series is ready for reading.
+ Bootstrap(nsCtx namespace.Context) error
+
// Close will close the series and if pooled returned to the pool.
Close()
@@ -147,6 +157,28 @@ type DatabaseSeries interface {
Reset(opts DatabaseSeriesOptions)
}
+// SnapshotResult contains metadata regarding the snapshot.
+type SnapshotResult struct {
+ Persist bool
+ Stats SnapshotResultStats
+}
+
+// SnapshotResultStats contains stats regarding the snapshot.
+type SnapshotResultStats struct {
+ TimeMergeByBucket time.Duration
+ TimeMergeAcrossBuckets time.Duration
+ TimeChecksum time.Duration
+ TimePersist time.Duration
+}
+
+// Add adds another snapshot result's stats to this result.
+func (r *SnapshotResultStats) Add(other SnapshotResultStats) {
+ r.TimeMergeByBucket += other.TimeMergeByBucket
+ r.TimeMergeAcrossBuckets += other.TimeMergeAcrossBuckets
+ r.TimeChecksum += other.TimeChecksum
+ r.TimePersist += other.TimePersist
+}
+
// FetchBlocksMetadataOptions encapsulates block fetch metadata options
// and specifies a few series specific options too.
type FetchBlocksMetadataOptions struct {
@@ -352,20 +384,35 @@ type Options interface {
// BufferBucketPool returns the BufferBucketPool.
BufferBucketPool() *BufferBucketPool
+
+ // SetRuntimeOptionsManager sets the runtime options manager.
+ SetRuntimeOptionsManager(value runtime.OptionsManager) Options
+
+ // RuntimeOptionsManager returns the runtime options manager.
+ RuntimeOptionsManager() runtime.OptionsManager
}
// Stats is passed down from namespace/shard to avoid allocations per series.
type Stats struct {
- encoderCreated tally.Counter
- coldWrites tally.Counter
+ encoderCreated tally.Counter
+ coldWrites tally.Counter
+ encodersPerBlock tally.Histogram
+ encoderLimitWriteRejected tally.Counter
+ snapshotMergesEachBucket tally.Counter
}
// NewStats returns a new Stats for the provided scope.
func NewStats(scope tally.Scope) Stats {
subScope := scope.SubScope("series")
+
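+ // NB: bucket layout is 0 followed by 20 exponential base-2 buckets
+ // (1, 2, 4, ..., 2^19).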
+ buckets := append(tally.ValueBuckets{0},
+ tally.MustMakeExponentialValueBuckets(1, 2, 20)...)
return Stats{
- encoderCreated: subScope.Counter("encoder-created"),
- coldWrites: subScope.Counter("cold-writes"),
+ encoderCreated: subScope.Counter("encoder-created"),
+ coldWrites: subScope.Counter("cold-writes"),
+ encodersPerBlock: subScope.Histogram("encoders-per-block", buckets),
+ encoderLimitWriteRejected: subScope.Counter("encoder-limit-write-rejected"),
+ snapshotMergesEachBucket: subScope.Counter("snapshot-merges-each-bucket"),
}
}
@@ -379,6 +426,16 @@ func (s Stats) IncColdWrites() {
s.coldWrites.Inc(1)
}
+// RecordEncodersPerBlock records the number of encoders per block in a histogram.
+func (s Stats) RecordEncodersPerBlock(num int) {
+ s.encodersPerBlock.RecordValue(float64(num))
+}
+
+// IncEncoderLimitWriteRejected increments the encoderLimitWriteRejected counter.
+func (s Stats) IncEncoderLimitWriteRejected() {
+ s.encoderLimitWriteRejected.Inc(1)
+}
+
// WriteType is an enum for warm/cold write types.
type WriteType int
@@ -407,14 +464,6 @@ type WriteOptions struct {
TruncateType TruncateType
// TransformOptions describes transformation options for incoming writes.
TransformOptions WriteTransformOptions
- // MatchUniqueIndex specifies whether the series unique index
- // must match the unique index value specified (to ensure the series
- // being written is the same series as previously referenced).
- MatchUniqueIndex bool
- // MatchUniqueIndexValue is the series unique index value that
- // must match the current series unique index value (to ensure series
- // being written is the same series as previously referenced).
- MatchUniqueIndexValue uint64
// BootstrapWrite allows a warm write outside the time window as long as the
// block hasn't already been flushed to disk. This is useful for
// bootstrappers filling data that they know has not yet been flushed to
diff --git a/src/dbnode/storage/shard.go b/src/dbnode/storage/shard.go
index 58f2174bf0..00e70dc31d 100644
--- a/src/dbnode/storage/shard.go
+++ b/src/dbnode/storage/shard.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -43,7 +43,9 @@ import (
"github.com/m3db/m3/src/dbnode/storage/repair"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/storage/series/lookup"
+ "github.com/m3db/m3/src/dbnode/tracepoint"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/checked"
@@ -55,6 +57,7 @@ import (
xtime "github.com/m3db/m3/src/x/time"
"github.com/gogo/protobuf/proto"
+ "github.com/opentracing/opentracing-go/log"
"github.com/uber-go/tally"
"go.uber.org/zap"
)
@@ -81,6 +84,8 @@ var (
// ErrDatabaseLoadLimitHit is the error returned when the database load limit
// is hit or exceeded.
ErrDatabaseLoadLimitHit = errors.New("error loading series, database load limit hit")
+
+ emptyDoc = doc.Document{}
)
type filesetsFn func(
@@ -159,7 +164,7 @@ type dbShard struct {
namespaceReaderMgr databaseNamespaceReaderManager
increasingIndex increasingIndex
seriesPool series.DatabaseSeriesPool
- reverseIndex namespaceIndex
+ reverseIndex NamespaceIndex
insertQueue *dbShardInsertQueue
lookup *shardMap
list *list.List
@@ -181,6 +186,7 @@ type dbShard struct {
metrics dbShardMetrics
ticking bool
shard uint32
+ coldWritesEnabled bool
}
// NB(r): dbShardRuntimeOptions does not contain its own
@@ -195,30 +201,60 @@ type dbShardRuntimeOptions struct {
}
type dbShardMetrics struct {
- create tally.Counter
- close tally.Counter
- closeStart tally.Counter
- closeLatency tally.Timer
- insertAsyncInsertErrors tally.Counter
- insertAsyncWriteErrors tally.Counter
- seriesTicked tally.Gauge
+ create tally.Counter
+ close tally.Counter
+ closeStart tally.Counter
+ closeLatency tally.Timer
+ seriesTicked tally.Gauge
+ insertAsyncInsertErrors tally.Counter
+ insertAsyncWriteInternalErrors tally.Counter
+ insertAsyncWriteInvalidParamsErrors tally.Counter
+ insertAsyncIndexErrors tally.Counter
+ snapshotTotalLatency tally.Timer
+ snapshotCheckNeedsSnapshotLatency tally.Timer
+ snapshotPrepareLatency tally.Timer
+ snapshotMergeByBucketLatency tally.Timer
+ snapshotMergeAcrossBucketsLatency tally.Timer
+ snapshotChecksumLatency tally.Timer
+ snapshotPersistLatency tally.Timer
+ snapshotCloseLatency tally.Timer
}
func newDatabaseShardMetrics(shardID uint32, scope tally.Scope) dbShardMetrics {
+ const insertErrorName = "insert-async.errors"
+ snapshotScope := scope.SubScope("snapshot")
return dbShardMetrics{
create: scope.Counter("create"),
close: scope.Counter("close"),
closeStart: scope.Counter("close-start"),
closeLatency: scope.Timer("close-latency"),
- insertAsyncInsertErrors: scope.Tagged(map[string]string{
- "error_type": "insert-series",
- }).Counter("insert-async.errors"),
- insertAsyncWriteErrors: scope.Tagged(map[string]string{
- "error_type": "write-value",
- }).Counter("insert-async.errors"),
seriesTicked: scope.Tagged(map[string]string{
"shard": fmt.Sprintf("%d", shardID),
}).Gauge("series-ticked"),
+ insertAsyncInsertErrors: scope.Tagged(map[string]string{
+ "error_type": "insert-series",
+ "suberror_type": "shard-entry-insert-error",
+ }).Counter(insertErrorName),
+ insertAsyncWriteInternalErrors: scope.Tagged(map[string]string{
+ "error_type": "write-value",
+ "suberror_type": "internal-error",
+ }).Counter(insertErrorName),
+ insertAsyncWriteInvalidParamsErrors: scope.Tagged(map[string]string{
+ "error_type": "write-value",
+ "suberror_type": "invalid-params-error",
+ }).Counter(insertErrorName),
+ insertAsyncIndexErrors: scope.Tagged(map[string]string{
+ "error_type": "reverse-index",
+ "suberror_type": "write-batch-error",
+ }).Counter(insertErrorName),
+ snapshotTotalLatency: snapshotScope.Timer("total-latency"),
+ snapshotCheckNeedsSnapshotLatency: snapshotScope.Timer("check-needs-snapshot-latency"),
+ snapshotPrepareLatency: snapshotScope.Timer("prepare-latency"),
+ snapshotMergeByBucketLatency: snapshotScope.Timer("merge-by-bucket-latency"),
+ snapshotMergeAcrossBucketsLatency: snapshotScope.Timer("merge-across-buckets-latency"),
+ snapshotChecksumLatency: snapshotScope.Timer("checksum-latency"),
+ snapshotPersistLatency: snapshotScope.Timer("persist-latency"),
+ snapshotCloseLatency: snapshotScope.Timer("close-latency"),
}
}
@@ -232,11 +268,16 @@ type shardFlushState struct {
sync.RWMutex
statesByTime map[xtime.UnixNano]fileOpState
initialized bool
+
+ // NB(bodu): Cache state on whether we snapshotted last or not to avoid
+ // going to disk to see if filesets are empty.
+ emptySnapshotOnDiskByTime map[xtime.UnixNano]bool
}
func newShardFlushState() shardFlushState {
return shardFlushState{
- statesByTime: make(map[xtime.UnixNano]fileOpState),
+ statesByTime: make(map[xtime.UnixNano]fileOpState),
+ emptySnapshotOnDiskByTime: make(map[xtime.UnixNano]bool),
}
}
@@ -246,7 +287,7 @@ func newDatabaseShard(
blockRetriever block.DatabaseBlockRetriever,
namespaceReaderMgr databaseNamespaceReaderManager,
increasingIndex increasingIndex,
- reverseIndex namespaceIndex,
+ reverseIndex NamespaceIndex,
needsBootstrap bool,
opts Options,
seriesOpts series.Options,
@@ -278,11 +319,12 @@ func newDatabaseShard(
contextPool: opts.ContextPool(),
flushState: newShardFlushState(),
tickWg: &sync.WaitGroup{},
+ coldWritesEnabled: namespaceMetadata.Options().ColdWritesEnabled(),
logger: opts.InstrumentOptions().Logger(),
metrics: newDatabaseShardMetrics(shard, scope),
}
s.insertQueue = newDatabaseShardInsertQueue(s.insertSeriesBatch,
- s.nowFn, scope)
+ s.nowFn, scope, opts.InstrumentOptions().Logger())
registerRuntimeOptionsListener := func(listener runtime.OptionsListener) {
elem := opts.RuntimeOptionsManager().RegisterListener(listener)
@@ -448,11 +490,13 @@ func (s *dbShard) OnRetrieveBlock(
entry, err = s.newShardEntry(id, newTagsIterArg(tags))
if err != nil {
// should never happen
- s.logger.Error("[invariant violated] unable to create shardEntry from retrieved block data",
- zap.Stringer("id", id),
- zap.Time("startTime", startTime),
- zap.Error(err),
- )
+ instrument.EmitAndLogInvariantViolation(s.opts.InstrumentOptions(),
+ func(logger *zap.Logger) {
+ logger.Error("unable to create shardEntry from retrieved block data",
+ zap.Stringer("id", id),
+ zap.Time("startTime", startTime),
+ zap.Error(err))
+ })
return
}
@@ -460,10 +504,13 @@ func (s *dbShard) OnRetrieveBlock(
// have been already been indexed when it was written
copiedID := entry.Series.ID()
copiedTagsIter := s.identifierPool.TagsIterator()
- copiedTagsIter.Reset(entry.Series.Tags())
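+ // NB: ResetFields reuses the series metadata fields to avoid
+ // reallocating the tags.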
+ copiedTagsIter.ResetFields(entry.Series.Metadata().Fields)
s.insertQueue.Insert(dbShardInsert{
entry: entry,
opts: dbShardInsertAsyncOptions{
+ // NB(r): Caching blocks should not be considered for
+ // new series insert rate limit.
+ skipRateLimit: true,
hasPendingRetrievedBlock: true,
pendingRetrievedBlock: dbShardPendingRetrievedBlock{
id: copiedID,
@@ -764,7 +811,7 @@ func (s *dbShard) tickAndExpire(
}
expired = expired[:0]
}
- // Continue
+ // Continue.
return true
})
@@ -828,7 +875,7 @@ func (s *dbShard) WriteTagged(
unit xtime.Unit,
annotation []byte,
wOpts series.WriteOptions,
-) (ts.Series, bool, error) {
+) (SeriesWrite, error) {
return s.writeAndIndex(ctx, id, tags, timestamp,
value, unit, annotation, wOpts, true)
}
@@ -841,7 +888,7 @@ func (s *dbShard) Write(
unit xtime.Unit,
annotation []byte,
wOpts series.WriteOptions,
-) (ts.Series, bool, error) {
+) (SeriesWrite, error) {
return s.writeAndIndex(ctx, id, ident.EmptyTagIterator, timestamp,
value, unit, annotation, wOpts, false)
}
@@ -856,11 +903,11 @@ func (s *dbShard) writeAndIndex(
annotation []byte,
wOpts series.WriteOptions,
shouldReverseIndex bool,
-) (ts.Series, bool, error) {
+) (SeriesWrite, error) {
// Prepare write
entry, opts, err := s.tryRetrieveWritableSeries(id)
if err != nil {
- return ts.Series{}, false, err
+ return SeriesWrite{}, err
}
writable := entry != nil
@@ -876,7 +923,7 @@ func (s *dbShard) writeAndIndex(
},
})
if err != nil {
- return ts.Series{}, false, err
+ return SeriesWrite{}, err
}
// Wait for the insert to be batched together and inserted
@@ -885,7 +932,7 @@ func (s *dbShard) writeAndIndex(
// Retrieve the inserted entry
entry, err = s.writableSeries(id, tags)
if err != nil {
- return ts.Series{}, false, err
+ return SeriesWrite{}, err
}
writable = true
@@ -895,8 +942,9 @@ func (s *dbShard) writeAndIndex(
var (
commitLogSeriesID ident.ID
- commitLogSeriesTags ident.Tags
commitLogSeriesUniqueIndex uint64
+ needsIndex bool
+ pendingIndexInsert writes.PendingIndexInsert
// Err on the side of caution and always write to the commitlog if writing
// async, since there is no information about whether the write succeeded
// or not.
@@ -906,7 +954,7 @@ func (s *dbShard) writeAndIndex(
// Perform write. No need to copy the annotation here because we're using it
// synchronously and all downstream code will copy anything they need to maintain
// a reference to.
- wasWritten, err = entry.Series.Write(ctx, timestamp, value, unit, annotation, wOpts)
+ wasWritten, _, err = entry.Series.Write(ctx, timestamp, value, unit, annotation, wOpts)
// Load series metadata before decrementing the writer count
// to ensure this metadata is snapshotted at a consistent state
// NB(r): We explicitly do not place the series ID back into a
@@ -914,18 +962,20 @@ func (s *dbShard) writeAndIndex(
// as the commit log need to use the reference without the
// overhead of ownership tracking. This makes taking a ref here safe.
commitLogSeriesID = entry.Series.ID()
- commitLogSeriesTags = entry.Series.Tags()
commitLogSeriesUniqueIndex = entry.Index
if err == nil && shouldReverseIndex {
if entry.NeedsIndexUpdate(s.reverseIndex.BlockStartForWriteTime(timestamp)) {
- err = s.insertSeriesForIndexingAsyncBatched(entry, timestamp,
- opts.writeNewSeriesAsync)
+ if !opts.writeNewSeriesAsync {
+ return SeriesWrite{}, fmt.Errorf("to index async need write new series to be enabled")
+ }
+ needsIndex = true
+ pendingIndexInsert = s.pendingIndexInsert(entry, timestamp)
}
}
// release the reference we got on entry from `writableSeries`
entry.DecrementReaderWriterCount()
if err != nil {
- return ts.Series{}, false, err
+ return SeriesWrite{}, err
}
} else {
// This is an asynchronous insert and write which means we need to clone the annotation
@@ -948,35 +998,40 @@ func (s *dbShard) writeAndIndex(
annotation: annotationClone,
opts: wOpts,
},
- hasPendingIndexing: shouldReverseIndex,
- pendingIndex: dbShardPendingIndex{
- timestamp: timestamp,
- enqueuedAt: s.nowFn(),
- },
})
if err != nil {
- return ts.Series{}, false, err
+ return SeriesWrite{}, err
}
+
+ if shouldReverseIndex {
+ if !opts.writeNewSeriesAsync {
+ return SeriesWrite{}, fmt.Errorf("to index async need write new series to be enabled")
+ }
+ needsIndex = true
+ pendingIndexInsert = s.pendingIndexInsert(result.entry, timestamp)
+ }
+
// NB(r): Make sure to use the copied ID which will eventually
// be set to the newly series inserted ID.
// The `id` var here is volatile after the context is closed
// and adding ownership tracking to use it in the commit log
// (i.e. registering a dependency on the context) is too expensive.
commitLogSeriesID = result.copiedID
- commitLogSeriesTags = result.copiedTags
commitLogSeriesUniqueIndex = result.entry.Index
}
- // Write commit log
- series := ts.Series{
- UniqueIndex: commitLogSeriesUniqueIndex,
- Namespace: s.namespace.ID(),
- ID: commitLogSeriesID,
- Tags: commitLogSeriesTags,
- Shard: s.shard,
- }
-
- return series, wasWritten, nil
+ // Return metadata useful for writing to commit log and indexing.
+ return SeriesWrite{
+ Series: ts.Series{
+ UniqueIndex: commitLogSeriesUniqueIndex,
+ Namespace: s.namespace.ID(),
+ ID: commitLogSeriesID,
+ Shard: s.shard,
+ },
+ WasWritten: wasWritten,
+ NeedsIndex: needsIndex,
+ PendingIndexInsert: pendingIndexInsert,
+ }, nil
}
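For context, a minimal sketch of how a caller might consume the new SeriesWrite result returned above (illustrative only; the commitLog, datapoint, and indexInserts names are hypothetical and not part of this patch):

    seriesWrite, err := shard.Write(ctx, id, timestamp, value, unit, annotation, wOpts)
    if err != nil {
        return err
    }
    if seriesWrite.WasWritten {
        // Use the returned series metadata to write to the commit log.
        err = commitLog.Write(ctx, seriesWrite.Series, datapoint, unit, annotation)
    }
    if seriesWrite.NeedsIndex {
        // Batch the pending insert for the reverse index instead of
        // enqueueing it from within the shard.
        indexInserts = append(indexInserts, seriesWrite.PendingIndexInsert)
    }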
func (s *dbShard) SeriesReadWriteRef(
@@ -1005,6 +1060,16 @@ func (s *dbShard) SeriesReadWriteRef(
// may have no effect if a collision with the same series
// being put in the insert queue may cause a block to be loaded to a
// series which gets discarded.
+ // TODO(r): We probably can't insert series synchronously here without
+ // stalling a lot of writes... need a better solution for bootstrapping.
+ // This is what can cause writes to degrade during bootstrap if the
+ // write lock is heavily contended.
+ // Having said that, now that bootstrap writes are kept in a separate
+ // "bootstrap" buffer in the series itself and merged with normal writes
+ // at the end of bootstrap, lock contention is somewhat mitigated: the
+ // shard lock is still contended, but series writes from commit log
+ // bootstrapping no longer block normal writes waiting for the ability
+ // to write to an individual series.
at := s.nowFn()
entry, err = s.insertSeriesSync(id, newTagsIterArg(tags), insertSyncOptions{
insertType: insertSyncIncReaderWriterCount,
@@ -1128,66 +1193,52 @@ func (s *dbShard) newShardEntry(
tagsArgOpts tagsArgOptions,
) (*lookup.Entry, error) {
// NB(r): As documented in storage/series.DatabaseSeries the series IDs
- // are garbage collected, hence we cast the ID to a BytesID that can't be
- // finalized.
+ // and metadata are garbage collected, hence we cast the ID to a BytesID
+ // that can't be finalized.
// Since series are purged so infrequently the overhead of not releasing
- // back an ID to a pool is amortized over a long period of time.
+ // back an ID and metadata to a pool is amortized over a long period of
+ // time.
+ // Also of note, when a series is indexed in multiple index segments it is
+ // worth keeping the metadata around so it can be referenced multiple times
+ // without creating a new []doc.Field array for all the tags each time.
+ // Hence this stays on the storage/series.DatabaseSeries for when it needs
+ // to be re-indexed.
var (
- seriesID ident.BytesID
- seriesTags ident.Tags
- err error
+ seriesMetadata doc.Document
+ err error
)
- if id.IsNoFinalize() {
- // If the ID is already marked as NoFinalize, meaning it won't be returned
- // to any pools, then we can directly take reference to it.
- // We make sure to use ident.BytesID for this ID to avoid inc/decref when
- // accessing the ID since it's not pooled and therefore the safety is not
- // required.
- seriesID = ident.BytesID(id.Bytes())
- } else {
- seriesID = ident.BytesID(append([]byte(nil), id.Bytes()...))
- seriesID.NoFinalize()
- }
-
switch tagsArgOpts.arg {
case tagsIterArg:
- // NB(r): Take a duplicate so that we don't double close the tag iterator
- // passed to this method
+ // NB(r): Take a duplicate so we read the tag iterator from the beginning.
tagsIter := tagsArgOpts.tagsIter.Duplicate()
- // Ensure tag iterator at start
- if tagsIter.CurrentIndex() != 0 {
- return nil, errNewShardEntryTagsIterNotAtIndexZero
- }
-
// Pass nil for the identifier pool because the pool will force us to use an array
// with a large capacity to store the tags. Since these tags are long-lived, it's
// better to allocate an array of the exact size to save memory.
- seriesTags, err = convert.TagsFromTagsIter(seriesID, tagsIter, nil)
+ seriesMetadata, err = convert.FromSeriesIDAndTagIter(id, tagsIter)
tagsIter.Close()
if err != nil {
return nil, err
}
- if err := convert.ValidateSeries(seriesID, seriesTags); err != nil {
+ case tagsArg:
+ seriesMetadata, err = convert.FromSeriesIDAndTags(id, tagsArgOpts.tags)
+ if err != nil {
return nil, err
}
- case tagsArg:
- seriesTags = tagsArgOpts.tags
-
default:
return nil, errNewShardEntryTagsTypeInvalid
}
- // Don't put tags back in a pool since the merge logic may still have a
- // handle on these.
- seriesTags.NoFinalize()
+
+ // Use the same bytes as the series metadata for the ID.
+ seriesID := ident.BytesID(seriesMetadata.ID)
uniqueIndex := s.increasingIndex.nextIndex()
newSeries := s.seriesPool.Get()
newSeries.Reset(series.DatabaseSeriesOptions{
ID: seriesID,
- Tags: seriesTags,
+ Metadata: seriesMetadata,
UniqueIndex: uniqueIndex,
BlockRetriever: s.seriesBlockRetriever,
OnRetrieveBlock: s.seriesOnRetrieveBlock,
@@ -1198,15 +1249,30 @@ func (s *dbShard) newShardEntry(
}
type insertAsyncResult struct {
- wg *sync.WaitGroup
- copiedID ident.ID
- copiedTags ident.Tags
+ wg *sync.WaitGroup
+ copiedID ident.ID
// entry is not guaranteed to be the final entry
// inserted into the shard map in case there is already
// an existing entry waiting in the insert queue
entry *lookup.Entry
}
+func (s *dbShard) pendingIndexInsert(
+ entry *lookup.Entry,
+ timestamp time.Time,
+) writes.PendingIndexInsert {
+ // inc a ref on the entry to ensure it's valid until the queue acts upon it.
+ entry.OnIndexPrepare()
+ return writes.PendingIndexInsert{
+ Entry: index.WriteBatchEntry{
+ Timestamp: timestamp,
+ OnIndexSeries: entry,
+ EnqueuedAt: s.nowFn(),
+ },
+ Document: entry.Series.Metadata(),
+ }
+}
+
func (s *dbShard) insertSeriesForIndexingAsyncBatched(
entry *lookup.Entry,
timestamp time.Time,
@@ -1218,6 +1284,9 @@ func (s *dbShard) insertSeriesForIndexingAsyncBatched(
wg, err := s.insertQueue.Insert(dbShardInsert{
entry: entry,
opts: dbShardInsertAsyncOptions{
+ // NB(r): Just indexing, should not be considered for new
+ // series insert rate limiting.
+ skipRateLimit: true,
hasPendingIndexing: true,
pendingIndex: dbShardPendingIndex{
timestamp: timestamp,
@@ -1266,10 +1335,9 @@ func (s *dbShard) insertSeriesAsyncBatched(
})
return insertAsyncResult{
wg: wg,
- // Make sure to return the copied ID from the new series
- copiedID: entry.Series.ID(),
- copiedTags: entry.Series.Tags(),
- entry: entry,
+ // Make sure to return the copied ID from the new series.
+ copiedID: entry.Series.ID(),
+ entry: entry,
}, err
}
@@ -1292,10 +1360,19 @@ func (s *dbShard) insertSeriesSync(
tagsArgOpts tagsArgOptions,
opts insertSyncOptions,
) (*lookup.Entry, error) {
- var (
- entry *lookup.Entry
- err error
- )
+ // NB(r): Create new shard entry outside of write lock to reduce
+ // time using write lock.
+ newEntry, err := s.newShardEntry(id, tagsArgOpts)
+ if err != nil {
+ // should never happen
+ instrument.EmitAndLogInvariantViolation(s.opts.InstrumentOptions(),
+ func(logger *zap.Logger) {
+ logger.Error("insertSeriesSync error creating shard entry",
+ zap.String("id", id.String()),
+ zap.Error(err))
+ })
+ return nil, err
+ }
s.Lock()
unlocked := false
@@ -1305,29 +1382,17 @@ func (s *dbShard) insertSeriesSync(
}
}()
- entry, _, err = s.lookupEntryWithLock(id)
+ existingEntry, _, err := s.lookupEntryWithLock(id)
if err != nil && err != errShardEntryNotFound {
// Shard not taking inserts likely.
return nil, err
}
- if entry != nil {
- // Already inserted.
- return entry, nil
+ if existingEntry != nil {
+ // Already inserted, likely a race.
+ return existingEntry, nil
}
- entry, err = s.newShardEntry(id, tagsArgOpts)
- if err != nil {
- // should never happen
- instrument.EmitAndLogInvariantViolation(s.opts.InstrumentOptions(),
- func(logger *zap.Logger) {
- logger.Error("insertSeriesSync error creating shard entry",
- zap.String("id", id.String()),
- zap.Error(err))
- })
- return nil, err
- }
-
- s.insertNewShardEntryWithLock(entry)
+ s.insertNewShardEntryWithLock(newEntry)
// Track unlocking.
unlocked = true
@@ -1336,8 +1401,11 @@ func (s *dbShard) insertSeriesSync(
// Be sure to enqueue for indexing if requires a pending index.
if opts.hasPendingIndex {
if _, err := s.insertQueue.Insert(dbShardInsert{
- entry: entry,
+ entry: newEntry,
opts: dbShardInsertAsyncOptions{
+ // NB(r): Just indexing, should not be considered for new
+ // series insert rate limiting.
+ skipRateLimit: true,
hasPendingIndexing: opts.hasPendingIndex,
pendingIndex: opts.pendingIndex,
},
@@ -1350,10 +1418,10 @@ func (s *dbShard) insertSeriesSync(
// to increment the writer count so it's visible when we release
// the lock.
if opts.insertType == insertSyncIncReaderWriterCount {
- entry.IncrementReaderWriterCount()
+ newEntry.IncrementReaderWriterCount()
}
- return entry, nil
+ return newEntry, nil
}
func (s *dbShard) insertNewShardEntryWithLock(entry *lookup.Entry) {
@@ -1401,25 +1469,25 @@ func (s *dbShard) insertSeriesBatch(inserts []dbShardInsert) error {
// for the same ID.
entry, _, err := s.lookupEntryWithLock(inserts[i].entry.Series.ID())
if entry != nil {
- // Already exists so update the entry we're pointed at for this insert
+ // Already exists so update the entry we're pointed at for this insert.
inserts[i].entry = entry
}
if hasPendingIndexing || hasPendingWrite || hasPendingRetrievedBlock {
// We're definitely writing a value, ensure that the pending write is
- // visible before we release the lookup write lock
+ // visible before we release the lookup write lock.
inserts[i].entry.IncrementReaderWriterCount()
- // also indicate that we have a ref count on this entry for this operation
+ // also indicate that we have a ref count on this entry for this operation.
inserts[i].opts.entryRefCountIncremented = true
}
if err == nil {
- // Already inserted
+ // Already inserted.
continue
}
if err != errShardEntryNotFound {
- // Shard is not taking inserts
+ // Shard is not taking inserts.
s.Unlock()
// FOLLOWUP(prateek): is this an existing bug? why don't we need to release any ref's we've inc'd
// on entries in the loop before this point, i.e. in range [0, i). Otherwise, how are those entries
@@ -1450,6 +1518,7 @@ func (s *dbShard) insertSeriesBatch(inserts []dbShardInsert) error {
var (
entry = inserts[i].entry
releaseEntryRef = inserts[i].opts.entryRefCountIncremented
+ err error
)
if inserts[i].opts.hasPendingWrite {
@@ -1462,10 +1531,15 @@ func (s *dbShard) insertSeriesBatch(inserts []dbShardInsert) error {
// operation and there is nothing further to do with this value.
// TODO: Consider propagating the `wasWritten` argument back to the caller
// using waitgroup (or otherwise) in the future.
- _, err := entry.Series.Write(ctx, write.timestamp, write.value,
+ _, _, err = entry.Series.Write(ctx, write.timestamp, write.value,
write.unit, annotationBytes, write.opts)
if err != nil {
- s.metrics.insertAsyncWriteErrors.Inc(1)
+ if xerrors.IsInvalidParams(err) {
+ s.metrics.insertAsyncWriteInvalidParamsErrors.Inc(1)
+ } else {
+ s.metrics.insertAsyncWriteInternalErrors.Inc(1)
+ s.logger.Error("error with async insert write", zap.Error(err))
+ }
}
if write.annotation != nil {
@@ -1483,23 +1557,13 @@ func (s *dbShard) insertSeriesBatch(inserts []dbShardInsert) error {
// this method (insertSeriesBatch) via `entryRefCountIncremented` mechanism.
entry.OnIndexPrepare()
- id := entry.Series.ID()
- tags := entry.Series.Tags().Values()
-
- var d doc.Document
- d.ID = id.Bytes() // IDs from shard entries are always set NoFinalize
- d.Fields = make(doc.Fields, 0, len(tags))
- for _, tag := range tags {
- d.Fields = append(d.Fields, doc.Field{
- Name: tag.Name.Bytes(), // Tags from shard entries are always set NoFinalize
- Value: tag.Value.Bytes(), // Tags from shard entries are always set NoFinalize
- })
- }
- indexBatch.Append(index.WriteBatchEntry{
+ writeBatchEntry := index.WriteBatchEntry{
Timestamp: pendingIndex.timestamp,
OnIndexSeries: entry,
EnqueuedAt: pendingIndex.enqueuedAt,
- }, d)
+ }
+
+ indexBatch.Append(writeBatchEntry, entry.Series.Metadata())
}
if inserts[i].opts.hasPendingRetrievedBlock {
@@ -1507,6 +1571,9 @@ func (s *dbShard) insertSeriesBatch(inserts []dbShardInsert) error {
entry.Series.OnRetrieveBlock(block.id, block.tags, block.start, block.segment, block.nsCtx)
}
+ // Entries in the shard insert queue are one of:
+ // - new entries
+ // - existing entries that we've taken a ref on (marked as entryRefCountIncremented)
if releaseEntryRef {
entry.DecrementReaderWriterCount()
}
@@ -1514,8 +1581,11 @@ func (s *dbShard) insertSeriesBatch(inserts []dbShardInsert) error {
var err error
// index all requested entries in batch.
- if indexBatch.Len() > 0 {
+ if n := indexBatch.Len(); n > 0 {
err = s.reverseIndex.WriteBatch(indexBatch)
+ if err != nil {
+ s.metrics.insertAsyncIndexErrors.Inc(int64(n))
+ }
}
// Avoid goroutine spinning up to close this context
@@ -1570,12 +1640,12 @@ func (s *dbShard) FetchBlocksForColdFlush(
start time.Time,
version int,
nsCtx namespace.Context,
-) ([]xio.BlockReader, error) {
+) (block.FetchBlockResult, error) {
s.RLock()
entry, _, err := s.lookupEntryWithLock(seriesID)
s.RUnlock()
if entry == nil || err != nil {
- return nil, err
+ return block.FetchBlockResult{}, err
}
return entry.Series.FetchBlocksForColdFlush(ctx, start, version, nsCtx)
@@ -1649,6 +1719,11 @@ func (s *dbShard) FetchBlocksMetadataV2(
if err := proto.Unmarshal(encodedPageToken, token); err != nil {
return nil, nil, xerrors.NewInvalidParamsError(errShardInvalidPageToken)
}
+ } else {
+ // NB(bodu): Allow callers to specify that they only want results from disk.
+ if opts.OnlyDisk {
+ token.FlushedSeriesPhase = &pagetoken.PageToken_FlushedSeriesPhase{}
+ }
}
// NB(r): If returning mixed in memory and disk results, then we return anything
@@ -1846,7 +1921,14 @@ func (s *dbShard) FetchBlocksMetadataV2(
return result, nil, nil
}
-func (s *dbShard) PrepareBootstrap() error {
+func (s *dbShard) PrepareBootstrap(ctx context.Context) error {
+ ctx, span, sampled := ctx.StartSampledTraceSpan(tracepoint.ShardPrepareBootstrap)
+ defer span.Finish()
+
+ if sampled {
+ span.LogFields(log.Int("shard", int(s.shard)))
+ }
+
// Iterate flushed time ranges to determine which blocks are retrievable.
// NB(r): This must be done before bootstrap since during bootstrapping
// series will load blocks into series with series.LoadBlock(...) which
@@ -1877,7 +1959,7 @@ func (s *dbShard) initializeFlushStates() {
func (s *dbShard) UpdateFlushStates() {
fsOpts := s.opts.CommitLogOptions().FilesystemOptions()
readInfoFilesResults := fs.ReadInfoFiles(fsOpts.FilePathPrefix(), s.namespace.ID(), s.shard,
- fsOpts.InfoReaderBufferSize(), fsOpts.DecodingOptions())
+ fsOpts.InfoReaderBufferSize(), fsOpts.DecodingOptions(), persist.FileSetFlushType)
for _, result := range readInfoFilesResults {
if err := result.Err.Error(); err != nil {
@@ -1910,7 +1992,17 @@ func (s *dbShard) UpdateFlushStates() {
}
}
-func (s *dbShard) Bootstrap() error {
+func (s *dbShard) Bootstrap(
+ ctx context.Context,
+ nsCtx namespace.Context,
+) error {
+ ctx, span, sampled := ctx.StartSampledTraceSpan(tracepoint.ShardBootstrap)
+ defer span.Finish()
+
+ if sampled {
+ span.LogFields(log.Int("shard", int(s.shard)))
+ }
+
s.Lock()
if s.bootstrapState == Bootstrapped {
s.Unlock()
@@ -1926,7 +2018,7 @@ func (s *dbShard) Bootstrap() error {
multiErr := xerrors.NewMultiError()
// Initialize the flush states if we haven't called prepare bootstrap.
- if err := s.PrepareBootstrap(); err != nil {
+ if err := s.PrepareBootstrap(ctx); err != nil {
multiErr = multiErr.Add(err)
}
@@ -1937,6 +2029,14 @@ func (s *dbShard) Bootstrap() error {
multiErr = multiErr.Add(err)
}
+ // Move any bootstrap buffers into position for reading.
+ s.forEachShardEntry(func(entry *lookup.Entry) bool {
+ if err := entry.Series.Bootstrap(nsCtx); err != nil {
+ multiErr = multiErr.Add(err)
+ }
+ return true
+ })
+
s.Lock()
s.bootstrapState = Bootstrapped
s.Unlock()
@@ -2072,17 +2172,12 @@ func (s *dbShard) loadBlock(
}
func (s *dbShard) cacheShardIndices() error {
- retrieverMgr := s.opts.DatabaseBlockRetrieverManager()
+ retriever := s.DatabaseBlockRetriever
// May be nil depending on the caching policy.
- if retrieverMgr == nil {
+ if retriever == nil {
return nil
}
- retriever, err := retrieverMgr.Retriever(s.namespace)
- if err != nil {
- return err
- }
-
s.logger.Debug("caching shard indices", zap.Uint32("shard", s.ID()))
if err := retriever.CacheShardIndices([]uint32{s.ID()}); err != nil {
s.logger.Error("caching shard indices error",
@@ -2166,12 +2261,13 @@ func (s *dbShard) ColdFlush(
flushPreparer persist.FlushPreparer,
resources coldFlushReuseableResources,
nsCtx namespace.Context,
-) error {
+ onFlush persist.OnFlushSeries,
+) (ShardColdFlush, error) {
// We don't flush data when the shard is still bootstrapping.
s.RLock()
if s.bootstrapState != Bootstrapped {
s.RUnlock()
- return errShardNotBootstrappedToFlush
+ return shardColdFlush{}, errShardNotBootstrappedToFlush
}
// Use blockStatesSnapshotWithRLock to avoid having to re-acquire read lock.
blockStates := s.blockStatesSnapshotWithRLock()
@@ -2187,7 +2283,7 @@ func (s *dbShard) ColdFlush(
blockStatesSnapshot, bootstrapped := blockStates.UnwrapValue()
if !bootstrapped {
- return errFlushStateIsNotInitialized
+ return shardColdFlush{}, errFlushStateIsNotInitialized
}
var (
@@ -2199,7 +2295,7 @@ func (s *dbShard) ColdFlush(
// series and add them to the resources for further processing.
s.forEachShardEntry(func(entry *lookup.Entry) bool {
curr := entry.Series
- seriesID := curr.ID()
+ seriesMetadata := curr.Metadata()
blockStarts := curr.ColdFlushBlockStarts(blockStatesSnapshot)
blockStarts.ForEach(func(t xtime.UnixNano) {
// Cold flushes can only happen on blockStarts that have been
@@ -2221,15 +2317,18 @@ func (s *dbShard) ColdFlush(
seriesList = newIDList(idElementPool)
dirtySeriesToWrite[t] = seriesList
}
- element := seriesList.PushBack(seriesID)
+ element := seriesList.PushBack(seriesMetadata)
- dirtySeries.Set(idAndBlockStart{blockStart: t, id: seriesID}, element)
+ dirtySeries.Set(idAndBlockStart{
+ blockStart: t,
+ id: seriesMetadata.ID,
+ }, element)
})
return true
})
if loopErr != nil {
- return loopErr
+ return shardColdFlush{}, loopErr
}
if dirtySeries.Len() == 0 {
@@ -2237,12 +2336,17 @@ func (s *dbShard) ColdFlush(
// may be non-empty when dirtySeries is empty because we purposely
// leave empty seriesLists in the dirtySeriesToWrite map to avoid having
// to reallocate them in subsequent usages of the shared resource.
- return nil
+ return shardColdFlush{}, nil
}
+ flush := shardColdFlush{
+ shard: s,
+ doneFns: make([]shardColdFlushDone, 0, len(dirtySeriesToWrite)),
+ }
merger := s.newMergerFn(resources.fsReader, s.opts.DatabaseBlockOptions().DatabaseBlockAllocSize(),
s.opts.SegmentReaderPool(), s.opts.MultiReaderIteratorPool(),
- s.opts.IdentifierPool(), s.opts.EncoderPool(), s.opts.ContextPool(), s.namespace.Options())
+ s.opts.IdentifierPool(), s.opts.EncoderPool(), s.opts.ContextPool(),
+ s.opts.CommitLogOptions().FilesystemOptions().FilePathPrefix(), s.namespace.Options())
mergeWithMem := s.newFSMergeWithMemFn(s, s, dirtySeries, dirtySeriesToWrite)
// Loop through each block that we know has ColdWrites. Since each block
// has its own fileset, if we encounter an error while trying to persist
@@ -2263,54 +2367,18 @@ func (s *dbShard) ColdFlush(
}
nextVersion := coldVersion + 1
- err = merger.Merge(fsID, mergeWithMem, nextVersion, flushPreparer, nsCtx)
- if err != nil {
- multiErr = multiErr.Add(err)
- continue
- }
-
- // After writing the full block successfully update the ColdVersionFlushed number. This will
- // allow the SeekerManager to open a lease on the latest version of the fileset files because
- // the BlockLeaseVerifier will check the ColdVersionFlushed value, but the buffer only looks at
- // ColdVersionRetrievable so a concurrent tick will not yet cause the blocks in memory to be
- // evicted (which is the desired behavior because we haven't updated the open leases yet which
- // means the newly written data is not available for querying via the SeekerManager yet.)
- s.setFlushStateColdVersionFlushed(startTime, nextVersion)
-
- // Notify all block leasers that a new volume for the namespace/shard/blockstart
- // has been created. This will block until all leasers have relinquished their
- // leases.
- _, err = s.opts.BlockLeaseManager().UpdateOpenLeases(block.LeaseDescriptor{
- Namespace: s.namespace.ID(),
- Shard: s.ID(),
- BlockStart: startTime,
- }, block.LeaseState{Volume: nextVersion})
- // After writing the full block successfully **and** propagating the new lease to the
- // BlockLeaseManager, update the ColdVersionRetrievable in the flush state. Once this function
- // completes concurrent ticks will be able to evict the data from memory that was just flushed
- // (which is now safe to do since the SeekerManager has been notified of the presence of new
- // files).
- //
- // NB(rartoul): Ideally the ColdVersionRetrievable would only be updated if the call to UpdateOpenLeases
- // succeeded, but that would allow the ColdVersionRetrievable and ColdVersionFlushed numbers to drift
- // which would increase the complexity of the code to address a situation that is probably not
- // recoverable (failure to UpdateOpenLeases is an invariant violated error).
- s.setFlushStateColdVersionRetrievable(startTime, nextVersion)
+ close, err := merger.Merge(fsID, mergeWithMem, nextVersion, flushPreparer, nsCtx, onFlush)
if err != nil {
- instrument.EmitAndLogInvariantViolation(s.opts.InstrumentOptions(), func(l *zap.Logger) {
- l.With(
- zap.String("namespace", s.namespace.ID().String()),
- zap.Uint32("shard", s.ID()),
- zap.Time("blockStart", startTime),
- zap.Int("nextVersion", nextVersion),
- ).Error("failed to update open leases after updating flush state cold version")
- })
multiErr = multiErr.Add(err)
continue
}
+ flush.doneFns = append(flush.doneFns, shardColdFlushDone{
+ startTime: startTime,
+ nextVersion: nextVersion,
+ close: close,
+ })
}
-
- return multiErr.FinalError()
+ return flush, multiErr.FinalError()
}
func (s *dbShard) Snapshot(
@@ -2318,16 +2386,43 @@ func (s *dbShard) Snapshot(
snapshotTime time.Time,
snapshotPreparer persist.SnapshotPreparer,
nsCtx namespace.Context,
-) error {
+) (ShardSnapshotResult, error) {
// We don't snapshot data when the shard is still bootstrapping
s.RLock()
if s.bootstrapState != Bootstrapped {
s.RUnlock()
- return errShardNotBootstrappedToSnapshot
+ return ShardSnapshotResult{}, errShardNotBootstrappedToSnapshot
}
+
s.RUnlock()
- var multiErr xerrors.MultiError
+ // Record per-shard snapshot latency; there are not many shards, so it
+ // is safe to use a timer.
+ totalTimer := s.metrics.snapshotTotalLatency.Start()
+ defer totalTimer.Stop()
+
+ var needsSnapshot bool
+ checkNeedsSnapshotTimer := s.metrics.snapshotCheckNeedsSnapshotLatency.Start()
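+ // A snapshot is only needed if at least one series has buffered data
+ // for this block start.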
+ s.forEachShardEntry(func(entry *lookup.Entry) bool {
+ if !entry.Series.IsBufferEmptyAtBlockStart(blockStart) {
+ needsSnapshot = true
+ return false
+ }
+ return true
+ })
+ checkNeedsSnapshotTimer.Stop()
+
+ // Only terminate early when we would be overwriting an empty snapshot fileset on disk.
+ // TODO(bodu): We could bootstrap empty snapshot state in the bootstrap path to avoid doing
+ // extra snapshotting work after a bootstrap, since this cached state gets cleared.
+ s.flushState.RLock()
+ // NB(bodu): This always defaults to false if the record does not exist.
+ emptySnapshotOnDisk := s.flushState.emptySnapshotOnDiskByTime[xtime.ToUnixNano(blockStart)]
+ s.flushState.RUnlock()
+
+ if !needsSnapshot && emptySnapshotOnDisk {
+ return ShardSnapshotResult{}, nil
+ }
prepareOpts := persist.DataPrepareOptions{
NamespaceMetadata: s.namespace,
@@ -2343,20 +2438,25 @@ func (s *dbShard) Snapshot(
SnapshotTime: snapshotTime,
},
}
+ prepareTimer := s.metrics.snapshotPrepareLatency.Start()
prepared, err := snapshotPreparer.PrepareData(prepareOpts)
- // Add the err so the defer will capture it
- multiErr = multiErr.Add(err)
+ prepareTimer.Stop()
if err != nil {
- return err
+ return ShardSnapshotResult{}, err
}
- snapshotCtx := s.contextPool.Get()
+ var (
+ snapshotCtx = s.contextPool.Get()
+ persist int
+ stats series.SnapshotResultStats
+ multiErr xerrors.MultiError
+ )
s.forEachShardEntry(func(entry *lookup.Entry) bool {
series := entry.Series
// Use a temporary context here so the stream readers can be returned to
// the pool after we finish flushing the series
snapshotCtx.Reset()
- err := series.Snapshot(snapshotCtx, blockStart, prepared.Persist, nsCtx)
+ result, err := series.Snapshot(snapshotCtx, blockStart, prepared.Persist, nsCtx)
snapshotCtx.BlockingCloseReset()
if err != nil {
@@ -2366,14 +2466,47 @@ func (s *dbShard) Snapshot(
return false
}
+ if result.Persist {
+ persist++
+ }
+
+ // Add snapshot result to cumulative result.
+ stats.Add(result.Stats)
return true
})
- if err := prepared.Close(); err != nil {
- multiErr = multiErr.Add(err)
+ // Emit cumulative snapshot result timings.
+ if multiErr.NumErrors() == 0 {
+ s.metrics.snapshotMergeByBucketLatency.Record(stats.TimeMergeByBucket)
+ s.metrics.snapshotMergeAcrossBucketsLatency.Record(stats.TimeMergeAcrossBuckets)
+ s.metrics.snapshotChecksumLatency.Record(stats.TimeChecksum)
+ s.metrics.snapshotPersistLatency.Record(stats.TimePersist)
}
- return multiErr.FinalError()
+ closeTimer := s.metrics.snapshotCloseLatency.Start()
+ multiErr = multiErr.Add(prepared.Close())
+ closeTimer.Stop()
+
+ if err := multiErr.FinalError(); err != nil {
+ return ShardSnapshotResult{}, err
+ }
+
+ // Only update cached snapshot state if we successfully flushed data to disk.
+ s.flushState.Lock()
+ if needsSnapshot {
+ s.flushState.emptySnapshotOnDiskByTime[xtime.ToUnixNano(blockStart)] = false
+ } else {
+ // NB(bodu): If we just flushed an empty snapshot to disk, it means that the previous
+ // snapshot on disk was not empty (or we just bootstrapped and the cached state was lost).
+ // The snapshot we just flushed may or may not have data; whatever data we did flush
+ // would also be recoverable from the rotated commit log.
+ }
+ s.flushState.Unlock()
+
+ return ShardSnapshotResult{
+ SeriesPersist: persist,
+ }, nil
}
func (s *dbShard) FlushState(blockStart time.Time) (fileOpState, error) {
@@ -2517,17 +2650,6 @@ func (s *dbShard) Repair(
return repairer.Repair(ctx, nsCtx, nsMeta, tr, s)
}
-func (s *dbShard) TagsFromSeriesID(seriesID ident.ID) (ident.Tags, bool, error) {
- s.RLock()
- entry, _, err := s.lookupEntryWithLock(seriesID)
- s.RUnlock()
- if entry == nil || err != nil {
- return ident.Tags{}, false, err
- }
-
- return entry.Series.Tags(), true, nil
-}
-
func (s *dbShard) BootstrapState() BootstrapState {
s.RLock()
bs := s.bootstrapState
@@ -2535,6 +2657,20 @@ func (s *dbShard) BootstrapState() BootstrapState {
return bs
}
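+// DocRef returns the series metadata document for the given ID, if the
+// series exists in this shard.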
+func (s *dbShard) DocRef(id ident.ID) (doc.Document, bool, error) {
+ s.RLock()
+ defer s.RUnlock()
+
+ entry, _, err := s.lookupEntryWithLock(id)
+ if err == nil {
+ return entry.Series.Metadata(), true, nil
+ }
+ if err == errShardEntryNotFound {
+ return emptyDoc, false, nil
+ }
+ return emptyDoc, false, err
+}
+
func (s *dbShard) logFlushResult(r dbShardFlushResult) {
s.logger.Debug("shard flush outcome",
zap.Uint32("shard", s.ID()),
@@ -2542,6 +2678,70 @@ func (s *dbShard) logFlushResult(r dbShardFlushResult) {
)
}
+type shardColdFlushDone struct {
+ startTime time.Time
+ nextVersion int
+ close persist.DataCloser
+}
+
+type shardColdFlush struct {
+ shard *dbShard
+ doneFns []shardColdFlushDone
+}
+
+func (s shardColdFlush) Done() error {
+ multiErr := xerrors.NewMultiError()
+ for _, done := range s.doneFns {
+ startTime := done.startTime
+ nextVersion := done.nextVersion
+
+ if err := done.close(); err != nil {
+ multiErr = multiErr.Add(err)
+ continue
+ }
+ // After writing the full block successfully update the ColdVersionFlushed number. This will
+ // allow the SeekerManager to open a lease on the latest version of the fileset files because
+ // the BlockLeaseVerifier will check the ColdVersionFlushed value, but the buffer only looks at
+ // ColdVersionRetrievable so a concurrent tick will not yet cause the blocks in memory to be
+ // evicted (which is the desired behavior because we haven't updated the open leases yet which
+ // means the newly written data is not available for querying via the SeekerManager yet.)
+ s.shard.setFlushStateColdVersionFlushed(startTime, nextVersion)
+
+ // Notify all block leasers that a new volume for the namespace/shard/blockstart
+ // has been created. This will block until all leasers have relinquished their
+ // leases.
+ _, err := s.shard.opts.BlockLeaseManager().UpdateOpenLeases(block.LeaseDescriptor{
+ Namespace: s.shard.namespace.ID(),
+ Shard: s.shard.ID(),
+ BlockStart: startTime,
+ }, block.LeaseState{Volume: nextVersion})
+ // After writing the full block successfully **and** propagating the new lease to the
+ // BlockLeaseManager, update the ColdVersionRetrievable in the flush state. Once this function
+ // completes concurrent ticks will be able to evict the data from memory that was just flushed
+ // (which is now safe to do since the SeekerManager has been notified of the presence of new
+ // files).
+ //
+ // NB(rartoul): Ideally the ColdVersionRetrievable would only be updated if the call to UpdateOpenLeases
+ // succeeded, but that would allow the ColdVersionRetrievable and ColdVersionFlushed numbers to drift
+ // which would increase the complexity of the code to address a situation that is probably not
+ // recoverable (failure to UpdateOpenLeases is an invariant violated error).
+ s.shard.setFlushStateColdVersionRetrievable(startTime, nextVersion)
+ if err != nil {
+ instrument.EmitAndLogInvariantViolation(s.shard.opts.InstrumentOptions(), func(l *zap.Logger) {
+ l.With(
+ zap.String("namespace", s.shard.namespace.ID().String()),
+ zap.Uint32("shard", s.shard.ID()),
+ zap.Time("blockStart", startTime),
+ zap.Int("nextVersion", nextVersion),
+ ).Error("failed to update open leases after updating flush state cold version")
+ })
+ multiErr = multiErr.Add(err)
+ continue
+ }
+ }
+ return multiErr.FinalError()
+}
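A minimal sketch of the intended two-phase usage (an assumption about the calling code, which is outside this diff): ColdFlush persists the cold blocks, while lease propagation and flush-state updates are deferred until the caller invokes Done:

    flush, err := shard.ColdFlush(flushPreparer, resources, nsCtx, onFlush)
    if err != nil {
        return err
    }
    // ... potentially cold flush other shards first ...
    return flush.Done()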
+
// dbShardFlushResult is a helper struct for keeping track of the result of flushing all the
// series in the shard.
type dbShardFlushResult struct {
diff --git a/src/dbnode/storage/shard_fetch_blocks_metadata_test.go b/src/dbnode/storage/shard_fetch_blocks_metadata_test.go
index ecad3df4b2..8d282c24af 100644
--- a/src/dbnode/storage/shard_fetch_blocks_metadata_test.go
+++ b/src/dbnode/storage/shard_fetch_blocks_metadata_test.go
@@ -30,6 +30,7 @@ import (
"github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/generated/proto/pagetoken"
+ "github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/series"
@@ -194,7 +195,9 @@ func TestShardFetchBlocksMetadataV2WithSeriesCachePolicyNotCacheAll(t *testing.T
bytes := checked.NewBytes(data, nil)
bytes.IncRef()
- err = writer.Write(id, ident.Tags{}, bytes, checksum)
+ meta := persist.NewMetadataFromIDAndTags(id, ident.Tags{},
+ persist.MetadataOptions{})
+ err = writer.Write(meta, bytes, checksum)
require.NoError(t, err)
blockMetadataResult := block.NewFetchBlockMetadataResult(at,
diff --git a/src/dbnode/storage/shard_index_test.go b/src/dbnode/storage/shard_index_test.go
index c9092661a0..9f5087ebb5 100644
--- a/src/dbnode/storage/shard_index_test.go
+++ b/src/dbnode/storage/shard_index_test.go
@@ -21,9 +21,7 @@
package storage
import (
- "fmt"
"sync"
- "sync/atomic"
"testing"
"time"
@@ -31,9 +29,9 @@ import (
"github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/m3ninx/doc"
- xclock "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
+ xtest "github.com/m3db/m3/src/x/test"
xtime "github.com/m3db/m3/src/x/time"
"github.com/fortytw2/leaktest"
@@ -57,7 +55,7 @@ func TestShardInsertNamespaceIndex(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- idx := NewMocknamespaceIndex(ctrl)
+ idx := NewMockNamespaceIndex(ctrl)
idx.EXPECT().BlockStartForWriteTime(gomock.Any()).Return(blockStart).AnyTimes()
idx.EXPECT().WriteBatch(gomock.Any()).Do(
func(batch *index.WriteBatch) {
@@ -72,29 +70,29 @@ func TestShardInsertNamespaceIndex(t *testing.T) {
}
}).Return(nil).AnyTimes()
- shard := testDatabaseShardWithIndexFn(t, opts, idx)
+ shard := testDatabaseShardWithIndexFn(t, opts, idx, false)
shard.SetRuntimeOptions(runtime.NewOptions().SetWriteNewSeriesAsync(false))
defer shard.Close()
ctx := context.NewContext()
defer ctx.Close()
- _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID("foo"),
+ seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"),
ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))),
now, 1.0, xtime.Second, nil, series.WriteOptions{})
require.NoError(t, err)
- require.True(t, wasWritten)
+ require.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("foo"),
+ seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"),
ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))),
now, 2.0, xtime.Second, nil, series.WriteOptions{})
require.NoError(t, err)
- require.True(t, wasWritten)
+ require.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.Write(
+ seriesWrite, err = shard.Write(
ctx, ident.StringID("baz"), now, 1.0, xtime.Second, nil, series.WriteOptions{})
require.NoError(t, err)
- require.True(t, wasWritten)
+ require.True(t, seriesWrite.WasWritten)
lock.Lock()
defer lock.Unlock()
@@ -105,221 +103,105 @@ func TestShardInsertNamespaceIndex(t *testing.T) {
require.Equal(t, []byte("value"), indexWrites[0].Fields[0].Value)
}
-func TestShardAsyncInsertNamespaceIndex(t *testing.T) {
- defer leaktest.CheckTimeout(t, 2*time.Second)()
-
- opts := DefaultTestOptions()
- lock := sync.RWMutex{}
- indexWrites := []doc.Document{}
-
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
- idx := NewMocknamespaceIndex(ctrl)
- idx.EXPECT().WriteBatch(gomock.Any()).Do(
- func(batch *index.WriteBatch) {
- lock.Lock()
- indexWrites = append(indexWrites, batch.PendingDocs()...)
- lock.Unlock()
- }).Return(nil).AnyTimes()
-
- shard := testDatabaseShardWithIndexFn(t, opts, idx)
- shard.SetRuntimeOptions(runtime.NewOptions().SetWriteNewSeriesAsync(true))
- defer shard.Close()
-
- ctx := context.NewContext()
- defer ctx.Close()
- now := time.Now()
- _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID("foo"),
- ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))),
- now, 1.0, xtime.Second, nil, series.WriteOptions{})
- assert.NoError(t, err)
- assert.True(t, wasWritten)
-
- _, wasWritten, err = shard.Write(ctx, ident.StringID("bar"), now,
- 1.0, xtime.Second, nil, series.WriteOptions{})
- assert.NoError(t, err)
- assert.True(t, wasWritten)
-
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("baz"),
- ident.NewTagsIterator(ident.NewTags(
- ident.StringTag("all", "tags"),
- ident.StringTag("should", "be-present"),
- )),
- now, 1.0, xtime.Second, nil, series.WriteOptions{})
- assert.NoError(t, err)
- assert.True(t, wasWritten)
-
- for {
- lock.RLock()
- l := len(indexWrites)
- lock.RUnlock()
- if l == 2 {
- break
- }
- time.Sleep(10 * time.Millisecond)
- }
- lock.Lock()
- defer lock.Unlock()
-
- assert.Len(t, indexWrites, 2)
- for _, w := range indexWrites {
- if string(w.ID) == "foo" {
- assert.Equal(t, 1, len(w.Fields))
- assert.Equal(t, "name", string(w.Fields[0].Name))
- assert.Equal(t, "value", string(w.Fields[0].Value))
- } else if string(w.ID) == "baz" {
- assert.Equal(t, 2, len(w.Fields))
- assert.Equal(t, "all", string(w.Fields[0].Name))
- assert.Equal(t, "tags", string(w.Fields[0].Value))
- assert.Equal(t, "should", string(w.Fields[1].Name))
- assert.Equal(t, "be-present", string(w.Fields[1].Value))
- } else {
- assert.Fail(t, "unexpected write", w)
- }
- }
-}
-
-func TestShardAsyncIndexOnlyWhenNotIndexed(t *testing.T) {
+func TestShardAsyncInsertMarkIndexedForBlockStart(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
defer leaktest.CheckTimeout(t, 2*time.Second)()
- var numCalls int32
opts := DefaultTestOptions()
blockSize := time.Hour
now := time.Now()
nextWriteTime := now.Truncate(blockSize)
- idx := NewMocknamespaceIndex(ctrl)
- idx.EXPECT().BlockStartForWriteTime(gomock.Any()).
- DoAndReturn(func(t time.Time) xtime.UnixNano {
- return xtime.ToUnixNano(t.Truncate(blockSize))
- }).
- AnyTimes()
- idx.EXPECT().WriteBatch(gomock.Any()).Do(
- func(batch *index.WriteBatch) {
- if batch.Len() == 0 {
- panic(fmt.Errorf("expected batch of len 1")) // panic to avoid goroutine exit from require
- }
- onIdx := batch.PendingEntries()[0].OnIndexSeries
- onIdx.OnIndexSuccess(xtime.ToUnixNano(nextWriteTime)) // i.e. mark that the entry should not be indexed for an hour at least
- onIdx.OnIndexFinalize(xtime.ToUnixNano(nextWriteTime))
- current := atomic.AddInt32(&numCalls, 1)
- if current > 1 {
- panic("only need to index when not-indexed")
- }
- }).Return(nil)
-
- shard := testDatabaseShardWithIndexFn(t, opts, idx)
+ idx := NewMockNamespaceIndex(ctrl)
+ shard := testDatabaseShardWithIndexFn(t, opts, idx, false)
shard.SetRuntimeOptions(runtime.NewOptions().SetWriteNewSeriesAsync(true))
defer shard.Close()
ctx := context.NewContext()
defer ctx.Close()
- _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID("foo"),
+ // write first time
+ seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"),
ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))),
now, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
-
- for {
- if l := atomic.LoadInt32(&numCalls); l == 1 {
- break
+ assert.True(t, seriesWrite.WasWritten)
+ assert.True(t, seriesWrite.NeedsIndex)
+
+ // mark as indexed
+ seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexSuccess(xtime.ToUnixNano(nextWriteTime))
+ seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexFinalize(xtime.ToUnixNano(nextWriteTime))
+
+ start := time.Now()
+ for time.Since(start) < 10*time.Second {
+ entry, _, err := shard.tryRetrieveWritableSeries(ident.StringID("foo"))
+ require.NoError(t, err)
+ if entry == nil {
+ time.Sleep(10 * time.Millisecond)
+ continue
}
- time.Sleep(10 * time.Millisecond)
+ assert.True(t, entry.IndexedForBlockStart(xtime.ToUnixNano(nextWriteTime)))
+ break // done
}
-
- // ensure we don't index once we have already indexed
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("foo"),
- ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))),
- now.Add(time.Second), 2.0, xtime.Second, nil, series.WriteOptions{})
- assert.NoError(t, err)
- assert.True(t, wasWritten)
-
- // ensure attempting to write same point yields false and does not write
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("foo"),
- ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))),
- now.Add(time.Second), 2.0, xtime.Second, nil, series.WriteOptions{})
- assert.NoError(t, err)
- assert.False(t, wasWritten)
-
- l := atomic.LoadInt32(&numCalls)
- assert.Equal(t, int32(1), l)
-
- entry, _, err := shard.tryRetrieveWritableSeries(ident.StringID("foo"))
- assert.NoError(t, err)
- assert.True(t, entry.IndexedForBlockStart(xtime.ToUnixNano(nextWriteTime)))
}
func TestShardAsyncIndexIfExpired(t *testing.T) {
defer leaktest.CheckTimeout(t, 2*time.Second)()
- var numCalls int32
-
// Make now not rounded exactly to the block size
blockSize := time.Minute
now := time.Now().Truncate(blockSize).Add(time.Second)
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
- idx := NewMocknamespaceIndex(ctrl)
+ idx := NewMockNamespaceIndex(ctrl)
idx.EXPECT().BlockStartForWriteTime(gomock.Any()).
DoAndReturn(func(t time.Time) xtime.UnixNano {
return xtime.ToUnixNano(t.Truncate(blockSize))
}).
AnyTimes()
- idx.EXPECT().WriteBatch(gomock.Any()).
- Return(nil).
- Do(func(batch *index.WriteBatch) {
- for _, b := range batch.PendingEntries() {
- blockStart := b.Timestamp.Truncate(blockSize)
- b.OnIndexSeries.OnIndexSuccess(xtime.ToUnixNano(blockStart))
- b.OnIndexSeries.OnIndexFinalize(xtime.ToUnixNano(blockStart))
- atomic.AddInt32(&numCalls, 1)
- }
- }).
- AnyTimes()
opts := DefaultTestOptions()
- shard := testDatabaseShardWithIndexFn(t, opts, idx)
+ shard := testDatabaseShardWithIndexFn(t, opts, idx, false)
shard.SetRuntimeOptions(runtime.NewOptions().SetWriteNewSeriesAsync(true))
defer shard.Close()
ctx := context.NewContext()
defer ctx.Close()
- _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID("foo"),
+ seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"),
ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))),
now, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
-
- // wait till we're done indexing.
- indexed := xclock.WaitUntil(func() bool {
- return atomic.LoadInt32(&numCalls) == 1
- }, 2*time.Second)
- assert.True(t, indexed)
+ assert.True(t, seriesWrite.WasWritten)
+ assert.True(t, seriesWrite.NeedsIndex)
+
+ // mark as indexed
+ seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexSuccess(xtime.ToUnixNano(now.Truncate(blockSize)))
+ seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexFinalize(xtime.ToUnixNano(now.Truncate(blockSize)))
+
+ // make sure the current block is marked as indexed
+ start := time.Now()
+ for time.Since(start) < 10*time.Second {
+ entry, _, err := shard.tryRetrieveWritableSeries(ident.StringID("foo"))
+ require.NoError(t, err)
+ if entry == nil {
+ time.Sleep(10 * time.Millisecond)
+ continue
+ }
+ assert.True(t, entry.IndexedForBlockStart(
+ xtime.ToUnixNano(now.Truncate(blockSize))))
+ break // done
+ }
- // ensure we index because it's expired
+ // ensure we would need to index next block because it's expired
nextWriteTime := now.Add(blockSize)
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("foo"),
+ seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"),
ident.NewTagsIterator(ident.NewTags(ident.StringTag("name", "value"))),
nextWriteTime, 2.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
-
- // wait till we're done indexing.
- reIndexed := xclock.WaitUntil(func() bool {
- return atomic.LoadInt32(&numCalls) == 2
- }, 2*time.Second)
- assert.True(t, reIndexed)
-
- entry, _, err := shard.tryRetrieveWritableSeries(ident.StringID("foo"))
- assert.NoError(t, err)
-
- // make sure we indexed the second write
- assert.True(t, entry.IndexedForBlockStart(
- xtime.ToUnixNano(nextWriteTime.Truncate(blockSize))))
+ assert.True(t, seriesWrite.WasWritten)
+ assert.True(t, seriesWrite.NeedsIndex)
}
// TODO(prateek): wire tests above to use the field `ts`
diff --git a/src/dbnode/storage/shard_insert_queue.go b/src/dbnode/storage/shard_insert_queue.go
index 0805d172d1..fe66bfd45a 100644
--- a/src/dbnode/storage/shard_insert_queue.go
+++ b/src/dbnode/storage/shard_insert_queue.go
@@ -22,20 +22,28 @@ package storage
import (
"errors"
+ "strconv"
"sync"
"time"
"github.com/m3db/m3/src/dbnode/clock"
+ "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/storage/series/lookup"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/ident"
+ xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
- "github.com/m3db/m3/src/dbnode/namespace"
"github.com/uber-go/tally"
+ "go.uber.org/atomic"
+ "go.uber.org/zap"
+)
+
+const (
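+ // resetShardInsertsEvery is how often the insert slices are reallocated
+ // so that occasional large bursts do not pin memory indefinitely.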
+ resetShardInsertsEvery = 3 * time.Minute
)
var (
@@ -62,21 +70,23 @@ type dbShardInsertQueue struct {
// rate limits, protected by mutex
insertBatchBackoff time.Duration
- insertPerSecondLimit int
+ insertPerSecondLimit *atomic.Uint64
- insertPerSecondLimitWindowNanos int64
- insertPerSecondLimitWindowValues int
+ insertPerSecondLimitWindowNanos *atomic.Uint64
+ insertPerSecondLimitWindowValues *atomic.Uint64
currBatch *dbShardInsertBatch
notifyInsert chan struct{}
closeCh chan struct{}
metrics dbShardInsertQueueMetrics
+ logger *zap.Logger
}
type dbShardInsertQueueMetrics struct {
insertsNoPendingWrite tally.Counter
insertsPendingWrite tally.Counter
+ insertsBatchErrors tally.Counter
}
func newDatabaseShardInsertQueueMetrics(
@@ -91,69 +101,10 @@ func newDatabaseShardInsertQueueMetrics(
insertsPendingWrite: scope.Tagged(map[string]string{
insertPendingWriteTagName: "yes",
}).Counter(insertName),
+ insertsBatchErrors: scope.Counter("inserts-batch.errors"),
}
}
-type dbShardInsertBatch struct {
- wg *sync.WaitGroup
- inserts []dbShardInsert
-}
-
-type dbShardInsertAsyncOptions struct {
- pendingWrite dbShardPendingWrite
- pendingRetrievedBlock dbShardPendingRetrievedBlock
- pendingIndex dbShardPendingIndex
-
- hasPendingWrite bool
- hasPendingRetrievedBlock bool
- hasPendingIndexing bool
-
- // NB(prateek): `entryRefCountIncremented` indicates if the
- // entry provided along with the dbShardInsertAsyncOptions
- // already has it's ref count incremented. It's used to
- // correctly manage the lifecycle of the entry across the
- // shard -> shard Queue -> shard boundaries.
- entryRefCountIncremented bool
-}
-
-type dbShardInsert struct {
- entry *lookup.Entry
- opts dbShardInsertAsyncOptions
-}
-
-var dbShardInsertZeroed = dbShardInsert{}
-
-type dbShardPendingWrite struct {
- timestamp time.Time
- value float64
- unit xtime.Unit
- annotation checked.Bytes
- opts series.WriteOptions
-}
-
-type dbShardPendingIndex struct {
- timestamp time.Time
- enqueuedAt time.Time
-}
-
-type dbShardPendingRetrievedBlock struct {
- id ident.ID
- tags ident.TagIterator
- start time.Time
- segment ts.Segment
- nsCtx namespace.Context
-}
-
-func (b *dbShardInsertBatch) reset() {
- b.wg = &sync.WaitGroup{}
- // We always expect to be waiting for an insert
- b.wg.Add(1)
- for i := range b.inserts {
- b.inserts[i] = dbShardInsertZeroed
- }
- b.inserts = b.inserts[:0]
-}
-
type dbShardInsertEntryBatchFn func(inserts []dbShardInsert) error
// newDatabaseShardInsertQueue creates a new shard insert queue. The shard
@@ -175,26 +126,36 @@ func newDatabaseShardInsertQueue(
insertEntryBatchFn dbShardInsertEntryBatchFn,
nowFn clock.NowFn,
scope tally.Scope,
+ logger *zap.Logger,
) *dbShardInsertQueue {
- currBatch := &dbShardInsertBatch{}
- currBatch.reset()
- subscope := scope.SubScope("insert-queue")
+ scope = scope.SubScope("insert-queue")
+ currBatch := newDbShardInsertBatch(nowFn, scope)
return &dbShardInsertQueue{
nowFn: nowFn,
insertEntryBatchFn: insertEntryBatchFn,
sleepFn: time.Sleep,
currBatch: currBatch,
- notifyInsert: make(chan struct{}, 1),
- closeCh: make(chan struct{}, 1),
- metrics: newDatabaseShardInsertQueueMetrics(subscope),
+ // NB(r): Use 2 * num cores so that every per-CPU-core insert queue
+ // can always enqueue a notification without it being lost.
+ notifyInsert: make(chan struct{}, 2*xsync.NumCores()),
+ closeCh: make(chan struct{}, 1),
+ insertPerSecondLimit: atomic.NewUint64(0),
+ insertPerSecondLimitWindowNanos: atomic.NewUint64(0),
+ insertPerSecondLimitWindowValues: atomic.NewUint64(0),
+ metrics: newDatabaseShardInsertQueueMetrics(scope),
+ logger: logger,
}
}
func (q *dbShardInsertQueue) SetRuntimeOptions(value runtime.Options) {
q.Lock()
q.insertBatchBackoff = value.WriteNewSeriesBackoffDuration()
- q.insertPerSecondLimit = value.WriteNewSeriesLimitPerShardPerSecond()
q.Unlock()
+
+ // Use atomics so no locks are needed beyond the per-CPU-core locks.
+ v := uint64(value.WriteNewSeriesLimitPerShardPerSecond())
+ q.insertPerSecondLimit.Store(v)
}
func (q *dbShardInsertQueue) insertLoop() {
@@ -202,9 +163,12 @@ func (q *dbShardInsertQueue) insertLoop() {
close(q.closeCh)
}()
- var lastInsert time.Time
- freeBatch := &dbShardInsertBatch{}
- freeBatch.reset()
+ var (
+ lastInsert time.Time
+ allInserts []dbShardInsert
+ allInsertsLastReset time.Time
+ )
+ batch := newDbShardInsertBatch(q.nowFn, tally.NoopScope)
for range q.notifyInsert {
// Check if inserting too fast
elapsedSinceLastInsert := q.nowFn().Sub(lastInsert)
@@ -213,37 +177,51 @@ func (q *dbShardInsertQueue) insertLoop() {
var (
state dbShardInsertQueueState
backoff time.Duration
- batch *dbShardInsertBatch
)
q.Lock()
state = q.state
if elapsedSinceLastInsert < q.insertBatchBackoff {
// Need to backoff before rotate and insert
backoff = q.insertBatchBackoff - elapsedSinceLastInsert
- } else {
- // No backoff required, rotate and go
- batch = q.currBatch
- q.currBatch = freeBatch
}
q.Unlock()
if backoff > 0 {
q.sleepFn(backoff)
- q.Lock()
- // Rotate after backoff
- batch = q.currBatch
- q.currBatch = freeBatch
- q.Unlock()
}
- if len(batch.inserts) > 0 {
- q.insertEntryBatchFn(batch.inserts)
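+ // Rotate swaps the live per-CPU-core batches into our local batch so
+ // writers keep appending while this loop drains the rotated inserts.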
+ batchWg := q.currBatch.Rotate(batch)
+
+ // NB(r): Either reset (to avoid spiky allocations sticking around
+ // forever) or reuse the existing slice.
+ now := q.nowFn()
+ if now.Sub(allInsertsLastReset) > resetShardInsertsEvery {
+ allInserts = nil
+ allInsertsLastReset = now
+ } else {
+ allInserts = allInserts[:0]
+ }
+ // Batch all per-core inserts together for a single insertion.
+ for _, batchByCPUCore := range batch.insertsByCPUCore {
+ batchByCPUCore.Lock()
+ allInserts = append(allInserts, batchByCPUCore.inserts...)
+ batchByCPUCore.Unlock()
+ }
+
+ err := q.insertEntryBatchFn(allInserts)
+ if err != nil {
+ q.metrics.insertsBatchErrors.Inc(1)
+ q.logger.Error("shard insert queue batch insert failed",
+ zap.Error(err))
}
- batch.wg.Done()
- // Set the free batch
- batch.reset()
- freeBatch = batch
+ batchWg.Done()
+
+ // Memset optimization to clear inserts holding refs to objects.
+ var insertZeroValue dbShardInsert
+ for i := range allInserts {
+ allInserts[i] = insertZeroValue
+ }
lastInsert = q.nowFn()
@@ -277,11 +255,11 @@ func (q *dbShardInsertQueue) Stop() error {
q.state = dbShardInsertQueueStateClosed
q.Unlock()
- // Final flush
+ // Final flush.
select {
case q.notifyInsert <- struct{}{}:
default:
- // Loop busy, already ready to consume notification
+ // Loop busy, already ready to consume notification.
}
// wait till other go routine is done
@@ -291,34 +269,42 @@ func (q *dbShardInsertQueue) Stop() error {
}
func (q *dbShardInsertQueue) Insert(insert dbShardInsert) (*sync.WaitGroup, error) {
- windowNanos := q.nowFn().Truncate(time.Second).UnixNano()
-
- q.Lock()
- if q.state != dbShardInsertQueueStateOpen {
- q.Unlock()
- return nil, errShardInsertQueueNotOpen
- }
- if limit := q.insertPerSecondLimit; limit > 0 {
- if q.insertPerSecondLimitWindowNanos != windowNanos {
- // Rolled into a new window
- q.insertPerSecondLimitWindowNanos = windowNanos
- q.insertPerSecondLimitWindowValues = 0
- }
- q.insertPerSecondLimitWindowValues++
- if q.insertPerSecondLimitWindowValues > limit {
- q.Unlock()
- return nil, errNewSeriesInsertRateLimitExceeded
+ if !insert.opts.skipRateLimit {
+ if limit := q.insertPerSecondLimit.Load(); limit > 0 {
+ windowNanos := uint64(q.nowFn().Truncate(time.Second).UnixNano())
+ currLimitWindowNanos := q.insertPerSecondLimitWindowNanos.Load()
+ if currLimitWindowNanos != windowNanos {
+ // Rolled into a new window.
+ if q.insertPerSecondLimitWindowNanos.CAS(currLimitWindowNanos, windowNanos) {
+ // If we managed to set it to the new window, reset the counter;
+ // otherwise another goroutine got there first and
+ // will zero the counter.
+ q.insertPerSecondLimitWindowValues.Store(0)
+ }
+ }
+ if q.insertPerSecondLimitWindowValues.Inc() > uint64(limit) {
+ return nil, errNewSeriesInsertRateLimitExceeded
+ }
}
}
- q.currBatch.inserts = append(q.currBatch.inserts, insert)
- wg := q.currBatch.wg
- q.Unlock()
- // Notify insert loop
- select {
- case q.notifyInsert <- struct{}{}:
- default:
- // Loop busy, already ready to consume notification
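+ // Append to this CPU core's batch to reduce lock contention between
+ // concurrent writers.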
+ inserts := q.currBatch.insertsByCPUCore[xsync.CPUCore()]
+ inserts.Lock()
+ // Track whether this is the first insert; if so we must notify the
+ // insert loop, otherwise a notification is already pending.
+ firstInsert := len(inserts.inserts) == 0
+ inserts.inserts = append(inserts.inserts, insert)
+ wg := inserts.wg
+ inserts.Unlock()
+
+ // Notify the insert loop, only required for the first insert on
+ // this CPU core.
+ if firstInsert {
+ select {
+ case q.notifyInsert <- struct{}{}:
+ default:
+ // Loop busy, already ready to consume notification.
+ }
}
if insert.opts.hasPendingWrite {
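
The window check above is lock-free: writers pay only atomic operations, and the first goroutine to CAS in a new window start zeroes the counter. A minimal self-contained sketch of the same scheme (hypothetical names, assuming go.uber.org/atomic):

package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

// windowLimiter sketches the insert queue's per-second rate limit.
type windowLimiter struct {
	limit        *atomic.Uint64 // 0 means unlimited
	windowNanos  *atomic.Uint64 // start of the current one-second window
	windowValues *atomic.Uint64 // values counted in the current window
}

func newWindowLimiter(limit uint64) *windowLimiter {
	return &windowLimiter{
		limit:        atomic.NewUint64(limit),
		windowNanos:  atomic.NewUint64(0),
		windowValues: atomic.NewUint64(0),
	}
}

// allow reports whether another insert fits in this second's budget.
func (l *windowLimiter) allow(now time.Time) bool {
	limit := l.limit.Load()
	if limit == 0 {
		return true
	}
	window := uint64(now.Truncate(time.Second).UnixNano())
	curr := l.windowNanos.Load()
	if curr != window {
		// Whichever goroutine wins the CAS zeroes the counter; losers
		// proceed knowing the winner has reset (or will reset) it.
		if l.windowNanos.CAS(curr, window) {
			l.windowValues.Store(0)
		}
	}
	return l.windowValues.Inc() <= limit
}

func main() {
	l := newWindowLimiter(2)
	now := time.Now()
	fmt.Println(l.allow(now), l.allow(now), l.allow(now)) // true true false
}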
@@ -329,3 +315,176 @@ func (q *dbShardInsertQueue) Insert(insert dbShardInsert) (*sync.WaitGroup, erro
return wg, nil
}
+
+type dbShardInsertBatch struct {
+ nowFn clock.NowFn
+ wg *sync.WaitGroup
+ // Note: since insertsByCPUCore is allocated when the
+ // dbShardInsertBatch is constructed and then never modified
+ // it is safe to read concurrently (but not modify, obviously).
+ insertsByCPUCore []*dbShardInsertsByCPUCore
+ lastReset time.Time
+}
+
+type dbShardInsertsByCPUCore struct {
+ sync.Mutex
+
+ wg *sync.WaitGroup
+ inserts []dbShardInsert
+ metrics dbShardInsertsByCPUCoreMetrics
+}
+
+type dbShardInsert struct {
+ entry *lookup.Entry
+ opts dbShardInsertAsyncOptions
+}
+
+type dbShardInsertAsyncOptions struct {
+ skipRateLimit bool
+
+ pendingWrite dbShardPendingWrite
+ pendingRetrievedBlock dbShardPendingRetrievedBlock
+ pendingIndex dbShardPendingIndex
+
+ hasPendingWrite bool
+ hasPendingRetrievedBlock bool
+ hasPendingIndexing bool
+
+ // NB(prateek): `entryRefCountIncremented` indicates if the
+ // entry provided along with the dbShardInsertAsyncOptions
+ // already has its ref count incremented. It's used to
+ // correctly manage the lifecycle of the entry across the
+ // shard -> shard Queue -> shard boundaries.
+ entryRefCountIncremented bool
+}
+
+type dbShardPendingWrite struct {
+ timestamp time.Time
+ value float64
+ unit xtime.Unit
+ annotation checked.Bytes
+ opts series.WriteOptions
+}
+
+type dbShardPendingIndex struct {
+ timestamp time.Time
+ enqueuedAt time.Time
+}
+
+type dbShardPendingRetrievedBlock struct {
+ id ident.ID
+ tags ident.TagIterator
+ start time.Time
+ segment ts.Segment
+ nsCtx namespace.Context
+}
+
+func newDbShardInsertBatch(
+ nowFn clock.NowFn,
+ scope tally.Scope,
+) *dbShardInsertBatch {
+ b := &dbShardInsertBatch{
+ nowFn: nowFn,
+ wg: &sync.WaitGroup{},
+ }
+ numCores := xsync.NumCores()
+ for i := 0; i < numCores; i++ {
+ b.insertsByCPUCore = append(b.insertsByCPUCore, &dbShardInsertsByCPUCore{
+ wg: b.wg,
+ metrics: newDBShardInsertsByCPUCoreMetrics(i, scope),
+ })
+ }
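+ // Prime each per-core batch with the initial wait group.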
+ b.Rotate(nil)
+ return b
+}
+
+type dbShardInsertsByCPUCoreMetrics struct {
+ rotateInserts tally.Counter
+}
+
+func newDBShardInsertsByCPUCoreMetrics(
+ cpuIndex int,
+ scope tally.Scope,
+) dbShardInsertsByCPUCoreMetrics {
+ scope = scope.Tagged(map[string]string{
+ "cpu-index": strconv.Itoa(cpuIndex),
+ })
+
+ return dbShardInsertsByCPUCoreMetrics{
+ rotateInserts: scope.Counter("rotate-inserts"),
+ }
+}
+
+func (b *dbShardInsertBatch) Rotate(target *dbShardInsertBatch) *sync.WaitGroup {
+ prevWg := b.wg
+
+ // We always expect to be waiting for an insert.
+ b.wg = &sync.WaitGroup{}
+ b.wg.Add(1)
+
+ reset := false
+ now := b.nowFn()
+ if now.Sub(b.lastReset) > resetShardInsertsEvery {
+ // NB(r): Sometimes this can grow very high, so we reset it
+ // relatively frequently.
+ reset = true
+ b.lastReset = now
+ }
+
+ // Rotate to target if we need to.
+ for idx, inserts := range b.insertsByCPUCore {
+ if target == nil {
+ // No target to rotate with.
+ inserts.Lock()
+ // Reset the inserts slice.
+ inserts.inserts = inserts.inserts[:0]
+ // Use new wait group.
+ inserts.wg = b.wg
+ inserts.Unlock()
+ continue
+ }
+
+ // First prepare the target to take the current batch's inserts.
+ targetInserts := target.insertsByCPUCore[idx]
+ targetInserts.Lock()
+
+ // Reset the target inserts since we'll take ref to them in a second.
+ var prevTargetInserts []dbShardInsert
+ if !reset {
+ // Only reuse if not resetting the allocation. Zero the entries
+ // first (memset optimization) to release refs to objects.
+ var zeroDbShardInsert dbShardInsert
+ for i := range targetInserts.inserts {
+ targetInserts.inserts[i] = zeroDbShardInsert
+ }
+ prevTargetInserts = targetInserts.inserts[:0]
+ }
+
+ // Lock the current batch inserts now ready to rotate to the target.
+ inserts.Lock()
+
+ // Update current slice refs to take target's inserts.
+ targetInserts.inserts = inserts.inserts
+ targetInserts.wg = inserts.wg
+
+ // Reuse the target's old slices.
+ inserts.inserts = prevTargetInserts
+
+ // Use new wait group.
+ inserts.wg = b.wg
+
+ // Unlock as early as possible for writes to keep enqueuing.
+ inserts.Unlock()
+
+ numTargetInserts := len(targetInserts.inserts)
+
+ // Now can unlock target inserts too.
+ targetInserts.Unlock()
+
+ if n := numTargetInserts; n > 0 {
+ inserts.metrics.rotateInserts.Inc(int64(n))
+ }
+ }
+
+ return prevWg
+}
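
The Rotate/drain split above hinges on this per-CPU-core sharding. A rough standalone sketch of the pattern (hypothetical names; assumes the xsync.NumCores and xsync.CPUCore helpers used above):

package main

import (
	"fmt"
	"sync"

	xsync "github.com/m3db/m3/src/x/sync"
)

// coreBatch is one lock-protected slice; writers on the same core share it.
type coreBatch struct {
	sync.Mutex
	items []string
}

// shardedBatch keeps one coreBatch per CPU core so concurrent writers
// rarely contend on the same mutex.
type shardedBatch struct {
	byCore []*coreBatch
}

func newShardedBatch() *shardedBatch {
	b := &shardedBatch{}
	for i := 0; i < xsync.NumCores(); i++ {
		b.byCore = append(b.byCore, &coreBatch{})
	}
	return b
}

// add appends to the batch owned by the calling goroutine's current core.
func (b *shardedBatch) add(v string) {
	c := b.byCore[xsync.CPUCore()]
	c.Lock()
	c.items = append(c.items, v)
	c.Unlock()
}

// drain gathers every per-core slice into one batch, as insertLoop does.
func (b *shardedBatch) drain() []string {
	var all []string
	for _, c := range b.byCore {
		c.Lock()
		all = append(all, c.items...)
		c.items = c.items[:0]
		c.Unlock()
	}
	return all
}

func main() {
	b := newShardedBatch()
	b.add("foo")
	b.add("bar")
	fmt.Println(len(b.drain())) // 2
}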
diff --git a/src/dbnode/storage/shard_insert_queue_test.go b/src/dbnode/storage/shard_insert_queue_test.go
index 190e6a609b..1190a80bb3 100644
--- a/src/dbnode/storage/shard_insert_queue_test.go
+++ b/src/dbnode/storage/shard_insert_queue_test.go
@@ -32,6 +32,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/uber-go/tally"
+ "go.uber.org/zap"
)
func TestShardInsertQueueBatchBackoff(t *testing.T) {
@@ -57,6 +58,10 @@ func TestShardInsertQueueBatchBackoff(t *testing.T) {
insertProgressWgs[i].Add(1)
}
q := newDatabaseShardInsertQueue(func(value []dbShardInsert) error {
+ if len(inserts) == len(insertWgs) {
+ return nil // Overflow.
+ }
+
inserts = append(inserts, value)
insertWgs[len(inserts)-1].Done()
insertProgressWgs[len(inserts)-1].Wait()
@@ -65,7 +70,7 @@ func TestShardInsertQueueBatchBackoff(t *testing.T) {
timeLock.Lock()
defer timeLock.Unlock()
return currTime
- }, tally.NoopScope)
+ }, tally.NoopScope, zap.NewNop())
q.insertBatchBackoff = backoff
@@ -143,9 +148,9 @@ func TestShardInsertQueueRateLimit(t *testing.T) {
timeLock.Lock()
defer timeLock.Unlock()
return currTime
- }, tally.NoopScope)
+ }, tally.NoopScope, zap.NewNop())
- q.insertPerSecondLimit = 2
+ q.insertPerSecondLimit.Store(2)
require.NoError(t, q.Start())
defer func() {
@@ -186,9 +191,9 @@ func TestShardInsertQueueRateLimit(t *testing.T) {
require.NoError(t, err)
q.Lock()
- expectedCurrWindow := currTime.Truncate(time.Second).UnixNano()
- assert.Equal(t, expectedCurrWindow, q.insertPerSecondLimitWindowNanos)
- assert.Equal(t, 1, q.insertPerSecondLimitWindowValues)
+ expectedCurrWindow := uint64(currTime.Truncate(time.Second).UnixNano())
+ assert.Equal(t, expectedCurrWindow, q.insertPerSecondLimitWindowNanos.Load())
+ assert.Equal(t, uint64(1), q.insertPerSecondLimitWindowValues.Load())
q.Unlock()
}
@@ -204,7 +209,7 @@ func TestShardInsertQueueFlushedOnClose(t *testing.T) {
q := newDatabaseShardInsertQueue(func(value []dbShardInsert) error {
atomic.AddInt64(&numInsertObserved, int64(len(value)))
return nil
- }, func() time.Time { return currTime }, tally.NoopScope)
+ }, func() time.Time { return currTime }, tally.NoopScope, zap.NewNop())
require.NoError(t, q.Start())
diff --git a/src/dbnode/storage/shard_new_map_gen.go b/src/dbnode/storage/shard_new_map_gen.go
index 230e3cd471..4ee45e906a 100644
--- a/src/dbnode/storage/shard_new_map_gen.go
+++ b/src/dbnode/storage/shard_new_map_gen.go
@@ -28,7 +28,7 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/dbnode/storage/shard_race_prop_test.go b/src/dbnode/storage/shard_race_prop_test.go
index 29ad1002cf..3d90be156b 100644
--- a/src/dbnode/storage/shard_race_prop_test.go
+++ b/src/dbnode/storage/shard_race_prop_test.go
@@ -187,9 +187,9 @@ func testShardTickWriteRace(t *testing.T, tickBatchSize, numSeries int) {
<-barrier
ctx := context.NewContext()
now := time.Now()
- _, wasWritten, err := shard.Write(ctx, id, now, 1.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err := shard.Write(ctx, id, now, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
ctx.BlockingClose()
}()
}
@@ -273,7 +273,10 @@ func TestShardTickBootstrapWriteRace(t *testing.T) {
wg.Done()
}
- assert.NoError(t, shard.Bootstrap())
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ assert.NoError(t, shard.Bootstrap(ctx, namespace.Context{ID: ident.StringID("foo")}))
for _, id := range writeIDs {
id := id
go func() {
@@ -281,9 +284,9 @@ func TestShardTickBootstrapWriteRace(t *testing.T) {
<-barrier
ctx := context.NewContext()
now := time.Now()
- _, wasWritten, err := shard.Write(ctx, id, now, 1.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err := shard.Write(ctx, id, now, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
ctx.BlockingClose()
}()
}
diff --git a/src/dbnode/storage/shard_ref_count_test.go b/src/dbnode/storage/shard_ref_count_test.go
index d21235e548..1ca60a44ae 100644
--- a/src/dbnode/storage/shard_ref_count_test.go
+++ b/src/dbnode/storage/shard_ref_count_test.go
@@ -66,21 +66,21 @@ func testShardWriteSyncRefCount(t *testing.T, opts Options) {
ctx := context.NewContext()
defer ctx.Close()
- _, wasWritten, err := shard.Write(ctx, ident.StringID("foo"), now, 1.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err := shard.Write(ctx, ident.StringID("foo"), now, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.Write(ctx, ident.StringID("foo"), now, 1.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.Write(ctx, ident.StringID("foo"), now, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.False(t, wasWritten)
+ assert.False(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.Write(ctx, ident.StringID("bar"), now, 2.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.Write(ctx, ident.StringID("bar"), now, 2.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.Write(ctx, ident.StringID("baz"), now, 3.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.Write(ctx, ident.StringID("baz"), now, 3.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
// ensure all entries have no references left
for _, id := range []string{"foo", "bar", "baz"} {
@@ -94,17 +94,17 @@ func testShardWriteSyncRefCount(t *testing.T, opts Options) {
// write already inserted series'
next := now.Add(time.Minute)
- _, wasWritten, err = shard.Write(ctx, ident.StringID("foo"), next, 1.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.Write(ctx, ident.StringID("foo"), next, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.Write(ctx, ident.StringID("bar"), next, 2.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.Write(ctx, ident.StringID("bar"), next, 2.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.Write(ctx, ident.StringID("baz"), next, 3.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.Write(ctx, ident.StringID("baz"), next, 3.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
// ensure all entries have no references left
for _, id := range []string{"foo", "bar", "baz"} {
@@ -122,7 +122,7 @@ func TestShardWriteTaggedSyncRefCountMockIndex(t *testing.T) {
blockSize := namespaceIndexOptions.BlockSize()
- idx := NewMocknamespaceIndex(ctrl)
+ idx := NewMockNamespaceIndex(ctrl)
idx.EXPECT().BlockStartForWriteTime(gomock.Any()).
DoAndReturn(func(t time.Time) xtime.UnixNano {
return xtime.ToUnixNano(t.Truncate(blockSize))
@@ -164,7 +164,9 @@ func TestShardWriteTaggedSyncRefCountSyncIndex(t *testing.T) {
)
opts = opts.SetIndexOptions(indexOpts)
- idx, err := newNamespaceIndexWithInsertQueueFn(md, testShardSet, newFn, opts)
+ idx, err := newNamespaceIndexWithInsertQueueFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newFn, opts)
assert.NoError(t, err)
defer func() {
@@ -174,11 +176,11 @@ func TestShardWriteTaggedSyncRefCountSyncIndex(t *testing.T) {
testShardWriteTaggedSyncRefCount(t, idx)
}
-func testShardWriteTaggedSyncRefCount(t *testing.T, idx namespaceIndex) {
+func testShardWriteTaggedSyncRefCount(t *testing.T, idx NamespaceIndex) {
var (
now = time.Now()
opts = DefaultTestOptions()
- shard = testDatabaseShardWithIndexFn(t, opts, idx)
+ shard = testDatabaseShardWithIndexFn(t, opts, idx, false)
)
shard.SetRuntimeOptions(runtime.NewOptions().
@@ -188,17 +190,17 @@ func testShardWriteTaggedSyncRefCount(t *testing.T, idx namespaceIndex) {
ctx := context.NewContext()
defer ctx.Close()
- _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, now, 1.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, now, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, now, 2.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, now, 2.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, now, 3.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, now, 3.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
// ensure all entries have no references left
for _, id := range []string{"foo", "bar", "baz"} {
@@ -212,17 +214,17 @@ func testShardWriteTaggedSyncRefCount(t *testing.T, idx namespaceIndex) {
// write already inserted series'
next := now.Add(time.Minute)
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, next, 1.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, next, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, next, 2.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, next, 2.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, next, 3.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, next, 3.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
// ensure all entries have no references left
for _, id := range []string{"foo", "bar", "baz"} {
@@ -256,17 +258,17 @@ func TestShardWriteAsyncRefCount(t *testing.T) {
ctx := context.NewContext()
defer ctx.Close()
- _, wasWritten, err := shard.Write(ctx, ident.StringID("foo"), now, 1.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err := shard.Write(ctx, ident.StringID("foo"), now, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.Write(ctx, ident.StringID("bar"), now, 2.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.Write(ctx, ident.StringID("bar"), now, 2.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.Write(ctx, ident.StringID("baz"), now, 3.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.Write(ctx, ident.StringID("baz"), now, 3.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
inserted := xclock.WaitUntil(func() bool {
counter, ok := testReporter.Counters()["dbshard.insert-queue.inserts"]
@@ -286,17 +288,17 @@ func TestShardWriteAsyncRefCount(t *testing.T) {
// write already inserted series'
next := now.Add(time.Minute)
- _, wasWritten, err = shard.Write(ctx, ident.StringID("foo"), next, 1.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.Write(ctx, ident.StringID("foo"), next, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.Write(ctx, ident.StringID("bar"), next, 2.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.Write(ctx, ident.StringID("bar"), next, 2.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.Write(ctx, ident.StringID("baz"), next, 3.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.Write(ctx, ident.StringID("baz"), next, 3.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
// ensure all entries have no references left
for _, id := range []string{"foo", "bar", "baz"} {
@@ -325,7 +327,7 @@ func TestShardWriteTaggedAsyncRefCountMockIndex(t *testing.T) {
blockSize := namespaceIndexOptions.BlockSize()
- idx := NewMocknamespaceIndex(ctrl)
+ idx := NewMockNamespaceIndex(ctrl)
idx.EXPECT().BlockStartForWriteTime(gomock.Any()).
DoAndReturn(func(t time.Time) xtime.UnixNano {
return xtime.ToUnixNano(t.Truncate(blockSize))
@@ -370,7 +372,9 @@ func TestShardWriteTaggedAsyncRefCountSyncIndex(t *testing.T) {
SetClockOptions(opts.ClockOptions().SetNowFn(nowFn))
opts = opts.SetIndexOptions(indexOpts)
- idx, err := newNamespaceIndexWithInsertQueueFn(md, testShardSet, newFn, opts)
+ idx, err := newNamespaceIndexWithInsertQueueFn(md,
+ namespace.NewRuntimeOptionsManager(md.ID().String()),
+ testShardSet, newFn, opts)
assert.NoError(t, err)
defer func() {
@@ -380,7 +384,7 @@ func TestShardWriteTaggedAsyncRefCountSyncIndex(t *testing.T) {
testShardWriteTaggedAsyncRefCount(t, idx, nowFn)
}
-func testShardWriteTaggedAsyncRefCount(t *testing.T, idx namespaceIndex, nowFn func() time.Time) {
+func testShardWriteTaggedAsyncRefCount(t *testing.T, idx NamespaceIndex, nowFn func() time.Time) {
testReporter := xmetrics.NewTestStatsReporter(xmetrics.NewTestStatsReporterOptions())
scope, closer := tally.NewRootScope(tally.ScopeOptions{
Reporter: testReporter,
@@ -400,7 +404,7 @@ func testShardWriteTaggedAsyncRefCount(t *testing.T, idx namespaceIndex, nowFn f
opts = opts.
SetClockOptions(opts.ClockOptions().SetNowFn(nowFn))
- shard := testDatabaseShardWithIndexFn(t, opts, idx)
+ shard := testDatabaseShardWithIndexFn(t, opts, idx, false)
shard.SetRuntimeOptions(runtime.NewOptions().
SetWriteNewSeriesAsync(true))
defer shard.Close()
@@ -408,20 +412,29 @@ func testShardWriteTaggedAsyncRefCount(t *testing.T, idx namespaceIndex, nowFn f
ctx := context.NewContext()
defer ctx.Close()
- _, wasWritten, err := shard.WriteTagged(ctx, ident.StringID("foo"),
+ seriesWrite, err := shard.WriteTagged(ctx, ident.StringID("foo"),
ident.EmptyTagIterator, now, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
+ assert.True(t, seriesWrite.NeedsIndex)
+ seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexSuccess(idx.BlockStartForWriteTime(now))
+ seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexFinalize(idx.BlockStartForWriteTime(now))
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("bar"),
+ seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("bar"),
ident.EmptyTagIterator, now, 2.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
+ assert.True(t, seriesWrite.NeedsIndex)
+ seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexSuccess(idx.BlockStartForWriteTime(now))
+ seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexFinalize(idx.BlockStartForWriteTime(now))
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("baz"),
+ seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("baz"),
ident.EmptyTagIterator, now, 3.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
+ assert.True(t, seriesWrite.NeedsIndex)
+ seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexSuccess(idx.BlockStartForWriteTime(now))
+ seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexFinalize(idx.BlockStartForWriteTime(now))
inserted := xclock.WaitUntil(func() bool {
counter, ok := testReporter.Counters()["dbshard.insert-queue.inserts"]
@@ -441,17 +454,17 @@ func testShardWriteTaggedAsyncRefCount(t *testing.T, idx namespaceIndex, nowFn f
// write already inserted series'
next := now.Add(time.Minute)
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, next, 1.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("foo"), ident.EmptyTagIterator, next, 1.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, next, 2.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("bar"), ident.EmptyTagIterator, next, 2.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
- _, wasWritten, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, next, 3.0, xtime.Second, nil, series.WriteOptions{})
+ seriesWrite, err = shard.WriteTagged(ctx, ident.StringID("baz"), ident.EmptyTagIterator, next, 3.0, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.True(t, wasWritten)
+ assert.True(t, seriesWrite.WasWritten)
// ensure all entries have no references left
for _, id := range []string{"foo", "bar", "baz"} {
diff --git a/src/dbnode/storage/shard_test.go b/src/dbnode/storage/shard_test.go
index 4ea1ee4870..06396b8274 100644
--- a/src/dbnode/storage/shard_test.go
+++ b/src/dbnode/storage/shard_test.go
@@ -45,6 +45,7 @@ import (
"github.com/m3db/m3/src/dbnode/ts"
xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
@@ -68,15 +69,16 @@ func (i *testIncreasingIndex) nextIndex() uint64 {
}
func testDatabaseShard(t *testing.T, opts Options) *dbShard {
- return testDatabaseShardWithIndexFn(t, opts, nil)
+ return testDatabaseShardWithIndexFn(t, opts, nil, false)
}
func testDatabaseShardWithIndexFn(
t *testing.T,
opts Options,
- idx namespaceIndex,
+ idx NamespaceIndex,
+ coldWritesEnabled bool,
) *dbShard {
- metadata, err := namespace.NewMetadata(defaultTestNs1ID, defaultTestNs1Opts)
+ metadata, err := namespace.NewMetadata(defaultTestNs1ID, defaultTestNs1Opts.SetColdWritesEnabled(coldWritesEnabled))
require.NoError(t, err)
nsReaderMgr := newNamespaceReaderManager(metadata, tally.NoopScope, opts)
seriesOpts := NewSeriesOptionsFromOptions(opts, defaultTestNs1Opts.RetentionOptions()).
@@ -89,7 +91,6 @@ func testDatabaseShardWithIndexFn(
func addMockSeries(ctrl *gomock.Controller, shard *dbShard, id ident.ID, tags ident.Tags, index uint64) *series.MockDatabaseSeries {
series := series.NewMockDatabaseSeries(ctrl)
series.EXPECT().ID().Return(id).AnyTimes()
- series.EXPECT().Tags().Return(tags).AnyTimes()
series.EXPECT().IsEmpty().Return(false).AnyTimes()
shard.Lock()
shard.insertNewShardEntryWithLock(lookup.NewEntry(series, index))
@@ -127,8 +128,13 @@ func TestShardBootstrapState(t *testing.T) {
opts := DefaultTestOptions()
s := testDatabaseShard(t, opts)
defer s.Close()
- require.NoError(t, s.Bootstrap())
- require.Error(t, s.Bootstrap())
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ require.NoError(t, s.Bootstrap(ctx, nsCtx))
+ require.Error(t, s.Bootstrap(ctx, nsCtx))
}
func TestShardFlushStateNotStarted(t *testing.T) {
@@ -154,7 +160,12 @@ func TestShardFlushStateNotStarted(t *testing.T) {
s := testDatabaseShard(t, opts)
defer s.Close()
- s.Bootstrap()
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ s.Bootstrap(ctx, nsCtx)
notStarted := fileOpState{WarmStatus: fileOpNotStarted}
for st := earliest; !st.After(latest); st = st.Add(ropts.BlockSize()) {
@@ -192,6 +203,7 @@ func TestShardBootstrapWithFlushVersion(t *testing.T) {
mockSeries := series.NewMockDatabaseSeries(ctrl)
mockSeries.EXPECT().ID().Return(mockSeriesID).AnyTimes()
mockSeries.EXPECT().IsEmpty().Return(false).AnyTimes()
+ mockSeries.EXPECT().Bootstrap(gomock.Any())
// Load the mock into the shard as an expected series so that we can assert
// on the call to its Bootstrap() method below.
@@ -221,7 +233,11 @@ func TestShardBootstrapWithFlushVersion(t *testing.T) {
require.NoError(t, writer.Close())
}
- err = s.Bootstrap()
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ err = s.Bootstrap(ctx, nsCtx)
require.NoError(t, err)
require.Equal(t, Bootstrapped, s.bootstrapState)
@@ -282,7 +298,11 @@ func TestShardBootstrapWithFlushVersionNoCleanUp(t *testing.T) {
require.NoError(t, writer.Close())
}
- err = s.Bootstrap()
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ err = s.Bootstrap(ctx, nsCtx)
require.NoError(t, err)
require.Equal(t, Bootstrapped, s.bootstrapState)
@@ -308,20 +328,20 @@ func TestShardBootstrapWithCacheShardIndices(t *testing.T) {
newClOpts = opts.
CommitLogOptions().
SetFilesystemOptions(fsOpts)
- mockRetriever = block.NewMockDatabaseBlockRetriever(ctrl)
- mockRetrieverMgr = block.NewMockDatabaseBlockRetrieverManager(ctrl)
+ mockRetriever = block.NewMockDatabaseBlockRetriever(ctrl)
)
- opts = opts.
- SetCommitLogOptions(newClOpts).
- SetDatabaseBlockRetrieverManager(mockRetrieverMgr)
+ opts = opts.SetCommitLogOptions(newClOpts)
s := testDatabaseShard(t, opts)
defer s.Close()
-
mockRetriever.EXPECT().CacheShardIndices([]uint32{s.ID()}).Return(nil)
- mockRetrieverMgr.EXPECT().Retriever(s.namespace).Return(mockRetriever, nil)
+ s.setBlockRetriever(mockRetriever)
+
+ ctx := context.NewContext()
+ defer ctx.Close()
- err = s.Bootstrap()
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ err = s.Bootstrap(ctx, nsCtx)
require.NoError(t, err)
require.Equal(t, Bootstrapped, s.bootstrapState)
}
@@ -368,7 +388,12 @@ func testShardLoadLimit(t *testing.T, limit int64, shouldReturnError bool) {
sr.AddBlock(ident.StringID("bar"), barTags, blocks[1])
seriesMap := sr.AllSeries()
- require.NoError(t, s.Bootstrap())
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ require.NoError(t, s.Bootstrap(ctx, nsCtx))
// First load will never trigger the limit.
require.NoError(t, s.LoadBlocks(seriesMap))
@@ -388,7 +413,13 @@ func TestShardFlushSeriesFlushError(t *testing.T) {
s := testDatabaseShard(t, DefaultTestOptions())
defer s.Close()
- s.Bootstrap()
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ s.Bootstrap(ctx, nsCtx)
+
s.flushState.statesByTime[xtime.ToUnixNano(blockStart)] = fileOpState{
WarmStatus: fileOpFailed,
NumFailures: 1,
@@ -397,7 +428,7 @@ func TestShardFlushSeriesFlushError(t *testing.T) {
var closed bool
flush := persist.NewMockFlushPreparer(ctrl)
prepared := persist.PreparedDataPersist{
- Persist: func(ident.ID, ident.Tags, ts.Segment, uint32) error { return nil },
+ Persist: func(persist.Metadata, ts.Segment, uint32) error { return nil },
Close: func() error { closed = true; return nil },
}
prepareOpts := xtest.CmpMatcher(persist.DataPrepareOptions{
@@ -457,9 +488,16 @@ func TestShardFlushSeriesFlushSuccess(t *testing.T) {
}
opts := DefaultTestOptions()
opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(nowFn))
+
s := testDatabaseShard(t, opts)
defer s.Close()
- s.Bootstrap()
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ s.Bootstrap(ctx, nsCtx)
+
s.flushState.statesByTime[xtime.ToUnixNano(blockStart)] = fileOpState{
WarmStatus: fileOpFailed,
NumFailures: 1,
@@ -468,7 +506,7 @@ func TestShardFlushSeriesFlushSuccess(t *testing.T) {
var closed bool
flush := persist.NewMockFlushPreparer(ctrl)
prepared := persist.PreparedDataPersist{
- Persist: func(ident.ID, ident.Tags, ts.Segment, uint32) error { return nil },
+ Persist: func(persist.Metadata, ts.Segment, uint32) error { return nil },
Close: func() error { closed = true; return nil },
}
@@ -532,7 +570,7 @@ func TestShardColdFlush(t *testing.T) {
require.NoError(t, err)
defer os.RemoveAll(dir)
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
now := time.Now()
nowFn := func() time.Time {
@@ -548,7 +586,12 @@ func TestShardColdFlush(t *testing.T) {
blockSize := opts.SeriesOptions().RetentionOptions().BlockSize()
shard := testDatabaseShard(t, opts)
- require.NoError(t, shard.Bootstrap())
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ require.NoError(t, shard.Bootstrap(ctx, nsCtx))
shard.newMergerFn = newMergerTestFn
shard.newFSMergeWithMemFn = newFSMergeWithMemTestFn
@@ -580,7 +623,8 @@ func TestShardColdFlush(t *testing.T) {
}
for _, ds := range dirtyData {
curr := series.NewMockDatabaseSeries(ctrl)
- curr.EXPECT().ID().Return(ds.id)
+ curr.EXPECT().ID().Return(ds.id).AnyTimes()
+ curr.EXPECT().Metadata().Return(doc.Document{ID: ds.id.Bytes()}).AnyTimes()
curr.EXPECT().ColdFlushBlockStarts(gomock.Any()).
Return(optimizedTimesFromTimes(ds.dirtyTimes))
shard.list.PushBack(lookup.NewEntry(curr, 0))
@@ -589,12 +633,11 @@ func TestShardColdFlush(t *testing.T) {
preparer := persist.NewMockFlushPreparer(ctrl)
fsReader := fs.NewMockDataFileSetReader(ctrl)
resources := coldFlushReuseableResources{
- dirtySeries: newDirtySeriesMap(dirtySeriesMapOptions{}),
+ dirtySeries: newDirtySeriesMap(),
dirtySeriesToWrite: make(map[xtime.UnixNano]*idList),
idElementPool: newIDElementPool(nil),
fsReader: fsReader,
}
- nsCtx := namespace.Context{}
// Assert that flush state cold versions all start at 0.
for i := t0; i.Before(t7.Add(blockSize)); i = i.Add(blockSize) {
@@ -602,8 +645,9 @@ func TestShardColdFlush(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, coldVersion)
}
- err = shard.ColdFlush(preparer, resources, nsCtx)
+ shardColdFlush, err := shard.ColdFlush(preparer, resources, nsCtx, &persist.NoOpColdFlushNamespace{})
require.NoError(t, err)
+ require.NoError(t, shardColdFlush.Done())
// After a cold flush, t0-t6 previously dirty block starts should be updated
// to version 1.
for i := t0; i.Before(t6.Add(blockSize)); i = i.Add(blockSize) {
@@ -628,7 +672,13 @@ func TestShardColdFlushNoMergeIfNothingDirty(t *testing.T) {
opts = opts.SetClockOptions(opts.ClockOptions().SetNowFn(nowFn))
blockSize := opts.SeriesOptions().RetentionOptions().BlockSize()
shard := testDatabaseShard(t, opts)
- require.NoError(t, shard.Bootstrap())
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ require.NoError(t, shard.Bootstrap(ctx, nsCtx))
+
shard.newMergerFn = newMergerTestFn
shard.newFSMergeWithMemFn = newFSMergeWithMemTestFn
@@ -656,14 +706,15 @@ func TestShardColdFlushNoMergeIfNothingDirty(t *testing.T) {
dirtySeriesToWrite[xtime.ToUnixNano(t3)] = newIDList(idElementPool)
resources := coldFlushReuseableResources{
- dirtySeries: newDirtySeriesMap(dirtySeriesMapOptions{}),
+ dirtySeries: newDirtySeriesMap(),
dirtySeriesToWrite: dirtySeriesToWrite,
idElementPool: idElementPool,
fsReader: fsReader,
}
- nsCtx := namespace.Context{}
- shard.ColdFlush(preparer, resources, nsCtx)
+ shardColdFlush, err := shard.ColdFlush(preparer, resources, nsCtx, &persist.NoOpColdFlushNamespace{})
+ require.NoError(t, err)
+ require.NoError(t, shardColdFlush.Done())
// After a cold flush, t0-t3 should remain version 0, since nothing should
// actually be merged.
for i := t0; i.Before(t3.Add(blockSize)); i = i.Add(blockSize) {
@@ -674,14 +725,15 @@ func TestShardColdFlushNoMergeIfNothingDirty(t *testing.T) {
}
func newMergerTestFn(
- reader fs.DataFileSetReader,
- blockAllocSize int,
- srPool xio.SegmentReaderPool,
- multiIterPool encoding.MultiReaderIteratorPool,
- identPool ident.Pool,
- encoderPool encoding.EncoderPool,
- contextPool context.Pool,
- nsOpts namespace.Options,
+ _ fs.DataFileSetReader,
+ _ int,
+ _ xio.SegmentReaderPool,
+ _ encoding.MultiReaderIteratorPool,
+ _ ident.Pool,
+ _ encoding.EncoderPool,
+ _ context.Pool,
+ _ string,
+ _ namespace.Options,
) fs.Merger {
return &noopMerger{}
}
@@ -689,42 +741,36 @@ func newMergerTestFn(
type noopMerger struct{}
func (m *noopMerger) Merge(
- fileID fs.FileSetFileIdentifier,
- mergeWith fs.MergeWith,
- nextVersion int,
- flushPreparer persist.FlushPreparer,
- nsCtx namespace.Context,
+ _ fs.FileSetFileIdentifier,
+ _ fs.MergeWith,
+ _ int,
+ _ persist.FlushPreparer,
+ _ namespace.Context,
+ _ persist.OnFlushSeries,
+) (persist.DataCloser, error) {
+ closer := func() error { return nil }
+ return closer, nil
+}
+
+func (m *noopMerger) MergeAndCleanup(
+ _ fs.FileSetFileIdentifier,
+ _ fs.MergeWith,
+ _ int,
+ _ persist.FlushPreparer,
+ _ namespace.Context,
+ _ persist.OnFlushSeries,
+ _ bool,
) error {
return nil
}
func newFSMergeWithMemTestFn(
- shard databaseShard,
- retriever series.QueryableBlockRetriever,
- dirtySeries *dirtySeriesMap,
- dirtySeriesToWrite map[xtime.UnixNano]*idList,
+ _ databaseShard,
+ _ series.QueryableBlockRetriever,
+ _ *dirtySeriesMap,
+ _ map[xtime.UnixNano]*idList,
) fs.MergeWith {
- return &noopMergeWith{}
-}
-
-type noopMergeWith struct{}
-
-func (m *noopMergeWith) Read(
- ctx context.Context,
- seriesID ident.ID,
- blockStart xtime.UnixNano,
- nsCtx namespace.Context,
-) ([]xio.BlockReader, bool, error) {
- return nil, false, nil
-}
-
-func (m *noopMergeWith) ForEachRemaining(
- ctx context.Context,
- blockStart xtime.UnixNano,
- fn fs.ForEachRemainingFn,
- nsCtx namespace.Context,
-) error {
- return nil
+ return fs.NewNoopMergeWith()
}
func TestShardSnapshotShardNotBootstrapped(t *testing.T) {
@@ -738,7 +784,7 @@ func TestShardSnapshotShardNotBootstrapped(t *testing.T) {
s.bootstrapState = Bootstrapping
snapshotPreparer := persist.NewMockSnapshotPreparer(ctrl)
- err := s.Snapshot(blockStart, blockStart, snapshotPreparer, namespace.Context{})
+ _, err := s.Snapshot(blockStart, blockStart, snapshotPreparer, namespace.Context{})
require.Equal(t, errShardNotBootstrappedToSnapshot, err)
}
@@ -755,7 +801,7 @@ func TestShardSnapshotSeriesSnapshotSuccess(t *testing.T) {
var closed bool
snapshotPreparer := persist.NewMockSnapshotPreparer(ctrl)
prepared := persist.PreparedDataPersist{
- Persist: func(ident.ID, ident.Tags, ts.Segment, uint32) error { return nil },
+ Persist: func(persist.Metadata, ts.Segment, uint32) error { return nil },
Close: func() error { closed = true; return nil },
}
@@ -773,20 +819,20 @@ func TestShardSnapshotSeriesSnapshotSuccess(t *testing.T) {
snapshotted := make(map[int]struct{})
for i := 0; i < 2; i++ {
i := i
- series := series.NewMockDatabaseSeries(ctrl)
- series.EXPECT().ID().Return(ident.StringID("foo" + strconv.Itoa(i))).AnyTimes()
- series.EXPECT().IsEmpty().Return(false).AnyTimes()
- series.EXPECT().
+ entry := series.NewMockDatabaseSeries(ctrl)
+ entry.EXPECT().ID().Return(ident.StringID("foo" + strconv.Itoa(i))).AnyTimes()
+ entry.EXPECT().IsEmpty().Return(false).AnyTimes()
+ entry.EXPECT().IsBufferEmptyAtBlockStart(blockStart).Return(false).AnyTimes()
+ entry.EXPECT().
Snapshot(gomock.Any(), blockStart, gomock.Any(), gomock.Any()).
Do(func(context.Context, time.Time, persist.DataFn, namespace.Context) {
snapshotted[i] = struct{}{}
}).
- Return(nil)
- s.list.PushBack(lookup.NewEntry(series, 0))
+ Return(series.SnapshotResult{}, nil)
+ s.list.PushBack(lookup.NewEntry(entry, 0))
}
- err := s.Snapshot(blockStart, blockStart, snapshotPreparer, namespace.Context{})
-
+ _, err := s.Snapshot(blockStart, blockStart, snapshotPreparer, namespace.Context{})
require.Equal(t, len(snapshotted), 2)
for i := 0; i < 2; i++ {
_, ok := snapshotted[i]
@@ -836,13 +882,13 @@ func writeShardAndVerify(
expectedShouldWrite bool,
expectedIdx uint64,
) {
- series, wasWritten, err := shard.Write(ctx, ident.StringID(id),
+ seriesWrite, err := shard.Write(ctx, ident.StringID(id),
now, value, xtime.Second, nil, series.WriteOptions{})
assert.NoError(t, err)
- assert.Equal(t, expectedShouldWrite, wasWritten)
- assert.Equal(t, id, series.ID.String())
- assert.Equal(t, "testns1", series.Namespace.String())
- assert.Equal(t, expectedIdx, series.UniqueIndex)
+ assert.Equal(t, expectedShouldWrite, seriesWrite.WasWritten)
+ assert.Equal(t, id, seriesWrite.Series.ID.String())
+ assert.Equal(t, "testns1", seriesWrite.Series.Namespace.String())
+ assert.Equal(t, expectedIdx, seriesWrite.Series.UniqueIndex)
}
func TestShardTick(t *testing.T) {
@@ -881,8 +927,12 @@ func TestShardTick(t *testing.T) {
sleepPerSeries := time.Microsecond
+ ctx := context.NewContext()
+ defer ctx.Close()
+
shard := testDatabaseShard(t, opts)
- shard.Bootstrap()
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ shard.Bootstrap(ctx, nsCtx)
shard.SetRuntimeOptions(runtime.NewOptions().
SetTickPerSeriesSleepDuration(sleepPerSeries).
SetTickSeriesBatchSize(1))
@@ -906,9 +956,6 @@ func TestShardTick(t *testing.T) {
setNow(nowFn().Add(t))
}
- ctx := context.NewContext()
- defer ctx.Close()
-
writeShardAndVerify(ctx, t, shard, "foo", nowFn(), 1.0, true, 0)
// same time, different value should write
writeShardAndVerify(ctx, t, shard, "foo", nowFn(), 2.0, true, 0)
@@ -1050,8 +1097,12 @@ func testShardWriteAsync(t *testing.T, writes []testWrite) {
sleepPerSeries := time.Microsecond
+ ctx := context.NewContext()
+ defer ctx.Close()
+
shard := testDatabaseShard(t, opts)
- shard.Bootstrap()
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ shard.Bootstrap(ctx, nsCtx)
shard.SetRuntimeOptions(runtime.NewOptions().
SetWriteNewSeriesAsync(true).
SetTickPerSeriesSleepDuration(sleepPerSeries).
@@ -1076,9 +1127,6 @@ func testShardWriteAsync(t *testing.T, writes []testWrite) {
setNow(nowFn().Add(t))
}
- ctx := context.NewContext()
- defer ctx.Close()
-
for _, write := range writes {
shard.Write(ctx, ident.StringID(write.id), nowFn(), write.value, write.unit, write.annotation, series.WriteOptions{})
}
@@ -1101,6 +1149,18 @@ func testShardWriteAsync(t *testing.T, writes []testWrite) {
require.Equal(t, 1, len(shard.flushState.statesByTime))
_, ok := shard.flushState.statesByTime[xtime.ToUnixNano(earliestFlush)]
require.True(t, ok)
+
+ // Verify the documents in the shard's series are present.
+ for _, w := range writes {
+ doc, exists, err := shard.DocRef(ident.StringID(w.id))
+ require.NoError(t, err)
+ require.True(t, exists)
+ require.Equal(t, w.id, string(doc.ID))
+ }
+ document, exists, err := shard.DocRef(ident.StringID("NOT_PRESENT_ID"))
+ require.NoError(t, err)
+ require.False(t, exists)
+ require.Equal(t, doc.Document{}, document)
}
// This tests a race in shard ticking with an empty series pending expiration.
@@ -1108,7 +1168,12 @@ func TestShardTickRace(t *testing.T) {
opts := DefaultTestOptions()
shard := testDatabaseShard(t, opts)
defer shard.Close()
- shard.Bootstrap()
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ shard.Bootstrap(ctx, nsCtx)
addTestSeries(shard, ident.StringID("foo"))
var wg sync.WaitGroup
@@ -1136,8 +1201,14 @@ func TestShardTickRace(t *testing.T) {
// we had while trying to purge as a concurrent read.
func TestShardTickCleanupSmallBatchSize(t *testing.T) {
opts := DefaultTestOptions()
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
shard := testDatabaseShard(t, opts)
- shard.Bootstrap()
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ shard.Bootstrap(ctx, nsCtx)
+
addTestSeries(shard, ident.StringID("foo"))
shard.Tick(context.NewNoOpCanncellable(), time.Now(), namespace.Context{})
require.Equal(t, 0, shard.lookup.Len())
@@ -1159,8 +1230,12 @@ func TestShardReturnsErrorForConcurrentTicks(t *testing.T) {
SetCommitLogOptions(opts.CommitLogOptions().
SetFilesystemOptions(fsOpts))
+ ctx := context.NewContext()
+ defer ctx.Close()
+
shard := testDatabaseShard(t, opts)
- shard.Bootstrap()
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ shard.Bootstrap(ctx, nsCtx)
shard.currRuntimeOptions.tickSleepSeriesBatchSize = 1
shard.currRuntimeOptions.tickSleepPerSeries = time.Millisecond
@@ -1318,7 +1393,7 @@ func TestPurgeExpiredSeriesWriteAfterTicking(t *testing.T) {
s.EXPECT().Tick(gomock.Any(), gomock.Any()).Do(func(interface{}, interface{}) {
// Emulate a write taking place just after tick for this series
s.EXPECT().Write(gomock.Any(), gomock.Any(), gomock.Any(),
- gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil)
+ gomock.Any(), gomock.Any(), gomock.Any()).Return(true, series.WarmWrite, nil)
ctx := opts.ContextPool().Get()
nowFn := opts.ClockOptions().NowFn()
@@ -1511,7 +1586,12 @@ func TestShardReadEncodedCachesSeriesWithRecentlyReadPolicy(t *testing.T) {
shard := testDatabaseShard(t, opts)
defer shard.Close()
- require.NoError(t, shard.Bootstrap())
+
+ ctx := context.NewContext()
+ defer ctx.Close()
+
+ nsCtx := namespace.Context{ID: ident.StringID("foo")}
+ require.NoError(t, shard.Bootstrap(ctx, nsCtx))
ropts := shard.seriesOpts.RetentionOptions()
end := opts.ClockOptions().NowFn()().Truncate(ropts.BlockSize())
@@ -1523,8 +1603,8 @@ func TestShardReadEncodedCachesSeriesWithRecentlyReadPolicy(t *testing.T) {
shard.setBlockRetriever(retriever)
segments := []ts.Segment{
- ts.NewSegment(checked.NewBytes([]byte("bar"), nil), nil, ts.FinalizeNone),
- ts.NewSegment(checked.NewBytes([]byte("baz"), nil), nil, ts.FinalizeNone),
+ ts.NewSegment(checked.NewBytes([]byte("bar"), nil), nil, 0, ts.FinalizeNone),
+ ts.NewSegment(checked.NewBytes([]byte("baz"), nil), nil, 1, ts.FinalizeNone),
}
var blockReaders []xio.BlockReader
@@ -1537,9 +1617,6 @@ func TestShardReadEncodedCachesSeriesWithRecentlyReadPolicy(t *testing.T) {
blockReaders = append(blockReaders, block)
}
- ctx := opts.ContextPool().Get()
- defer ctx.Close()
-
mid := start.Add(ropts.BlockSize())
retriever.EXPECT().
@@ -1594,7 +1671,7 @@ func TestShardReadEncodedCachesSeriesWithRecentlyReadPolicy(t *testing.T) {
}
func TestShardNewInvalidShardEntry(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
shard := testDatabaseShard(t, DefaultTestOptions())
@@ -1603,8 +1680,7 @@ func TestShardNewInvalidShardEntry(t *testing.T) {
iter := ident.NewMockTagIterator(ctrl)
gomock.InOrder(
iter.EXPECT().Duplicate().Return(iter),
- iter.EXPECT().CurrentIndex().Return(0),
- iter.EXPECT().Len().Return(0),
+ iter.EXPECT().Remaining().Return(8),
iter.EXPECT().Next().Return(false),
iter.EXPECT().Err().Return(fmt.Errorf("random err")),
iter.EXPECT().Close(),
@@ -1631,7 +1707,7 @@ func TestShardNewValidShardEntry(t *testing.T) {
// either to retry inserting a series or to finalize the tags at the
// end of a request/response cycle or from a disk retrieve cycle.
func TestShardNewEntryDoesNotAlterIDOrTags(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
shard := testDatabaseShard(t, DefaultTestOptions())
@@ -1645,7 +1721,6 @@ func TestShardNewEntryDoesNotAlterIDOrTags(t *testing.T) {
// Ensure copied with call to bytes but no close call, etc
id := ident.NewMockID(ctrl)
- id.EXPECT().IsNoFinalize().Times(1).Return(false)
id.EXPECT().Bytes().Times(1).Return(seriesID.Bytes())
iter := ident.NewMockTagIterator(ctrl)
@@ -1673,82 +1748,6 @@ func TestShardNewEntryDoesNotAlterIDOrTags(t *testing.T) {
assert.True(t, entry.Series.ID().Equal(seriesID))
// NB(r): Use &slice[0] to get a pointer to the very first byte, i.e. data section
assert.False(t, unsafe.Pointer(&entryIDBytes[0]) == unsafe.Pointer(&seriesIDBytes[0]))
-
- // Ensure Tags equal and NOT same ref for tags
- assert.True(t, entry.Series.Tags().Equal(seriesTags))
- require.Equal(t, 1, len(entry.Series.Tags().Values()))
-
- entryTagNameBytes := entry.Series.Tags().Values()[0].Name.Bytes()
- entryTagValueBytes := entry.Series.Tags().Values()[0].Value.Bytes()
- seriesTagNameBytes := seriesTags.Values()[0].Name.Bytes()
- seriesTagValueBytes := seriesTags.Values()[0].Value.Bytes()
-
- // NB(r): Use &slice[0] to get a pointer to the very first byte, i.e. data section
- assert.False(t, unsafe.Pointer(&entryTagNameBytes[0]) == unsafe.Pointer(&seriesTagNameBytes[0]))
- assert.False(t, unsafe.Pointer(&entryTagValueBytes[0]) == unsafe.Pointer(&seriesTagValueBytes[0]))
-}
-
-// TestShardNewEntryTakesRefToNoFinalizeID ensures that when an ID is
-// marked as NoFinalize that newShardEntry simply takes a ref as it can
-// safely be assured the ID is not pooled.
-func TestShardNewEntryTakesRefToNoFinalizeID(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- shard := testDatabaseShard(t, DefaultTestOptions())
- defer shard.Close()
-
- seriesID := ident.BytesID([]byte("foo+bar=baz"))
- seriesTags := ident.NewTags(ident.Tag{
- Name: ident.StringID("bar"),
- Value: ident.StringID("baz"),
- })
-
- // Ensure copied with call to bytes but no close call, etc
- id := ident.NewMockID(ctrl)
- id.EXPECT().IsNoFinalize().Times(1).Return(true)
- id.EXPECT().Bytes().Times(1).Return(seriesID.Bytes())
-
- iter := ident.NewMockTagIterator(ctrl)
-
- // Ensure duplicate called but no close, etc
- iter.EXPECT().
- Duplicate().
- Times(1).
- Return(ident.NewTagsIterator(seriesTags))
-
- entry, err := shard.newShardEntry(id, newTagsIterArg(iter))
- require.NoError(t, err)
-
- shard.Lock()
- shard.insertNewShardEntryWithLock(entry)
- shard.Unlock()
-
- entry, _, err = shard.tryRetrieveWritableSeries(seriesID)
- require.NoError(t, err)
-
- assert.True(t, entry.Series.ID().Equal(seriesID))
-
- entryIDBytes := entry.Series.ID().Bytes()
- seriesIDBytes := seriesID.Bytes()
-
- // Ensure ID equal and same ref
- assert.True(t, entry.Series.ID().Equal(seriesID))
- // NB(r): Use &slice[0] to get a pointer to the very first byte, i.e. data section
- assert.True(t, unsafe.Pointer(&entryIDBytes[0]) == unsafe.Pointer(&seriesIDBytes[0]))
-
- // Ensure Tags equal and NOT same ref for tags
- assert.True(t, entry.Series.Tags().Equal(seriesTags))
- require.Equal(t, 1, len(entry.Series.Tags().Values()))
-
- entryTagNameBytes := entry.Series.Tags().Values()[0].Name.Bytes()
- entryTagValueBytes := entry.Series.Tags().Values()[0].Value.Bytes()
- seriesTagNameBytes := seriesTags.Values()[0].Name.Bytes()
- seriesTagValueBytes := seriesTags.Values()[0].Value.Bytes()
-
- // NB(r): Use &slice[0] to get a pointer to the very first byte, i.e. data section
- assert.False(t, unsafe.Pointer(&entryTagNameBytes[0]) == unsafe.Pointer(&seriesTagNameBytes[0]))
- assert.False(t, unsafe.Pointer(&entryTagValueBytes[0]) == unsafe.Pointer(&seriesTagValueBytes[0]))
}
func TestShardIterateBatchSize(t *testing.T) {
diff --git a/src/dbnode/storage/stats/query_stats.go b/src/dbnode/storage/stats/query_stats.go
new file mode 100644
index 0000000000..1e925f97a2
--- /dev/null
+++ b/src/dbnode/storage/stats/query_stats.go
@@ -0,0 +1,165 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package stats
+
+import (
+ "fmt"
+ "time"
+
+ "go.uber.org/atomic"
+)
+
+// queryStats tracks query stats (e.g. documents queried) over a rolling lookback window.
+type queryStats struct {
+ tracker QueryStatsTracker
+
+ recentDocs *atomic.Int64
+ stopCh chan struct{}
+}
+
+type noOpQueryStats struct {
+}
+
+var (
+ _ QueryStats = (*queryStats)(nil)
+ _ QueryStats = (*noOpQueryStats)(nil)
+)
+
+// QueryStats provides an interface for updating query stats.
+type QueryStats interface {
+ Update(newDocs int) error
+ Start()
+ Stop()
+}
+
+// QueryStatsOptions holds options for how a tracker should handle query stats.
+type QueryStatsOptions struct {
+ // MaxDocs caps the number of recently queried documents allowed
+ // within the lookback period before queries are aborted.
+ MaxDocs int64
+ // Lookback specifies the lookback period over which stats are aggregated.
+ Lookback time.Duration
+}
+
+// QueryStatsValues stores values of query stats.
+type QueryStatsValues struct {
+ RecentDocs int64
+ NewDocs int64
+}
+
+var zeros = QueryStatsValues{
+ RecentDocs: 0,
+ NewDocs: 0,
+}
+
+// QueryStatsTracker provides an interface for tracking current query stats.
+type QueryStatsTracker interface {
+ Lookback() time.Duration
+ TrackStats(stats QueryStatsValues) error
+}
+
+// NewQueryStats returns a QueryStats that tracks query stats over the tracker's lookback duration.
+func NewQueryStats(tracker QueryStatsTracker) QueryStats {
+ return &queryStats{
+ tracker: tracker,
+ recentDocs: atomic.NewInt64(0),
+ stopCh: make(chan struct{}),
+ }
+}
+
+// NoOpQueryStats returns inactive query stats.
+func NoOpQueryStats() QueryStats {
+ return &noOpQueryStats{}
+}
+
+// Update adds new query stats to the state being tracked.
+func (q *queryStats) Update(newDocs int) error {
+ if q == nil {
+ return nil
+ }
+ if newDocs <= 0 {
+ return nil
+ }
+
+ newDocsI64 := int64(newDocs)
+
+ // Add the new stats to the global state.
+ recentDocs := q.recentDocs.Add(newDocsI64)
+
+ values := QueryStatsValues{
+ RecentDocs: recentDocs,
+ NewDocs: newDocsI64,
+ }
+
+ // Invoke the custom tracker based on the new stats values.
+ return q.tracker.TrackStats(values)
+}
+
+// Start initializes background processing for handling query stats.
+func (q *queryStats) Start() {
+ if q == nil {
+ return
+ }
+ go func() {
+ ticker := time.NewTicker(q.tracker.Lookback())
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ // Clear recent docs after each lookback period elapses.
+ q.recentDocs.Store(0)
+
+ // Also invoke the tracker so it observes the zero values.
+ _ = q.tracker.TrackStats(zeros)
+ case <-q.stopCh:
+ return
+ }
+ }
+ }()
+}
+
+func (q *queryStats) Stop() {
+ if q == nil {
+ return
+ }
+ close(q.stopCh)
+}
+
+func (q *noOpQueryStats) Update(int) error {
+ return nil
+}
+
+func (q *noOpQueryStats) Stop() {
+}
+
+func (q *noOpQueryStats) Start() {
+}
+
+// Validate returns an error if the query stats options are invalid.
+func (opts QueryStatsOptions) Validate() error {
+ if opts.MaxDocs < 0 {
+ return fmt.Errorf("query stats tracker requires max docs >= 0 (%d)", opts.MaxDocs)
+ }
+ if opts.Lookback <= 0 {
+ return fmt.Errorf("query stats tracker requires lookback > 0 (%d)", opts.Lookback)
+ }
+ return nil
+}
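For orientation (not part of this change): a minimal sketch of how a caller might drive the new QueryStats API. The loggingTracker below is a hypothetical QueryStatsTracker used purely for illustration.

```go
package main

import (
	"fmt"
	"time"

	"github.com/m3db/m3/src/dbnode/storage/stats"
)

// loggingTracker is a hypothetical QueryStatsTracker that prints the
// running totals instead of emitting metrics or enforcing limits.
type loggingTracker struct{}

// Lookback controls how often the background loop resets recent docs.
func (t *loggingTracker) Lookback() time.Duration { return 5 * time.Second }

// TrackStats receives the totals after every Update and after every
// lookback reset; returning an error would abort the calling query.
func (t *loggingTracker) TrackStats(v stats.QueryStatsValues) error {
	fmt.Printf("recent=%d new=%d\n", v.RecentDocs, v.NewDocs)
	return nil
}

func main() {
	qs := stats.NewQueryStats(&loggingTracker{})
	qs.Start() // spawns the goroutine that zeroes recentDocs each lookback
	defer qs.Stop()

	// Each query path reports the documents it just touched.
	if err := qs.Update(42); err != nil {
		fmt.Println("query aborted:", err)
	}
}
```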
diff --git a/src/dbnode/storage/stats/query_stats_default_tracker.go b/src/dbnode/storage/stats/query_stats_default_tracker.go
new file mode 100644
index 0000000000..220ba1c5ef
--- /dev/null
+++ b/src/dbnode/storage/stats/query_stats_default_tracker.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package stats
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/uber-go/tally"
+)
+
+// DefaultLookback is the default lookback used for query stats tracking.
+const DefaultLookback = time.Second * 5
+
+// queryStatsTracker is a QueryStatsTracker implementation that emits query stats as metrics and optionally enforces limits.
+type queryStatsTracker struct {
+ recentDocs tally.Gauge
+ totalDocs tally.Counter
+
+ options QueryStatsOptions
+}
+
+var _ QueryStatsTracker = (*queryStatsTracker)(nil)
+
+// DefaultQueryStatsTracker provides a tracker
+// implementation that emits query stats as metrics
+// and enforces limits.
+func DefaultQueryStatsTracker(
+ instrumentOpts instrument.Options,
+ queryStatsOpts QueryStatsOptions,
+) QueryStatsTracker {
+ scope := instrumentOpts.
+ MetricsScope().
+ SubScope("query-stats")
+ return &queryStatsTracker{
+ options: queryStatsOpts,
+ recentDocs: scope.Gauge("recent-docs-per-block"),
+ totalDocs: scope.Counter("total-docs-per-block"),
+ }
+}
+
+func (t *queryStatsTracker) TrackStats(values QueryStatsValues) error {
+ // Track stats as metrics.
+ t.recentDocs.Update(float64(values.RecentDocs))
+ t.totalDocs.Inc(values.NewDocs)
+
+ // Enforce max queried docs (if specified).
+ if t.options.MaxDocs > 0 && values.RecentDocs > t.options.MaxDocs {
+ return fmt.Errorf(
+ "query aborted, global recent time series blocks over limit: "+
+ "limit=%d, current=%d, within=%s",
+ t.options.MaxDocs, values.RecentDocs, t.options.Lookback)
+ }
+ return nil
+}
+
+func (t *queryStatsTracker) Lookback() time.Duration {
+ return t.options.Lookback
+}
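A hedged sketch of wiring this default tracker together with QueryStats; the test scope and the option values here are illustrative assumptions, mirroring the tests that follow.

```go
package main

import (
	"github.com/m3db/m3/src/dbnode/storage/stats"
	"github.com/m3db/m3/src/x/instrument"

	"github.com/uber-go/tally"
)

func main() {
	// Production code would pass its real metrics scope; a test scope
	// keeps this sketch self-contained.
	scope := tally.NewTestScope("", nil)
	instrumentOpts := instrument.NewOptions().SetMetricsScope(scope)

	opts := stats.QueryStatsOptions{
		MaxDocs:  1000,                  // illustrative limit; 0 disables enforcement
		Lookback: stats.DefaultLookback, // recent docs reset every 5s
	}
	if err := opts.Validate(); err != nil {
		panic(err)
	}

	tracker := stats.DefaultQueryStatsTracker(instrumentOpts, opts)
	queryStats := stats.NewQueryStats(tracker)
	queryStats.Start()
	defer queryStats.Stop()

	// Over the limit: TrackStats both records the metrics and returns
	// the "query aborted" error.
	_ = queryStats.Update(1001)
}
```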
diff --git a/src/dbnode/storage/stats/query_stats_default_tracker_test.go b/src/dbnode/storage/stats/query_stats_default_tracker_test.go
new file mode 100644
index 0000000000..8913cb1972
--- /dev/null
+++ b/src/dbnode/storage/stats/query_stats_default_tracker_test.go
@@ -0,0 +1,191 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package stats
+
+import (
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/uber-go/tally"
+)
+
+func TestValidateTrackerInputs(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ maxDocs int64
+ lookback time.Duration
+ expectedError string
+ }{
+ {
+ name: "valid lookback without limit",
+ maxDocs: 0,
+ lookback: time.Millisecond,
+ },
+ {
+ name: "valid lookback with valid limit",
+ maxDocs: 1,
+ lookback: time.Millisecond,
+ },
+ {
+ name: "negative lookback",
+ maxDocs: 0,
+ lookback: -time.Millisecond,
+ expectedError: "query stats tracker requires lookback > 0 (-1000000)",
+ },
+ {
+ name: "zero lookback",
+ maxDocs: 0,
+ lookback: time.Duration(0),
+ expectedError: "query stats tracker requires lookback > 0 (0)",
+ },
+ {
+ name: "negative max",
+ maxDocs: -1,
+ lookback: time.Millisecond,
+ expectedError: "query stats tracker requires max docs >= 0 (-1)",
+ },
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ err := QueryStatsOptions{
+ MaxDocs: test.maxDocs,
+ Lookback: test.lookback,
+ }.Validate()
+ if test.expectedError != "" {
+ require.Error(t, err)
+ require.Equal(t, test.expectedError, err.Error())
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+}
+
+func TestEmitQueryStatsBasedMetrics(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ opts QueryStatsOptions
+ }{
+ {
+ name: "metrics only",
+ opts: QueryStatsOptions{
+ Lookback: time.Second,
+ },
+ },
+ {
+ name: "metrics and limits",
+ opts: QueryStatsOptions{
+ MaxDocs: 1000,
+ Lookback: time.Second,
+ },
+ },
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ scope := tally.NewTestScope("", nil)
+ opts := instrument.NewOptions().SetMetricsScope(scope)
+
+ tracker := DefaultQueryStatsTracker(opts, test.opts)
+
+ err := tracker.TrackStats(QueryStatsValues{RecentDocs: 100, NewDocs: 5})
+ require.NoError(t, err)
+ verifyMetrics(t, scope, 100, 5)
+
+ err = tracker.TrackStats(QueryStatsValues{RecentDocs: 140, NewDocs: 10})
+ require.NoError(t, err)
+ verifyMetrics(t, scope, 140, 15)
+ })
+ }
+}
+
+func TestLimitMaxDocs(t *testing.T) {
+ scope := tally.NewTestScope("", nil)
+ opts := instrument.NewOptions().SetMetricsScope(scope)
+
+ maxDocs := int64(100)
+
+ for _, test := range []struct {
+ name string
+ opts QueryStatsOptions
+ expectLimitError string
+ }{
+ {
+ name: "metrics only",
+ opts: QueryStatsOptions{
+ Lookback: time.Second,
+ },
+ },
+ {
+ name: "metrics and limits",
+ opts: QueryStatsOptions{
+ MaxDocs: 100,
+ Lookback: time.Second,
+ },
+ expectLimitError: "query aborted, global recent time series blocks over limit: limit=100, current=101, within=1s",
+ },
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ tracker := DefaultQueryStatsTracker(opts, test.opts)
+
+ err := tracker.TrackStats(QueryStatsValues{RecentDocs: maxDocs + 1})
+ if test.expectLimitError != "" {
+ require.Error(t, err)
+ require.Equal(t, test.expectLimitError, err.Error())
+ } else {
+ require.NoError(t, err)
+ }
+
+ err = tracker.TrackStats(QueryStatsValues{RecentDocs: maxDocs - 1})
+ require.NoError(t, err)
+
+ err = tracker.TrackStats(QueryStatsValues{RecentDocs: 0})
+ require.NoError(t, err)
+
+ err = tracker.TrackStats(QueryStatsValues{RecentDocs: maxDocs + 1})
+ if test.expectLimitError != "" {
+ require.Error(t, err)
+ require.Equal(t, test.expectLimitError, err.Error())
+ } else {
+ require.NoError(t, err)
+ }
+
+ err = tracker.TrackStats(QueryStatsValues{RecentDocs: maxDocs - 1})
+ require.NoError(t, err)
+
+ err = tracker.TrackStats(QueryStatsValues{RecentDocs: 0})
+ require.NoError(t, err)
+ })
+ }
+}
+
+func verifyMetrics(t *testing.T, scope tally.TestScope, expectedRecent float64, expectedTotal int64) {
+ snapshot := scope.Snapshot()
+
+ recent, exists := snapshot.Gauges()["query-stats.recent-docs-per-block+"]
+ assert.True(t, exists)
+ assert.Equal(t, expectedRecent, recent.Value())
+
+ total, exists := snapshot.Counters()["query-stats.total-docs-per-block+"]
+ assert.True(t, exists)
+ assert.Equal(t, expectedTotal, total.Value())
+}
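One note on the snapshot keys asserted above: tally test scopes key snapshots by the metric name and its serialized tags joined with '+', so an untagged metric ends in a bare '+'. A small sketch, illustrative only:

```go
package main

import (
	"fmt"

	"github.com/uber-go/tally"
)

func main() {
	scope := tally.NewTestScope("", nil)
	scope.SubScope("query-stats").Gauge("recent-docs-per-block").Update(3)

	// With no tags the serialized tag portion is empty, leaving the
	// trailing '+' seen in verifyMetrics above.
	snapshot := scope.Snapshot()
	if gauge, ok := snapshot.Gauges()["query-stats.recent-docs-per-block+"]; ok {
		fmt.Println(gauge.Value()) // 3
	}
}
```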
diff --git a/src/dbnode/storage/stats/query_stats_test.go b/src/dbnode/storage/stats/query_stats_test.go
new file mode 100644
index 0000000000..db746c7094
--- /dev/null
+++ b/src/dbnode/storage/stats/query_stats_test.go
@@ -0,0 +1,103 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package stats
+
+import (
+ "sync"
+ "testing"
+ "time"
+
+ xclock "github.com/m3db/m3/src/x/clock"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type testQueryStatsTracker struct {
+ sync.RWMutex
+ QueryStatsValues
+ lookback time.Duration
+}
+
+var _ QueryStatsTracker = (*testQueryStatsTracker)(nil)
+
+func (t *testQueryStatsTracker) TrackStats(values QueryStatsValues) error {
+ t.Lock()
+ defer t.Unlock()
+
+ t.QueryStatsValues = values
+ return nil
+}
+
+func (t *testQueryStatsTracker) StatsValues() QueryStatsValues {
+ t.RLock()
+ defer t.RUnlock()
+
+ return t.QueryStatsValues
+}
+
+func (t *testQueryStatsTracker) Lookback() time.Duration {
+ return t.lookback
+}
+
+func TestUpdateTracker(t *testing.T) {
+ tracker := &testQueryStatsTracker{}
+
+ queryStats := NewQueryStats(tracker)
+ defer queryStats.Stop()
+
+ err := queryStats.Update(3)
+ require.NoError(t, err)
+ verifyStats(t, tracker, 3, 3)
+
+ err = queryStats.Update(2)
+ require.NoError(t, err)
+ verifyStats(t, tracker, 2, 5)
+}
+
+func TestPeriodicallyResetRecentDocs(t *testing.T) {
+ tracker := &testQueryStatsTracker{lookback: time.Millisecond}
+
+ queryStats := NewQueryStats(tracker)
+
+ err := queryStats.Update(1)
+ require.NoError(t, err)
+ verifyStats(t, tracker, 1, 1)
+
+ queryStats.Start()
+ defer queryStats.Stop()
+ time.Sleep(tracker.lookback * 2)
+
+ success := xclock.WaitUntil(func() bool {
+ return statsEqual(tracker.StatsValues(), 0, 0)
+ }, 10*time.Second)
+ require.True(t, success, "did not eventually reset")
+}
+
+func verifyStats(t *testing.T, tracker *testQueryStatsTracker, expectedNew int64, expectedRecent int64) {
+ values := tracker.StatsValues()
+ assert.True(t, statsEqual(values, expectedNew, expectedRecent))
+}
+
+func statsEqual(values QueryStatsValues, expectedNew int64, expectedRecent int64) bool {
+ return expectedNew == values.NewDocs &&
+ expectedRecent == values.RecentDocs
+}
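The reset test above illustrates a pattern worth calling out: because the reset happens on a background ticker, the test polls with xclock.WaitUntil rather than asserting after a fixed sleep. A distilled, illustrative variant, reusing the testQueryStatsTracker helper and imports defined in this file:

```go
func TestEventuallyResetsRecentDocs(t *testing.T) {
	tracker := &testQueryStatsTracker{lookback: time.Millisecond}
	queryStats := NewQueryStats(tracker)
	queryStats.Start()
	defer queryStats.Stop()

	require.NoError(t, queryStats.Update(1))

	// Poll until the background goroutine has zeroed the stats, or fail
	// after a generous timeout; this avoids a flaky fixed-length sleep.
	reset := xclock.WaitUntil(func() bool {
		values := tracker.StatsValues() // reads under the tracker's RLock
		return values.RecentDocs == 0 && values.NewDocs == 0
	}, 10*time.Second)
	require.True(t, reset, "recent docs were never reset")
}
```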
diff --git a/src/dbnode/storage/storage_mock.go b/src/dbnode/storage/storage_mock.go
index ac700625b3..7fdf6edb4c 100644
--- a/src/dbnode/storage/storage_mock.go
+++ b/src/dbnode/storage/storage_mock.go
@@ -43,9 +43,10 @@ import (
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/repair"
"github.com/m3db/m3/src/dbnode/storage/series"
- "github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -255,10 +256,10 @@ func (mr *MockDatabaseMockRecorder) WriteTagged(ctx, namespace, id, tags, timest
}
// BatchWriter mocks base method
-func (m *MockDatabase) BatchWriter(namespace ident.ID, batchSize int) (ts.BatchWriter, error) {
+func (m *MockDatabase) BatchWriter(namespace ident.ID, batchSize int) (writes.BatchWriter, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BatchWriter", namespace, batchSize)
- ret0, _ := ret[0].(ts.BatchWriter)
+ ret0, _ := ret[0].(writes.BatchWriter)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -270,7 +271,7 @@ func (mr *MockDatabaseMockRecorder) BatchWriter(namespace, batchSize interface{}
}
// WriteBatch mocks base method
-func (m *MockDatabase) WriteBatch(ctx context.Context, namespace ident.ID, writes ts.BatchWriter, errHandler IndexedErrorHandler) error {
+func (m *MockDatabase) WriteBatch(ctx context.Context, namespace ident.ID, writes writes.BatchWriter, errHandler IndexedErrorHandler) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteBatch", ctx, namespace, writes, errHandler)
ret0, _ := ret[0].(error)
@@ -284,7 +285,7 @@ func (mr *MockDatabaseMockRecorder) WriteBatch(ctx, namespace, writes, errHandle
}
// WriteTaggedBatch mocks base method
-func (m *MockDatabase) WriteTaggedBatch(ctx context.Context, namespace ident.ID, writes ts.BatchWriter, errHandler IndexedErrorHandler) error {
+func (m *MockDatabase) WriteTaggedBatch(ctx context.Context, namespace ident.ID, writes writes.BatchWriter, errHandler IndexedErrorHandler) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteTaggedBatch", ctx, namespace, writes, errHandler)
ret0, _ := ret[0].(error)
@@ -650,10 +651,10 @@ func (mr *MockdatabaseMockRecorder) WriteTagged(ctx, namespace, id, tags, timest
}
// BatchWriter mocks base method
-func (m *Mockdatabase) BatchWriter(namespace ident.ID, batchSize int) (ts.BatchWriter, error) {
+func (m *Mockdatabase) BatchWriter(namespace ident.ID, batchSize int) (writes.BatchWriter, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BatchWriter", namespace, batchSize)
- ret0, _ := ret[0].(ts.BatchWriter)
+ ret0, _ := ret[0].(writes.BatchWriter)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -665,7 +666,7 @@ func (mr *MockdatabaseMockRecorder) BatchWriter(namespace, batchSize interface{}
}
// WriteBatch mocks base method
-func (m *Mockdatabase) WriteBatch(ctx context.Context, namespace ident.ID, writes ts.BatchWriter, errHandler IndexedErrorHandler) error {
+func (m *Mockdatabase) WriteBatch(ctx context.Context, namespace ident.ID, writes writes.BatchWriter, errHandler IndexedErrorHandler) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteBatch", ctx, namespace, writes, errHandler)
ret0, _ := ret[0].(error)
@@ -679,7 +680,7 @@ func (mr *MockdatabaseMockRecorder) WriteBatch(ctx, namespace, writes, errHandle
}
// WriteTaggedBatch mocks base method
-func (m *Mockdatabase) WriteTaggedBatch(ctx context.Context, namespace ident.ID, writes ts.BatchWriter, errHandler IndexedErrorHandler) error {
+func (m *Mockdatabase) WriteTaggedBatch(ctx context.Context, namespace ident.ID, writes writes.BatchWriter, errHandler IndexedErrorHandler) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteTaggedBatch", ctx, namespace, writes, errHandler)
ret0, _ := ret[0].(error)
@@ -882,19 +883,19 @@ func (mr *MockdatabaseMockRecorder) FlushState(namespace, shardID, blockStart in
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlushState", reflect.TypeOf((*Mockdatabase)(nil).FlushState), namespace, shardID, blockStart)
}
-// GetOwnedNamespaces mocks base method
-func (m *Mockdatabase) GetOwnedNamespaces() ([]databaseNamespace, error) {
+// OwnedNamespaces mocks base method
+func (m *Mockdatabase) OwnedNamespaces() ([]databaseNamespace, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetOwnedNamespaces")
+ ret := m.ctrl.Call(m, "OwnedNamespaces")
ret0, _ := ret[0].([]databaseNamespace)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// GetOwnedNamespaces indicates an expected call of GetOwnedNamespaces
-func (mr *MockdatabaseMockRecorder) GetOwnedNamespaces() *gomock.Call {
+// OwnedNamespaces indicates an expected call of OwnedNamespaces
+func (mr *MockdatabaseMockRecorder) OwnedNamespaces() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOwnedNamespaces", reflect.TypeOf((*Mockdatabase)(nil).GetOwnedNamespaces))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OwnedNamespaces", reflect.TypeOf((*Mockdatabase)(nil).OwnedNamespaces))
}
// UpdateOwnedNamespaces mocks base method
@@ -1018,6 +1019,35 @@ func (mr *MockNamespaceMockRecorder) Shards() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shards", reflect.TypeOf((*MockNamespace)(nil).Shards))
}
+// Index mocks base method
+func (m *MockNamespace) Index() (NamespaceIndex, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Index")
+ ret0, _ := ret[0].(NamespaceIndex)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Index indicates an expected call of Index
+func (mr *MockNamespaceMockRecorder) Index() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Index", reflect.TypeOf((*MockNamespace)(nil).Index))
+}
+
+// StorageOptions mocks base method
+func (m *MockNamespace) StorageOptions() Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StorageOptions")
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// StorageOptions indicates an expected call of StorageOptions
+func (mr *MockNamespaceMockRecorder) StorageOptions() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageOptions", reflect.TypeOf((*MockNamespace)(nil).StorageOptions))
+}
+
// MockdatabaseNamespace is a mock of databaseNamespace interface
type MockdatabaseNamespace struct {
ctrl *gomock.Controller
@@ -1125,6 +1155,35 @@ func (mr *MockdatabaseNamespaceMockRecorder) Shards() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shards", reflect.TypeOf((*MockdatabaseNamespace)(nil).Shards))
}
+// Index mocks base method
+func (m *MockdatabaseNamespace) Index() (NamespaceIndex, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Index")
+ ret0, _ := ret[0].(NamespaceIndex)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Index indicates an expected call of Index
+func (mr *MockdatabaseNamespaceMockRecorder) Index() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Index", reflect.TypeOf((*MockdatabaseNamespace)(nil).Index))
+}
+
+// StorageOptions mocks base method
+func (m *MockdatabaseNamespace) StorageOptions() Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StorageOptions")
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// StorageOptions indicates an expected call of StorageOptions
+func (mr *MockdatabaseNamespaceMockRecorder) StorageOptions() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageOptions", reflect.TypeOf((*MockdatabaseNamespace)(nil).StorageOptions))
+}
+
// Close mocks base method
func (m *MockdatabaseNamespace) Close() error {
m.ctrl.T.Helper()
@@ -1151,33 +1210,18 @@ func (mr *MockdatabaseNamespaceMockRecorder) AssignShardSet(shardSet interface{}
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssignShardSet", reflect.TypeOf((*MockdatabaseNamespace)(nil).AssignShardSet), shardSet)
}
-// GetOwnedShards mocks base method
-func (m *MockdatabaseNamespace) GetOwnedShards() []databaseShard {
+// OwnedShards mocks base method
+func (m *MockdatabaseNamespace) OwnedShards() []databaseShard {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetOwnedShards")
+ ret := m.ctrl.Call(m, "OwnedShards")
ret0, _ := ret[0].([]databaseShard)
return ret0
}
-// GetOwnedShards indicates an expected call of GetOwnedShards
-func (mr *MockdatabaseNamespaceMockRecorder) GetOwnedShards() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOwnedShards", reflect.TypeOf((*MockdatabaseNamespace)(nil).GetOwnedShards))
-}
-
-// GetIndex mocks base method
-func (m *MockdatabaseNamespace) GetIndex() (namespaceIndex, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetIndex")
- ret0, _ := ret[0].(namespaceIndex)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetIndex indicates an expected call of GetIndex
-func (mr *MockdatabaseNamespaceMockRecorder) GetIndex() *gomock.Call {
+// OwnedShards indicates an expected call of OwnedShards
+func (mr *MockdatabaseNamespaceMockRecorder) OwnedShards() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIndex", reflect.TypeOf((*MockdatabaseNamespace)(nil).GetIndex))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OwnedShards", reflect.TypeOf((*MockdatabaseNamespace)(nil).OwnedShards))
}
// Tick mocks base method
@@ -1195,13 +1239,12 @@ func (mr *MockdatabaseNamespaceMockRecorder) Tick(c, startTime interface{}) *gom
}
// Write mocks base method
-func (m *MockdatabaseNamespace) Write(ctx context.Context, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) (ts.Series, bool, error) {
+func (m *MockdatabaseNamespace) Write(ctx context.Context, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) (SeriesWrite, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Write", ctx, id, timestamp, value, unit, annotation)
- ret0, _ := ret[0].(ts.Series)
- ret1, _ := ret[1].(bool)
- ret2, _ := ret[2].(error)
- return ret0, ret1, ret2
+ ret0, _ := ret[0].(SeriesWrite)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
// Write indicates an expected call of Write
@@ -1211,13 +1254,12 @@ func (mr *MockdatabaseNamespaceMockRecorder) Write(ctx, id, timestamp, value, un
}
// WriteTagged mocks base method
-func (m *MockdatabaseNamespace) WriteTagged(ctx context.Context, id ident.ID, tags ident.TagIterator, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) (ts.Series, bool, error) {
+func (m *MockdatabaseNamespace) WriteTagged(ctx context.Context, id ident.ID, tags ident.TagIterator, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) (SeriesWrite, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteTagged", ctx, id, tags, timestamp, value, unit, annotation)
- ret0, _ := ret[0].(ts.Series)
- ret1, _ := ret[1].(bool)
- ret2, _ := ret[2].(error)
- return ret0, ret1, ret2
+ ret0, _ := ret[0].(SeriesWrite)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
// WriteTagged indicates an expected call of WriteTagged
@@ -1303,32 +1345,32 @@ func (mr *MockdatabaseNamespaceMockRecorder) FetchBlocksMetadataV2(ctx, shardID,
}
// PrepareBootstrap mocks base method
-func (m *MockdatabaseNamespace) PrepareBootstrap() ([]databaseShard, error) {
+func (m *MockdatabaseNamespace) PrepareBootstrap(ctx context.Context) ([]databaseShard, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "PrepareBootstrap")
+ ret := m.ctrl.Call(m, "PrepareBootstrap", ctx)
ret0, _ := ret[0].([]databaseShard)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PrepareBootstrap indicates an expected call of PrepareBootstrap
-func (mr *MockdatabaseNamespaceMockRecorder) PrepareBootstrap() *gomock.Call {
+func (mr *MockdatabaseNamespaceMockRecorder) PrepareBootstrap(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareBootstrap", reflect.TypeOf((*MockdatabaseNamespace)(nil).PrepareBootstrap))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareBootstrap", reflect.TypeOf((*MockdatabaseNamespace)(nil).PrepareBootstrap), ctx)
}
// Bootstrap mocks base method
-func (m *MockdatabaseNamespace) Bootstrap(bootstrapResult bootstrap.NamespaceResult) error {
+func (m *MockdatabaseNamespace) Bootstrap(ctx context.Context, bootstrapResult bootstrap.NamespaceResult) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Bootstrap", bootstrapResult)
+ ret := m.ctrl.Call(m, "Bootstrap", ctx, bootstrapResult)
ret0, _ := ret[0].(error)
return ret0
}
// Bootstrap indicates an expected call of Bootstrap
-func (mr *MockdatabaseNamespaceMockRecorder) Bootstrap(bootstrapResult interface{}) *gomock.Call {
+func (mr *MockdatabaseNamespaceMockRecorder) Bootstrap(ctx, bootstrapResult interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrap", reflect.TypeOf((*MockdatabaseNamespace)(nil).Bootstrap), bootstrapResult)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrap", reflect.TypeOf((*MockdatabaseNamespace)(nil).Bootstrap), ctx, bootstrapResult)
}
// WarmFlush mocks base method
@@ -1476,6 +1518,20 @@ func (mr *MockdatabaseNamespaceMockRecorder) SeriesReadWriteRef(shardID, id, tag
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SeriesReadWriteRef", reflect.TypeOf((*MockdatabaseNamespace)(nil).SeriesReadWriteRef), shardID, id, tags)
}
+// WritePendingIndexInserts mocks base method
+func (m *MockdatabaseNamespace) WritePendingIndexInserts(pending []writes.PendingIndexInsert) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WritePendingIndexInserts", pending)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WritePendingIndexInserts indicates an expected call of WritePendingIndexInserts
+func (mr *MockdatabaseNamespaceMockRecorder) WritePendingIndexInserts(pending interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WritePendingIndexInserts", reflect.TypeOf((*MockdatabaseNamespace)(nil).WritePendingIndexInserts), pending)
+}
+
// MockShard is a mock of Shard interface
type MockShard struct {
ctrl *gomock.Controller
@@ -1676,13 +1732,12 @@ func (mr *MockdatabaseShardMockRecorder) Tick(c, startTime, nsCtx interface{}) *
}
// Write mocks base method
-func (m *MockdatabaseShard) Write(ctx context.Context, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte, wOpts series.WriteOptions) (ts.Series, bool, error) {
+func (m *MockdatabaseShard) Write(ctx context.Context, id ident.ID, timestamp time.Time, value float64, unit time0.Unit, annotation []byte, wOpts series.WriteOptions) (SeriesWrite, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Write", ctx, id, timestamp, value, unit, annotation, wOpts)
- ret0, _ := ret[0].(ts.Series)
- ret1, _ := ret[1].(bool)
- ret2, _ := ret[2].(error)
- return ret0, ret1, ret2
+ ret0, _ := ret[0].(SeriesWrite)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
// Write indicates an expected call of Write
@@ -1692,13 +1747,12 @@ func (mr *MockdatabaseShardMockRecorder) Write(ctx, id, timestamp, value, unit,
}
// WriteTagged mocks base method
-func (m *MockdatabaseShard) WriteTagged(ctx context.Context, id ident.ID, tags ident.TagIterator, timestamp time.Time, value float64, unit time0.Unit, annotation []byte, wOpts series.WriteOptions) (ts.Series, bool, error) {
+func (m *MockdatabaseShard) WriteTagged(ctx context.Context, id ident.ID, tags ident.TagIterator, timestamp time.Time, value float64, unit time0.Unit, annotation []byte, wOpts series.WriteOptions) (SeriesWrite, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteTagged", ctx, id, tags, timestamp, value, unit, annotation, wOpts)
- ret0, _ := ret[0].(ts.Series)
- ret1, _ := ret[1].(bool)
- ret2, _ := ret[2].(error)
- return ret0, ret1, ret2
+ ret0, _ := ret[0].(SeriesWrite)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
// WriteTagged indicates an expected call of WriteTagged
@@ -1738,10 +1792,10 @@ func (mr *MockdatabaseShardMockRecorder) FetchBlocks(ctx, id, starts, nsCtx inte
}
// FetchBlocksForColdFlush mocks base method
-func (m *MockdatabaseShard) FetchBlocksForColdFlush(ctx context.Context, seriesID ident.ID, start time.Time, version int, nsCtx namespace.Context) ([]xio.BlockReader, error) {
+func (m *MockdatabaseShard) FetchBlocksForColdFlush(ctx context.Context, seriesID ident.ID, start time.Time, version int, nsCtx namespace.Context) (block.FetchBlockResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FetchBlocksForColdFlush", ctx, seriesID, start, version, nsCtx)
- ret0, _ := ret[0].([]xio.BlockReader)
+ ret0, _ := ret[0].(block.FetchBlockResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -1769,31 +1823,31 @@ func (mr *MockdatabaseShardMockRecorder) FetchBlocksMetadataV2(ctx, start, end,
}
// PrepareBootstrap mocks base method
-func (m *MockdatabaseShard) PrepareBootstrap() error {
+func (m *MockdatabaseShard) PrepareBootstrap(ctx context.Context) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "PrepareBootstrap")
+ ret := m.ctrl.Call(m, "PrepareBootstrap", ctx)
ret0, _ := ret[0].(error)
return ret0
}
// PrepareBootstrap indicates an expected call of PrepareBootstrap
-func (mr *MockdatabaseShardMockRecorder) PrepareBootstrap() *gomock.Call {
+func (mr *MockdatabaseShardMockRecorder) PrepareBootstrap(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareBootstrap", reflect.TypeOf((*MockdatabaseShard)(nil).PrepareBootstrap))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareBootstrap", reflect.TypeOf((*MockdatabaseShard)(nil).PrepareBootstrap), ctx)
}
// Bootstrap mocks base method
-func (m *MockdatabaseShard) Bootstrap() error {
+func (m *MockdatabaseShard) Bootstrap(ctx context.Context, nsCtx namespace.Context) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Bootstrap")
+ ret := m.ctrl.Call(m, "Bootstrap", ctx, nsCtx)
ret0, _ := ret[0].(error)
return ret0
}
// Bootstrap indicates an expected call of Bootstrap
-func (mr *MockdatabaseShardMockRecorder) Bootstrap() *gomock.Call {
+func (mr *MockdatabaseShardMockRecorder) Bootstrap(ctx, nsCtx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrap", reflect.TypeOf((*MockdatabaseShard)(nil).Bootstrap))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrap", reflect.TypeOf((*MockdatabaseShard)(nil).Bootstrap), ctx, nsCtx)
}
// UpdateFlushStates mocks base method
@@ -1837,25 +1891,27 @@ func (mr *MockdatabaseShardMockRecorder) WarmFlush(blockStart, flush, nsCtx inte
}
// ColdFlush mocks base method
-func (m *MockdatabaseShard) ColdFlush(flush persist.FlushPreparer, resources coldFlushReuseableResources, nsCtx namespace.Context) error {
+func (m *MockdatabaseShard) ColdFlush(flush persist.FlushPreparer, resources coldFlushReuseableResources, nsCtx namespace.Context, onFlush persist.OnFlushSeries) (ShardColdFlush, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ColdFlush", flush, resources, nsCtx)
- ret0, _ := ret[0].(error)
- return ret0
+ ret := m.ctrl.Call(m, "ColdFlush", flush, resources, nsCtx, onFlush)
+ ret0, _ := ret[0].(ShardColdFlush)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
// ColdFlush indicates an expected call of ColdFlush
-func (mr *MockdatabaseShardMockRecorder) ColdFlush(flush, resources, nsCtx interface{}) *gomock.Call {
+func (mr *MockdatabaseShardMockRecorder) ColdFlush(flush, resources, nsCtx, onFlush interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ColdFlush", reflect.TypeOf((*MockdatabaseShard)(nil).ColdFlush), flush, resources, nsCtx)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ColdFlush", reflect.TypeOf((*MockdatabaseShard)(nil).ColdFlush), flush, resources, nsCtx, onFlush)
}
// Snapshot mocks base method
-func (m *MockdatabaseShard) Snapshot(blockStart, snapshotStart time.Time, flush persist.SnapshotPreparer, nsCtx namespace.Context) error {
+func (m *MockdatabaseShard) Snapshot(blockStart, snapshotStart time.Time, flush persist.SnapshotPreparer, nsCtx namespace.Context) (ShardSnapshotResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Snapshot", blockStart, snapshotStart, flush, nsCtx)
- ret0, _ := ret[0].(error)
- return ret0
+ ret0, _ := ret[0].(ShardSnapshotResult)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
}
// Snapshot indicates an expected call of Snapshot
@@ -1922,22 +1978,6 @@ func (mr *MockdatabaseShardMockRecorder) Repair(ctx, nsCtx, nsMeta, tr, repairer
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Repair", reflect.TypeOf((*MockdatabaseShard)(nil).Repair), ctx, nsCtx, nsMeta, tr, repairer)
}
-// TagsFromSeriesID mocks base method
-func (m *MockdatabaseShard) TagsFromSeriesID(seriesID ident.ID) (ident.Tags, bool, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "TagsFromSeriesID", seriesID)
- ret0, _ := ret[0].(ident.Tags)
- ret1, _ := ret[1].(bool)
- ret2, _ := ret[2].(error)
- return ret0, ret1, ret2
-}
-
-// TagsFromSeriesID indicates an expected call of TagsFromSeriesID
-func (mr *MockdatabaseShardMockRecorder) TagsFromSeriesID(seriesID interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagsFromSeriesID", reflect.TypeOf((*MockdatabaseShard)(nil).TagsFromSeriesID), seriesID)
-}
-
// SeriesReadWriteRef mocks base method
func (m *MockdatabaseShard) SeriesReadWriteRef(id ident.ID, tags ident.TagIterator, opts ShardSeriesReadWriteRefOptions) (SeriesReadWriteRef, error) {
m.ctrl.T.Helper()
@@ -1953,43 +1993,96 @@ func (mr *MockdatabaseShardMockRecorder) SeriesReadWriteRef(id, tags, opts inter
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SeriesReadWriteRef", reflect.TypeOf((*MockdatabaseShard)(nil).SeriesReadWriteRef), id, tags, opts)
}
-// MocknamespaceIndex is a mock of namespaceIndex interface
-type MocknamespaceIndex struct {
+// DocRef mocks base method
+func (m *MockdatabaseShard) DocRef(id ident.ID) (doc.Document, bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DocRef", id)
+ ret0, _ := ret[0].(doc.Document)
+ ret1, _ := ret[1].(bool)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// DocRef indicates an expected call of DocRef
+func (mr *MockdatabaseShardMockRecorder) DocRef(id interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DocRef", reflect.TypeOf((*MockdatabaseShard)(nil).DocRef), id)
+}
+
+// MockShardColdFlush is a mock of ShardColdFlush interface
+type MockShardColdFlush struct {
ctrl *gomock.Controller
- recorder *MocknamespaceIndexMockRecorder
+ recorder *MockShardColdFlushMockRecorder
}
-// MocknamespaceIndexMockRecorder is the mock recorder for MocknamespaceIndex
-type MocknamespaceIndexMockRecorder struct {
- mock *MocknamespaceIndex
+// MockShardColdFlushMockRecorder is the mock recorder for MockShardColdFlush
+type MockShardColdFlushMockRecorder struct {
+ mock *MockShardColdFlush
}
-// NewMocknamespaceIndex creates a new mock instance
-func NewMocknamespaceIndex(ctrl *gomock.Controller) *MocknamespaceIndex {
- mock := &MocknamespaceIndex{ctrl: ctrl}
- mock.recorder = &MocknamespaceIndexMockRecorder{mock}
+// NewMockShardColdFlush creates a new mock instance
+func NewMockShardColdFlush(ctrl *gomock.Controller) *MockShardColdFlush {
+ mock := &MockShardColdFlush{ctrl: ctrl}
+ mock.recorder = &MockShardColdFlushMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
-func (m *MocknamespaceIndex) EXPECT() *MocknamespaceIndexMockRecorder {
+func (m *MockShardColdFlush) EXPECT() *MockShardColdFlushMockRecorder {
+ return m.recorder
+}
+
+// Done mocks base method
+func (m *MockShardColdFlush) Done() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Done")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Done indicates an expected call of Done
+func (mr *MockShardColdFlushMockRecorder) Done() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Done", reflect.TypeOf((*MockShardColdFlush)(nil).Done))
+}
+
+// MockNamespaceIndex is a mock of NamespaceIndex interface
+type MockNamespaceIndex struct {
+ ctrl *gomock.Controller
+ recorder *MockNamespaceIndexMockRecorder
+}
+
+// MockNamespaceIndexMockRecorder is the mock recorder for MockNamespaceIndex
+type MockNamespaceIndexMockRecorder struct {
+ mock *MockNamespaceIndex
+}
+
+// NewMockNamespaceIndex creates a new mock instance
+func NewMockNamespaceIndex(ctrl *gomock.Controller) *MockNamespaceIndex {
+ mock := &MockNamespaceIndex{ctrl: ctrl}
+ mock.recorder = &MockNamespaceIndexMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockNamespaceIndex) EXPECT() *MockNamespaceIndexMockRecorder {
return m.recorder
}
// AssignShardSet mocks base method
-func (m *MocknamespaceIndex) AssignShardSet(shardSet sharding.ShardSet) {
+func (m *MockNamespaceIndex) AssignShardSet(shardSet sharding.ShardSet) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "AssignShardSet", shardSet)
}
// AssignShardSet indicates an expected call of AssignShardSet
-func (mr *MocknamespaceIndexMockRecorder) AssignShardSet(shardSet interface{}) *gomock.Call {
+func (mr *MockNamespaceIndexMockRecorder) AssignShardSet(shardSet interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssignShardSet", reflect.TypeOf((*MocknamespaceIndex)(nil).AssignShardSet), shardSet)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssignShardSet", reflect.TypeOf((*MockNamespaceIndex)(nil).AssignShardSet), shardSet)
}
// BlockStartForWriteTime mocks base method
-func (m *MocknamespaceIndex) BlockStartForWriteTime(writeTime time.Time) time0.UnixNano {
+func (m *MockNamespaceIndex) BlockStartForWriteTime(writeTime time.Time) time0.UnixNano {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BlockStartForWriteTime", writeTime)
ret0, _ := ret[0].(time0.UnixNano)
@@ -1997,13 +2090,28 @@ func (m *MocknamespaceIndex) BlockStartForWriteTime(writeTime time.Time) time0.U
}
// BlockStartForWriteTime indicates an expected call of BlockStartForWriteTime
-func (mr *MocknamespaceIndexMockRecorder) BlockStartForWriteTime(writeTime interface{}) *gomock.Call {
+func (mr *MockNamespaceIndexMockRecorder) BlockStartForWriteTime(writeTime interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockStartForWriteTime", reflect.TypeOf((*MockNamespaceIndex)(nil).BlockStartForWriteTime), writeTime)
+}
+
+// BlockForBlockStart mocks base method
+func (m *MockNamespaceIndex) BlockForBlockStart(blockStart time.Time) (index.Block, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BlockForBlockStart", blockStart)
+ ret0, _ := ret[0].(index.Block)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BlockForBlockStart indicates an expected call of BlockForBlockStart
+func (mr *MockNamespaceIndexMockRecorder) BlockForBlockStart(blockStart interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockStartForWriteTime", reflect.TypeOf((*MocknamespaceIndex)(nil).BlockStartForWriteTime), writeTime)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockForBlockStart", reflect.TypeOf((*MockNamespaceIndex)(nil).BlockForBlockStart), blockStart)
}
// WriteBatch mocks base method
-func (m *MocknamespaceIndex) WriteBatch(batch *index.WriteBatch) error {
+func (m *MockNamespaceIndex) WriteBatch(batch *index.WriteBatch) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteBatch", batch)
ret0, _ := ret[0].(error)
@@ -2011,13 +2119,27 @@ func (m *MocknamespaceIndex) WriteBatch(batch *index.WriteBatch) error {
}
// WriteBatch indicates an expected call of WriteBatch
-func (mr *MocknamespaceIndexMockRecorder) WriteBatch(batch interface{}) *gomock.Call {
+func (mr *MockNamespaceIndexMockRecorder) WriteBatch(batch interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatch", reflect.TypeOf((*MockNamespaceIndex)(nil).WriteBatch), batch)
+}
+
+// WritePending mocks base method
+func (m *MockNamespaceIndex) WritePending(pending []writes.PendingIndexInsert) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WritePending", pending)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WritePending indicates an expected call of WritePending
+func (mr *MockNamespaceIndexMockRecorder) WritePending(pending interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatch", reflect.TypeOf((*MocknamespaceIndex)(nil).WriteBatch), batch)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WritePending", reflect.TypeOf((*MockNamespaceIndex)(nil).WritePending), pending)
}
// Query mocks base method
-func (m *MocknamespaceIndex) Query(ctx context.Context, query index.Query, opts index.QueryOptions) (index.QueryResult, error) {
+func (m *MockNamespaceIndex) Query(ctx context.Context, query index.Query, opts index.QueryOptions) (index.QueryResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Query", ctx, query, opts)
ret0, _ := ret[0].(index.QueryResult)
@@ -2026,13 +2148,13 @@ func (m *MocknamespaceIndex) Query(ctx context.Context, query index.Query, opts
}
// Query indicates an expected call of Query
-func (mr *MocknamespaceIndexMockRecorder) Query(ctx, query, opts interface{}) *gomock.Call {
+func (mr *MockNamespaceIndexMockRecorder) Query(ctx, query, opts interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MocknamespaceIndex)(nil).Query), ctx, query, opts)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockNamespaceIndex)(nil).Query), ctx, query, opts)
}
// AggregateQuery mocks base method
-func (m *MocknamespaceIndex) AggregateQuery(ctx context.Context, query index.Query, opts index.AggregationOptions) (index.AggregateQueryResult, error) {
+func (m *MockNamespaceIndex) AggregateQuery(ctx context.Context, query index.Query, opts index.AggregationOptions) (index.AggregateQueryResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AggregateQuery", ctx, query, opts)
ret0, _ := ret[0].(index.AggregateQueryResult)
@@ -2041,13 +2163,13 @@ func (m *MocknamespaceIndex) AggregateQuery(ctx context.Context, query index.Que
}
// AggregateQuery indicates an expected call of AggregateQuery
-func (mr *MocknamespaceIndexMockRecorder) AggregateQuery(ctx, query, opts interface{}) *gomock.Call {
+func (mr *MockNamespaceIndexMockRecorder) AggregateQuery(ctx, query, opts interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateQuery", reflect.TypeOf((*MocknamespaceIndex)(nil).AggregateQuery), ctx, query, opts)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AggregateQuery", reflect.TypeOf((*MockNamespaceIndex)(nil).AggregateQuery), ctx, query, opts)
}
// Bootstrap mocks base method
-func (m *MocknamespaceIndex) Bootstrap(bootstrapResults result.IndexResults) error {
+func (m *MockNamespaceIndex) Bootstrap(bootstrapResults result.IndexResults) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Bootstrap", bootstrapResults)
ret0, _ := ret[0].(error)
@@ -2055,13 +2177,13 @@ func (m *MocknamespaceIndex) Bootstrap(bootstrapResults result.IndexResults) err
}
// Bootstrap indicates an expected call of Bootstrap
-func (mr *MocknamespaceIndexMockRecorder) Bootstrap(bootstrapResults interface{}) *gomock.Call {
+func (mr *MockNamespaceIndexMockRecorder) Bootstrap(bootstrapResults interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrap", reflect.TypeOf((*MocknamespaceIndex)(nil).Bootstrap), bootstrapResults)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrap", reflect.TypeOf((*MockNamespaceIndex)(nil).Bootstrap), bootstrapResults)
}
// BootstrapsDone mocks base method
-func (m *MocknamespaceIndex) BootstrapsDone() uint {
+func (m *MockNamespaceIndex) BootstrapsDone() uint {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BootstrapsDone")
ret0, _ := ret[0].(uint)
@@ -2069,13 +2191,13 @@ func (m *MocknamespaceIndex) BootstrapsDone() uint {
}
// BootstrapsDone indicates an expected call of BootstrapsDone
-func (mr *MocknamespaceIndexMockRecorder) BootstrapsDone() *gomock.Call {
+func (mr *MockNamespaceIndexMockRecorder) BootstrapsDone() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrapsDone", reflect.TypeOf((*MocknamespaceIndex)(nil).BootstrapsDone))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrapsDone", reflect.TypeOf((*MockNamespaceIndex)(nil).BootstrapsDone))
}
// CleanupExpiredFileSets mocks base method
-func (m *MocknamespaceIndex) CleanupExpiredFileSets(t time.Time) error {
+func (m *MockNamespaceIndex) CleanupExpiredFileSets(t time.Time) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CleanupExpiredFileSets", t)
ret0, _ := ret[0].(error)
@@ -2083,13 +2205,27 @@ func (m *MocknamespaceIndex) CleanupExpiredFileSets(t time.Time) error {
}
// CleanupExpiredFileSets indicates an expected call of CleanupExpiredFileSets
-func (mr *MocknamespaceIndexMockRecorder) CleanupExpiredFileSets(t interface{}) *gomock.Call {
+func (mr *MockNamespaceIndexMockRecorder) CleanupExpiredFileSets(t interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupExpiredFileSets", reflect.TypeOf((*MocknamespaceIndex)(nil).CleanupExpiredFileSets), t)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupExpiredFileSets", reflect.TypeOf((*MockNamespaceIndex)(nil).CleanupExpiredFileSets), t)
+}
+
+// CleanupDuplicateFileSets mocks base method
+func (m *MockNamespaceIndex) CleanupDuplicateFileSets() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CleanupDuplicateFileSets")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// CleanupDuplicateFileSets indicates an expected call of CleanupDuplicateFileSets
+func (mr *MockNamespaceIndexMockRecorder) CleanupDuplicateFileSets() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupDuplicateFileSets", reflect.TypeOf((*MockNamespaceIndex)(nil).CleanupDuplicateFileSets))
}
// Tick mocks base method
-func (m *MocknamespaceIndex) Tick(c context.Cancellable, startTime time.Time) (namespaceIndexTickResult, error) {
+func (m *MockNamespaceIndex) Tick(c context.Cancellable, startTime time.Time) (namespaceIndexTickResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Tick", c, startTime)
ret0, _ := ret[0].(namespaceIndexTickResult)
@@ -2098,27 +2234,56 @@ func (m *MocknamespaceIndex) Tick(c context.Cancellable, startTime time.Time) (n
}
// Tick indicates an expected call of Tick
-func (mr *MocknamespaceIndexMockRecorder) Tick(c, startTime interface{}) *gomock.Call {
+func (mr *MockNamespaceIndexMockRecorder) Tick(c, startTime interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tick", reflect.TypeOf((*MocknamespaceIndex)(nil).Tick), c, startTime)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tick", reflect.TypeOf((*MockNamespaceIndex)(nil).Tick), c, startTime)
}
-// Flush mocks base method
-func (m *MocknamespaceIndex) Flush(flush persist.IndexFlush, shards []databaseShard) error {
+// WarmFlush mocks base method
+func (m *MockNamespaceIndex) WarmFlush(flush persist.IndexFlush, shards []databaseShard) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Flush", flush, shards)
+ ret := m.ctrl.Call(m, "WarmFlush", flush, shards)
ret0, _ := ret[0].(error)
return ret0
}
-// Flush indicates an expected call of Flush
-func (mr *MocknamespaceIndexMockRecorder) Flush(flush, shards interface{}) *gomock.Call {
+// WarmFlush indicates an expected call of WarmFlush
+func (mr *MockNamespaceIndexMockRecorder) WarmFlush(flush, shards interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Flush", reflect.TypeOf((*MocknamespaceIndex)(nil).Flush), flush, shards)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WarmFlush", reflect.TypeOf((*MockNamespaceIndex)(nil).WarmFlush), flush, shards)
+}
+
+// ColdFlush mocks base method
+func (m *MockNamespaceIndex) ColdFlush(shards []databaseShard) (OnColdFlushDone, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ColdFlush", shards)
+ ret0, _ := ret[0].(OnColdFlushDone)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ColdFlush indicates an expected call of ColdFlush
+func (mr *MockNamespaceIndexMockRecorder) ColdFlush(shards interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ColdFlush", reflect.TypeOf((*MockNamespaceIndex)(nil).ColdFlush), shards)
+}
+
+// DebugMemorySegments mocks base method
+func (m *MockNamespaceIndex) DebugMemorySegments(opts DebugMemorySegmentsOptions) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DebugMemorySegments", opts)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DebugMemorySegments indicates an expected call of DebugMemorySegments
+func (mr *MockNamespaceIndexMockRecorder) DebugMemorySegments(opts interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DebugMemorySegments", reflect.TypeOf((*MockNamespaceIndex)(nil).DebugMemorySegments), opts)
}
// Close mocks base method
-func (m *MocknamespaceIndex) Close() error {
+func (m *MockNamespaceIndex) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
@@ -2126,9 +2291,9 @@ func (m *MocknamespaceIndex) Close() error {
}
// Close indicates an expected call of Close
-func (mr *MocknamespaceIndexMockRecorder) Close() *gomock.Call {
+func (mr *MockNamespaceIndexMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MocknamespaceIndex)(nil).Close))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockNamespaceIndex)(nil).Close))
}
// MocknamespaceIndexInsertQueue is a mock of namespaceIndexInsertQueue interface
@@ -2197,6 +2362,21 @@ func (mr *MocknamespaceIndexInsertQueueMockRecorder) InsertBatch(batch interface
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertBatch", reflect.TypeOf((*MocknamespaceIndexInsertQueue)(nil).InsertBatch), batch)
}
+// InsertPending mocks base method
+func (m *MocknamespaceIndexInsertQueue) InsertPending(pending []writes.PendingIndexInsert) (*sync.WaitGroup, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InsertPending", pending)
+ ret0, _ := ret[0].(*sync.WaitGroup)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// InsertPending indicates an expected call of InsertPending
+func (mr *MocknamespaceIndexInsertQueueMockRecorder) InsertPending(pending interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertPending", reflect.TypeOf((*MocknamespaceIndexInsertQueue)(nil).InsertPending), pending)
+}
+
// MockdatabaseBootstrapManager is a mock of databaseBootstrapManager interface
type MockdatabaseBootstrapManager struct {
ctrl *gomock.Controller
@@ -2235,10 +2415,10 @@ func (mr *MockdatabaseBootstrapManagerMockRecorder) IsBootstrapped() *gomock.Cal
}
// LastBootstrapCompletionTime mocks base method
-func (m *MockdatabaseBootstrapManager) LastBootstrapCompletionTime() (time.Time, bool) {
+func (m *MockdatabaseBootstrapManager) LastBootstrapCompletionTime() (time0.UnixNano, bool) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LastBootstrapCompletionTime")
- ret0, _ := ret[0].(time.Time)
+ ret0, _ := ret[0].(time0.UnixNano)
ret1, _ := ret[1].(bool)
return ret0, ret1
}
@@ -2314,10 +2494,10 @@ func (mr *MockdatabaseFlushManagerMockRecorder) Flush(startTime interface{}) *go
}
// LastSuccessfulSnapshotStartTime mocks base method
-func (m *MockdatabaseFlushManager) LastSuccessfulSnapshotStartTime() (time.Time, bool) {
+func (m *MockdatabaseFlushManager) LastSuccessfulSnapshotStartTime() (time0.UnixNano, bool) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LastSuccessfulSnapshotStartTime")
- ret0, _ := ret[0].(time.Time)
+ ret0, _ := ret[0].(time0.UnixNano)
ret1, _ := ret[1].(bool)
return ret0, ret1
}
@@ -2363,18 +2543,32 @@ func (m *MockdatabaseCleanupManager) EXPECT() *MockdatabaseCleanupManagerMockRec
return m.recorder
}
-// Cleanup mocks base method
-func (m *MockdatabaseCleanupManager) Cleanup(t time.Time) error {
+// WarmFlushCleanup mocks base method
+func (m *MockdatabaseCleanupManager) WarmFlushCleanup(t time.Time, isBootstrapped bool) error {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Cleanup", t)
+ ret := m.ctrl.Call(m, "WarmFlushCleanup", t, isBootstrapped)
ret0, _ := ret[0].(error)
return ret0
}
-// Cleanup indicates an expected call of Cleanup
-func (mr *MockdatabaseCleanupManagerMockRecorder) Cleanup(t interface{}) *gomock.Call {
+// WarmFlushCleanup indicates an expected call of WarmFlushCleanup
+func (mr *MockdatabaseCleanupManagerMockRecorder) WarmFlushCleanup(t, isBootstrapped interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cleanup", reflect.TypeOf((*MockdatabaseCleanupManager)(nil).Cleanup), t)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WarmFlushCleanup", reflect.TypeOf((*MockdatabaseCleanupManager)(nil).WarmFlushCleanup), t, isBootstrapped)
+}
+
+// ColdFlushCleanup mocks base method
+func (m *MockdatabaseCleanupManager) ColdFlushCleanup(t time.Time, isBootstrapped bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ColdFlushCleanup", t, isBootstrapped)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ColdFlushCleanup indicates an expected call of ColdFlushCleanup
+func (mr *MockdatabaseCleanupManagerMockRecorder) ColdFlushCleanup(t, isBootstrapped interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ColdFlushCleanup", reflect.TypeOf((*MockdatabaseCleanupManager)(nil).ColdFlushCleanup), t, isBootstrapped)
}
// Report mocks base method
@@ -2412,20 +2606,6 @@ func (m *MockdatabaseFileSystemManager) EXPECT() *MockdatabaseFileSystemManagerM
return m.recorder
}
-// Cleanup mocks base method
-func (m *MockdatabaseFileSystemManager) Cleanup(t time.Time) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Cleanup", t)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// Cleanup indicates an expected call of Cleanup
-func (mr *MockdatabaseFileSystemManagerMockRecorder) Cleanup(t interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cleanup", reflect.TypeOf((*MockdatabaseFileSystemManager)(nil).Cleanup), t)
-}
-
// Flush mocks base method
func (m *MockdatabaseFileSystemManager) Flush(t time.Time) error {
m.ctrl.T.Helper()
@@ -2509,10 +2689,10 @@ func (mr *MockdatabaseFileSystemManagerMockRecorder) Report() *gomock.Call {
}
// LastSuccessfulSnapshotStartTime mocks base method
-func (m *MockdatabaseFileSystemManager) LastSuccessfulSnapshotStartTime() (time.Time, bool) {
+func (m *MockdatabaseFileSystemManager) LastSuccessfulSnapshotStartTime() (time0.UnixNano, bool) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LastSuccessfulSnapshotStartTime")
- ret0, _ := ret[0].(time.Time)
+ ret0, _ := ret[0].(time0.UnixNano)
ret1, _ := ret[1].(bool)
return ret0, ret1
}
@@ -2523,6 +2703,125 @@ func (mr *MockdatabaseFileSystemManagerMockRecorder) LastSuccessfulSnapshotStart
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastSuccessfulSnapshotStartTime", reflect.TypeOf((*MockdatabaseFileSystemManager)(nil).LastSuccessfulSnapshotStartTime))
}
+// MockdatabaseColdFlushManager is a mock of databaseColdFlushManager interface
+type MockdatabaseColdFlushManager struct {
+ ctrl *gomock.Controller
+ recorder *MockdatabaseColdFlushManagerMockRecorder
+}
+
+// MockdatabaseColdFlushManagerMockRecorder is the mock recorder for MockdatabaseColdFlushManager
+type MockdatabaseColdFlushManagerMockRecorder struct {
+ mock *MockdatabaseColdFlushManager
+}
+
+// NewMockdatabaseColdFlushManager creates a new mock instance
+func NewMockdatabaseColdFlushManager(ctrl *gomock.Controller) *MockdatabaseColdFlushManager {
+ mock := &MockdatabaseColdFlushManager{ctrl: ctrl}
+ mock.recorder = &MockdatabaseColdFlushManagerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockdatabaseColdFlushManager) EXPECT() *MockdatabaseColdFlushManagerMockRecorder {
+ return m.recorder
+}
+
+// WarmFlushCleanup mocks base method
+func (m *MockdatabaseColdFlushManager) WarmFlushCleanup(t time.Time, isBootstrapped bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "WarmFlushCleanup", t, isBootstrapped)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// WarmFlushCleanup indicates an expected call of WarmFlushCleanup
+func (mr *MockdatabaseColdFlushManagerMockRecorder) WarmFlushCleanup(t, isBootstrapped interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WarmFlushCleanup", reflect.TypeOf((*MockdatabaseColdFlushManager)(nil).WarmFlushCleanup), t, isBootstrapped)
+}
+
+// ColdFlushCleanup mocks base method
+func (m *MockdatabaseColdFlushManager) ColdFlushCleanup(t time.Time, isBootstrapped bool) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ColdFlushCleanup", t, isBootstrapped)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ColdFlushCleanup indicates an expected call of ColdFlushCleanup
+func (mr *MockdatabaseColdFlushManagerMockRecorder) ColdFlushCleanup(t, isBootstrapped interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ColdFlushCleanup", reflect.TypeOf((*MockdatabaseColdFlushManager)(nil).ColdFlushCleanup), t, isBootstrapped)
+}
+
+// Report mocks base method
+func (m *MockdatabaseColdFlushManager) Report() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Report")
+}
+
+// Report indicates an expected call of Report
+func (mr *MockdatabaseColdFlushManagerMockRecorder) Report() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Report", reflect.TypeOf((*MockdatabaseColdFlushManager)(nil).Report))
+}
+
+// Disable mocks base method
+func (m *MockdatabaseColdFlushManager) Disable() fileOpStatus {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Disable")
+ ret0, _ := ret[0].(fileOpStatus)
+ return ret0
+}
+
+// Disable indicates an expected call of Disable
+func (mr *MockdatabaseColdFlushManagerMockRecorder) Disable() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disable", reflect.TypeOf((*MockdatabaseColdFlushManager)(nil).Disable))
+}
+
+// Enable mocks base method
+func (m *MockdatabaseColdFlushManager) Enable() fileOpStatus {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Enable")
+ ret0, _ := ret[0].(fileOpStatus)
+ return ret0
+}
+
+// Enable indicates an expected call of Enable
+func (mr *MockdatabaseColdFlushManagerMockRecorder) Enable() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enable", reflect.TypeOf((*MockdatabaseColdFlushManager)(nil).Enable))
+}
+
+// Status mocks base method
+func (m *MockdatabaseColdFlushManager) Status() fileOpStatus {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Status")
+ ret0, _ := ret[0].(fileOpStatus)
+ return ret0
+}
+
+// Status indicates an expected call of Status
+func (mr *MockdatabaseColdFlushManagerMockRecorder) Status() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockdatabaseColdFlushManager)(nil).Status))
+}
+
+// Run mocks base method
+func (m *MockdatabaseColdFlushManager) Run(t time.Time) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Run", t)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// Run indicates an expected call of Run
+func (mr *MockdatabaseColdFlushManagerMockRecorder) Run(t interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockdatabaseColdFlushManager)(nil).Run), t)
+}
+
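A sketch of driving the new cold flush manager mock in a test (ctrl and t assumed, as above); Run returns whether the cold flush operations were actually performed:

    cfm := NewMockdatabaseColdFlushManager(ctrl)
    cfm.EXPECT().Run(gomock.Any()).Return(true)

    if !cfm.Run(time.Now()) {
        t.Fatal("expected cold flush to run")
    }
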
// MockdatabaseShardRepairer is a mock of databaseShardRepairer interface
type MockdatabaseShardRepairer struct {
ctrl *gomock.Controller
@@ -2737,10 +3036,10 @@ func (mr *MockdatabaseMediatorMockRecorder) IsBootstrapped() *gomock.Call {
}
// LastBootstrapCompletionTime mocks base method
-func (m *MockdatabaseMediator) LastBootstrapCompletionTime() (time.Time, bool) {
+func (m *MockdatabaseMediator) LastBootstrapCompletionTime() (time0.UnixNano, bool) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LastBootstrapCompletionTime")
- ret0, _ := ret[0].(time.Time)
+ ret0, _ := ret[0].(time0.UnixNano)
ret1, _ := ret[1].(bool)
return ret0, ret1
}
@@ -2766,16 +3065,16 @@ func (mr *MockdatabaseMediatorMockRecorder) Bootstrap() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bootstrap", reflect.TypeOf((*MockdatabaseMediator)(nil).Bootstrap))
}
-// DisableFileOps mocks base method
-func (m *MockdatabaseMediator) DisableFileOps() {
+// DisableFileOpsAndWait mocks base method
+func (m *MockdatabaseMediator) DisableFileOpsAndWait() {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "DisableFileOps")
+ m.ctrl.Call(m, "DisableFileOpsAndWait")
}
-// DisableFileOps indicates an expected call of DisableFileOps
-func (mr *MockdatabaseMediatorMockRecorder) DisableFileOps() *gomock.Call {
+// DisableFileOpsAndWait indicates an expected call of DisableFileOpsAndWait
+func (mr *MockdatabaseMediatorMockRecorder) DisableFileOpsAndWait() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableFileOps", reflect.TypeOf((*MockdatabaseMediator)(nil).DisableFileOps))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableFileOpsAndWait", reflect.TypeOf((*MockdatabaseMediator)(nil).DisableFileOpsAndWait))
}
// EnableFileOps mocks base method
@@ -2845,10 +3144,10 @@ func (mr *MockdatabaseMediatorMockRecorder) Report() *gomock.Call {
}
// LastSuccessfulSnapshotStartTime mocks base method
-func (m *MockdatabaseMediator) LastSuccessfulSnapshotStartTime() (time.Time, bool) {
+func (m *MockdatabaseMediator) LastSuccessfulSnapshotStartTime() (time0.UnixNano, bool) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LastSuccessfulSnapshotStartTime")
- ret0, _ := ret[0].(time.Time)
+ ret0, _ := ret[0].(time0.UnixNano)
ret1, _ := ret[1].(bool)
return ret0, ret1
}
@@ -2859,6 +3158,95 @@ func (mr *MockdatabaseMediatorMockRecorder) LastSuccessfulSnapshotStartTime() *g
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastSuccessfulSnapshotStartTime", reflect.TypeOf((*MockdatabaseMediator)(nil).LastSuccessfulSnapshotStartTime))
}
+// MockOnColdFlush is a mock of OnColdFlush interface
+type MockOnColdFlush struct {
+ ctrl *gomock.Controller
+ recorder *MockOnColdFlushMockRecorder
+}
+
+// MockOnColdFlushMockRecorder is the mock recorder for MockOnColdFlush
+type MockOnColdFlushMockRecorder struct {
+ mock *MockOnColdFlush
+}
+
+// NewMockOnColdFlush creates a new mock instance
+func NewMockOnColdFlush(ctrl *gomock.Controller) *MockOnColdFlush {
+ mock := &MockOnColdFlush{ctrl: ctrl}
+ mock.recorder = &MockOnColdFlushMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockOnColdFlush) EXPECT() *MockOnColdFlushMockRecorder {
+ return m.recorder
+}
+
+// ColdFlushNamespace mocks base method
+func (m *MockOnColdFlush) ColdFlushNamespace(ns Namespace) (OnColdFlushNamespace, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ColdFlushNamespace", ns)
+ ret0, _ := ret[0].(OnColdFlushNamespace)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ColdFlushNamespace indicates an expected call of ColdFlushNamespace
+func (mr *MockOnColdFlushMockRecorder) ColdFlushNamespace(ns interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ColdFlushNamespace", reflect.TypeOf((*MockOnColdFlush)(nil).ColdFlushNamespace), ns)
+}
+
+// MockOnColdFlushNamespace is a mock of OnColdFlushNamespace interface
+type MockOnColdFlushNamespace struct {
+ ctrl *gomock.Controller
+ recorder *MockOnColdFlushNamespaceMockRecorder
+}
+
+// MockOnColdFlushNamespaceMockRecorder is the mock recorder for MockOnColdFlushNamespace
+type MockOnColdFlushNamespaceMockRecorder struct {
+ mock *MockOnColdFlushNamespace
+}
+
+// NewMockOnColdFlushNamespace creates a new mock instance
+func NewMockOnColdFlushNamespace(ctrl *gomock.Controller) *MockOnColdFlushNamespace {
+ mock := &MockOnColdFlushNamespace{ctrl: ctrl}
+ mock.recorder = &MockOnColdFlushNamespaceMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockOnColdFlushNamespace) EXPECT() *MockOnColdFlushNamespaceMockRecorder {
+ return m.recorder
+}
+
+// OnFlushNewSeries mocks base method
+func (m *MockOnColdFlushNamespace) OnFlushNewSeries(arg0 persist.OnFlushNewSeriesEvent) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "OnFlushNewSeries", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// OnFlushNewSeries indicates an expected call of OnFlushNewSeries
+func (mr *MockOnColdFlushNamespaceMockRecorder) OnFlushNewSeries(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnFlushNewSeries", reflect.TypeOf((*MockOnColdFlushNamespace)(nil).OnFlushNewSeries), arg0)
+}
+
+// Done mocks base method
+func (m *MockOnColdFlushNamespace) Done() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Done")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Done indicates an expected call of Done
+func (mr *MockOnColdFlushNamespaceMockRecorder) Done() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Done", reflect.TypeOf((*MockOnColdFlushNamespace)(nil).Done))
+}
+
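The two new mocks are typically chained the way the real types are: ColdFlushNamespace yields a per-namespace handle whose Done is called once flushing completes. A hedged test sketch:

    onColdFlush := NewMockOnColdFlush(ctrl)
    nsColdFlush := NewMockOnColdFlushNamespace(ctrl)
    onColdFlush.EXPECT().ColdFlushNamespace(gomock.Any()).Return(nsColdFlush, nil)
    nsColdFlush.EXPECT().OnFlushNewSeries(gomock.Any()).Return(nil).AnyTimes()
    nsColdFlush.EXPECT().Done().Return(nil)
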
// MockOptions is a mock of Options interface
type MockOptions struct {
ctrl *gomock.Controller
@@ -3723,7 +4111,7 @@ func (mr *MockOptionsMockRecorder) QueryIDsWorkerPool() *gomock.Call {
}
// SetWriteBatchPool mocks base method
-func (m *MockOptions) SetWriteBatchPool(value *ts.WriteBatchPool) Options {
+func (m *MockOptions) SetWriteBatchPool(value *writes.WriteBatchPool) Options {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetWriteBatchPool", value)
ret0, _ := ret[0].(Options)
@@ -3737,10 +4125,10 @@ func (mr *MockOptionsMockRecorder) SetWriteBatchPool(value interface{}) *gomock.
}
// WriteBatchPool mocks base method
-func (m *MockOptions) WriteBatchPool() *ts.WriteBatchPool {
+func (m *MockOptions) WriteBatchPool() *writes.WriteBatchPool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WriteBatchPool")
- ret0, _ := ret[0].(*ts.WriteBatchPool)
+ ret0, _ := ret[0].(*writes.WriteBatchPool)
return ret0
}
@@ -3918,6 +4306,34 @@ func (mr *MockOptionsMockRecorder) BlockLeaseManager() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockLeaseManager", reflect.TypeOf((*MockOptions)(nil).BlockLeaseManager))
}
+// SetOnColdFlush mocks base method
+func (m *MockOptions) SetOnColdFlush(value OnColdFlush) Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetOnColdFlush", value)
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// SetOnColdFlush indicates an expected call of SetOnColdFlush
+func (mr *MockOptionsMockRecorder) SetOnColdFlush(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetOnColdFlush", reflect.TypeOf((*MockOptions)(nil).SetOnColdFlush), value)
+}
+
+// OnColdFlush mocks base method
+func (m *MockOptions) OnColdFlush() OnColdFlush {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "OnColdFlush")
+ ret0, _ := ret[0].(OnColdFlush)
+ return ret0
+}
+
+// OnColdFlush indicates an expected call of OnColdFlush
+func (mr *MockOptionsMockRecorder) OnColdFlush() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnColdFlush", reflect.TypeOf((*MockOptions)(nil).OnColdFlush))
+}
+
// SetMemoryTracker mocks base method
func (m *MockOptions) SetMemoryTracker(memTracker MemoryTracker) Options {
m.ctrl.T.Helper()
@@ -3974,6 +4390,90 @@ func (mr *MockOptionsMockRecorder) MmapReporter() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MmapReporter", reflect.TypeOf((*MockOptions)(nil).MmapReporter))
}
+// SetDoNotIndexWithFieldsMap mocks base method
+func (m *MockOptions) SetDoNotIndexWithFieldsMap(value map[string]string) Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetDoNotIndexWithFieldsMap", value)
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// SetDoNotIndexWithFieldsMap indicates an expected call of SetDoNotIndexWithFieldsMap
+func (mr *MockOptionsMockRecorder) SetDoNotIndexWithFieldsMap(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDoNotIndexWithFieldsMap", reflect.TypeOf((*MockOptions)(nil).SetDoNotIndexWithFieldsMap), value)
+}
+
+// DoNotIndexWithFieldsMap mocks base method
+func (m *MockOptions) DoNotIndexWithFieldsMap() map[string]string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DoNotIndexWithFieldsMap")
+ ret0, _ := ret[0].(map[string]string)
+ return ret0
+}
+
+// DoNotIndexWithFieldsMap indicates an expected call of DoNotIndexWithFieldsMap
+func (mr *MockOptionsMockRecorder) DoNotIndexWithFieldsMap() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoNotIndexWithFieldsMap", reflect.TypeOf((*MockOptions)(nil).DoNotIndexWithFieldsMap))
+}
+
+// SetNamespaceRuntimeOptionsManagerRegistry mocks base method
+func (m *MockOptions) SetNamespaceRuntimeOptionsManagerRegistry(value namespace.RuntimeOptionsManagerRegistry) Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetNamespaceRuntimeOptionsManagerRegistry", value)
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// SetNamespaceRuntimeOptionsManagerRegistry indicates an expected call of SetNamespaceRuntimeOptionsManagerRegistry
+func (mr *MockOptionsMockRecorder) SetNamespaceRuntimeOptionsManagerRegistry(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNamespaceRuntimeOptionsManagerRegistry", reflect.TypeOf((*MockOptions)(nil).SetNamespaceRuntimeOptionsManagerRegistry), value)
+}
+
+// NamespaceRuntimeOptionsManagerRegistry mocks base method
+func (m *MockOptions) NamespaceRuntimeOptionsManagerRegistry() namespace.RuntimeOptionsManagerRegistry {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NamespaceRuntimeOptionsManagerRegistry")
+ ret0, _ := ret[0].(namespace.RuntimeOptionsManagerRegistry)
+ return ret0
+}
+
+// NamespaceRuntimeOptionsManagerRegistry indicates an expected call of NamespaceRuntimeOptionsManagerRegistry
+func (mr *MockOptionsMockRecorder) NamespaceRuntimeOptionsManagerRegistry() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NamespaceRuntimeOptionsManagerRegistry", reflect.TypeOf((*MockOptions)(nil).NamespaceRuntimeOptionsManagerRegistry))
+}
+
+// SetMediatorTickInterval mocks base method
+func (m *MockOptions) SetMediatorTickInterval(value time.Duration) Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetMediatorTickInterval", value)
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// SetMediatorTickInterval indicates an expected call of SetMediatorTickInterval
+func (mr *MockOptionsMockRecorder) SetMediatorTickInterval(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMediatorTickInterval", reflect.TypeOf((*MockOptions)(nil).SetMediatorTickInterval), value)
+}
+
+// MediatorTickInterval mocks base method
+func (m *MockOptions) MediatorTickInterval() time.Duration {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MediatorTickInterval")
+ ret0, _ := ret[0].(time.Duration)
+ return ret0
+}
+
+// MediatorTickInterval indicates an expected call of MediatorTickInterval
+func (mr *MockOptionsMockRecorder) MediatorTickInterval() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MediatorTickInterval", reflect.TypeOf((*MockOptions)(nil).MediatorTickInterval))
+}
+
// MockMemoryTracker is a mock of MemoryTracker interface
type MockMemoryTracker struct {
ctrl *gomock.Controller
diff --git a/src/dbnode/storage/tick.go b/src/dbnode/storage/tick.go
index 7e097d262d..33585e8517 100644
--- a/src/dbnode/storage/tick.go
+++ b/src/dbnode/storage/tick.go
@@ -152,7 +152,7 @@ func (mgr *tickManager) Tick(forceType forceType, startTime time.Time) error {
// Now we acquired the token, reset the cancellable
mgr.c.Reset()
- namespaces, err := mgr.database.GetOwnedNamespaces()
+ namespaces, err := mgr.database.OwnedNamespaces()
if err != nil {
return err
}
diff --git a/src/dbnode/storage/types.go b/src/dbnode/storage/types.go
index 4f0c14cf99..bc8e79d47a 100644
--- a/src/dbnode/storage/types.go
+++ b/src/dbnode/storage/types.go
@@ -41,8 +41,10 @@ import (
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/storage/series/lookup"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -122,17 +124,17 @@ type Database interface {
// Note that when using the BatchWriter the caller owns the lifecycle of the series
// IDs. If they're being pooled, it's the caller's responsibility to return them to the
// appropriate pool, but the encoded tags and annotations are owned by the
- // ts.WriteBatch itself and will be finalized when the entire ts.WriteBatch is finalized
+ // writes.WriteBatch itself and will be finalized when the entire writes.WriteBatch is finalized
// due to their lifecycle being more complicated.
// Callers can still control the pooling of the encoded tags and annotations by using
// the SetFinalizeEncodedTagsFn and SetFinalizeAnnotationFn on the WriteBatch itself.
- BatchWriter(namespace ident.ID, batchSize int) (ts.BatchWriter, error)
+ BatchWriter(namespace ident.ID, batchSize int) (writes.BatchWriter, error)
// WriteBatch is the same as Write, but in batch.
WriteBatch(
ctx context.Context,
namespace ident.ID,
- writes ts.BatchWriter,
+ writes writes.BatchWriter,
errHandler IndexedErrorHandler,
) error
@@ -140,7 +142,7 @@ type Database interface {
WriteTaggedBatch(
ctx context.Context,
namespace ident.ID,
- writes ts.BatchWriter,
+ writes writes.BatchWriter,
errHandler IndexedErrorHandler,
) error
@@ -223,8 +225,8 @@ type Database interface {
type database interface {
Database
- // GetOwnedNamespaces returns the namespaces this database owns.
- GetOwnedNamespaces() ([]databaseNamespace, error)
+ // OwnedNamespaces returns the namespaces this database owns.
+ OwnedNamespaces() ([]databaseNamespace, error)
// UpdateOwnedNamespaces updates the namespaces this database owns.
UpdateOwnedNamespaces(namespaces namespace.Map) error
@@ -249,6 +251,12 @@ type Namespace interface {
// Shards returns the shard description.
Shards() []Shard
+
+ // Index returns the reverse index backing the namespace, if it exists.
+ Index() (NamespaceIndex, error)
+
+ // StorageOptions returns storage options.
+ StorageOptions() Options
}
// NamespacesByID is a sortable slice of namespaces by ID.
@@ -260,6 +268,14 @@ func (n NamespacesByID) Less(i, j int) bool {
return bytes.Compare(n[i].ID().Bytes(), n[j].ID().Bytes()) < 0
}
+// SeriesWrite is a result of a series write.
+type SeriesWrite struct {
+ Series ts.Series
+ WasWritten bool
+ NeedsIndex bool
+ PendingIndexInsert writes.PendingIndexInsert
+}
+
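SeriesWrite replaces the old (ts.Series, bool, error) triple returned from namespace and shard writes. A sketch of how a caller might consume it (shard, ctx, and the write arguments are assumptions):

    seriesWrite, err := shard.Write(ctx, id, timestamp, value, unit, annotation, wOpts)
    if err != nil {
        return err
    }
    if seriesWrite.NeedsIndex {
        // Defer the index insert rather than indexing inline with the write.
        pending = append(pending, seriesWrite.PendingIndexInsert)
    }
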
type databaseNamespace interface {
Namespace
@@ -269,11 +285,8 @@ type databaseNamespace interface {
// AssignShardSet sets the shard set assignment and returns immediately.
AssignShardSet(shardSet sharding.ShardSet)
- // GetOwnedShards returns the database shards.
- GetOwnedShards() []databaseShard
-
- // GetIndex returns the reverse index backing the namespace, if it exists.
- GetIndex() (namespaceIndex, error)
+ // OwnedShards returns the database shards.
+ OwnedShards() []databaseShard
// Tick performs any regular maintenance operations.
Tick(c context.Cancellable, startTime time.Time) error
@@ -286,7 +299,7 @@ type databaseNamespace interface {
value float64,
unit xtime.Unit,
annotation []byte,
- ) (ts.Series, bool, error)
+ ) (SeriesWrite, error)
// WriteTagged values to the namespace for an ID.
WriteTagged(
@@ -297,7 +310,7 @@ type databaseNamespace interface {
value float64,
unit xtime.Unit,
annotation []byte,
- ) (ts.Series, bool, error)
+ ) (SeriesWrite, error)
// QueryIDs resolves the given query into known IDs.
QueryIDs(
@@ -342,10 +355,10 @@ type databaseNamespace interface {
// PrepareBootstrap prepares the namespace for bootstrapping by ensuring
// its shards know which flushed files reside on disk, so that calls
// to series.LoadBlock(...) will succeed.
- PrepareBootstrap() ([]databaseShard, error)
+ PrepareBootstrap(ctx context.Context) ([]databaseShard, error)
// Bootstrap marks shards as bootstrapped for the namespace.
- Bootstrap(bootstrapResult bootstrap.NamespaceResult) error
+ Bootstrap(ctx context.Context, bootstrapResult bootstrap.NamespaceResult) error
// WarmFlush flushes in-memory WarmWrites.
WarmFlush(blockStart time.Time, flush persist.FlushPreparer) error
@@ -389,6 +402,9 @@ type databaseNamespace interface {
id ident.ID,
tags ident.TagIterator,
) (result SeriesReadWriteRef, owned bool, err error)
+
+ // WritePendingIndexInserts will write any pending index inserts.
+ WritePendingIndexInserts(pending []writes.PendingIndexInsert) error
}
// SeriesReadWriteRef is a read/write reference for a series,
@@ -444,7 +460,7 @@ type databaseShard interface {
unit xtime.Unit,
annotation []byte,
wOpts series.WriteOptions,
- ) (ts.Series, bool, error)
+ ) (SeriesWrite, error)
// WriteTagged writes a value to the shard for an ID with tags.
WriteTagged(
@@ -456,7 +472,7 @@ type databaseShard interface {
unit xtime.Unit,
annotation []byte,
wOpts series.WriteOptions,
- ) (ts.Series, bool, error)
+ ) (SeriesWrite, error)
ReadEncoded(
ctx context.Context,
@@ -483,7 +499,7 @@ type databaseShard interface {
start time.Time,
version int,
nsCtx namespace.Context,
- ) ([]xio.BlockReader, error)
+ ) (block.FetchBlockResult, error)
// FetchBlocksMetadataV2 retrieves blocks metadata.
FetchBlocksMetadataV2(
@@ -496,11 +512,11 @@ type databaseShard interface {
// PrepareBootstrap prepares the shard for bootstrapping by ensuring
// it knows which flushed files reside on disk.
- PrepareBootstrap() error
+ PrepareBootstrap(ctx context.Context) error
// Bootstrap bootstraps the shard after all provided data
// has been loaded using LoadBootstrapBlocks.
- Bootstrap() error
+ Bootstrap(ctx context.Context, nsCtx namespace.Context) error
// UpdateFlushStates updates all the flush states for the current shard
// by checking the file volumes that exist on disk at a point in time.
@@ -523,7 +539,8 @@ type databaseShard interface {
flush persist.FlushPreparer,
resources coldFlushReuseableResources,
nsCtx namespace.Context,
- ) error
+ onFlush persist.OnFlushSeries,
+ ) (ShardColdFlush, error)
// Snapshot snapshot's the unflushed WarmWrites in this shard.
Snapshot(
@@ -531,7 +548,7 @@ type databaseShard interface {
snapshotStart time.Time,
flush persist.SnapshotPreparer,
nsCtx namespace.Context,
- ) error
+ ) (ShardSnapshotResult, error)
// FlushState returns the flush state for this shard at block start.
FlushState(blockStart time.Time) (fileOpState, error)
@@ -553,11 +570,6 @@ type databaseShard interface {
repairer databaseShardRepairer,
) (repair.MetadataComparisonResult, error)
- // TagsFromSeriesID returns the series tags from a series ID.
- // TODO(r): Seems like this is a work around that shouldn't be
- // necessary given the callsites that current exist?
- TagsFromSeriesID(seriesID ident.ID) (ident.Tags, bool, error)
-
// SeriesReadWriteRef returns a read/write ref to a series, callers
// must make sure to call the release callback once finished
// with the reference.
@@ -566,6 +578,20 @@ type databaseShard interface {
tags ident.TagIterator,
opts ShardSeriesReadWriteRefOptions,
) (SeriesReadWriteRef, error)
+
+ // DocRef returns the doc if already present in a shard series.
+ DocRef(id ident.ID) (doc.Document, bool, error)
+}
+
+// ShardSnapshotResult is a result from a shard snapshot.
+type ShardSnapshotResult struct {
+ SeriesPersist int
+}
+
+// ShardColdFlush exposes a done method to finalize shard cold flush
+// by persisting data and updating shard state/block leases.
+type ShardColdFlush interface {
+ Done() error
}
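
ColdFlush now returns a ShardColdFlush handle instead of completing everything inline; a sketch of the intended two-phase use (flush, resources, nsCtx, and onFlush are assumed to be in scope):

    coldFlush, err := shard.ColdFlush(flush, resources, nsCtx, onFlush)
    if err != nil {
        return err
    }
    // Only once every shard has flushed do we persist state and update
    // block leases via the returned handle.
    if err := coldFlush.Done(); err != nil {
        return err
    }
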
// ShardSeriesReadWriteRefOptions are options for SeriesReadWriteRef
@@ -574,8 +600,8 @@ type ShardSeriesReadWriteRefOptions struct {
ReverseIndex bool
}
-// namespaceIndex indexes namespace writes.
-type namespaceIndex interface {
+// NamespaceIndex indexes namespace writes.
+type NamespaceIndex interface {
// AssignShardSet sets the shard set assignment and returns immediately.
AssignShardSet(shardSet sharding.ShardSet)
@@ -585,11 +611,21 @@ type namespaceIndex interface {
writeTime time.Time,
) xtime.UnixNano
+ // BlockForBlockStart returns an index block for a block start.
+ BlockForBlockStart(
+ blockStart time.Time,
+ ) (index.Block, error)
+
// WriteBatch indexes the provided entries.
WriteBatch(
batch *index.WriteBatch,
) error
+ // WritePending indexes the provided pending entries.
+ WritePending(
+ pending []writes.PendingIndexInsert,
+ ) error
+
// Query resolves the given query into known IDs.
Query(
ctx context.Context,
@@ -616,29 +652,51 @@ type namespaceIndex interface {
// using the provided `t` as the frame of reference.
CleanupExpiredFileSets(t time.Time) error
+ // CleanupDuplicateFileSets removes duplicate fileset files.
+ CleanupDuplicateFileSets() error
+
// Tick performs internal house keeping in the index, including block rotation,
// data eviction, and so on.
Tick(c context.Cancellable, startTime time.Time) (namespaceIndexTickResult, error)
- // Flush performs any flushes that the index has outstanding using
+ // WarmFlush performs any warm flushes that the index has outstanding using
// the owned shards of the database.
- Flush(
+ WarmFlush(
flush persist.IndexFlush,
shards []databaseShard,
) error
+ // ColdFlush performs any cold flushes that the index has outstanding using
+ // the owned shards of the database. Also returns a callback to be called when
+	// cold flushing completes to perform housekeeping.
+ ColdFlush(shards []databaseShard) (OnColdFlushDone, error)
+
+ // DebugMemorySegments allows for debugging memory segments.
+ DebugMemorySegments(opts DebugMemorySegmentsOptions) error
+
// Close will release the index resources and close the index.
Close() error
}
+// OnColdFlushDone is a callback that performs housekeeping once cold flushing completes.
+type OnColdFlushDone func() error
+
+// DebugMemorySegmentsOptions is a set of options to debug memory segments.
+type DebugMemorySegmentsOptions struct {
+ OutputDirectory string
+}
+
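A sketch of invoking the new debug hook (idx is an assumed NamespaceIndex; the output path is illustrative):

    err := idx.DebugMemorySegments(DebugMemorySegmentsOptions{
        OutputDirectory: "/tmp/m3db-index-debug",
    })
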
// namespaceIndexTickResult are details about the work performed by the namespaceIndex
// during a Tick().
type namespaceIndexTickResult struct {
- NumBlocks int64
- NumBlocksSealed int64
- NumBlocksEvicted int64
- NumSegments int64
- NumTotalDocs int64
+ NumBlocks int64
+ NumBlocksSealed int64
+ NumBlocksEvicted int64
+ NumSegments int64
+ NumSegmentsBootstrapped int64
+ NumSegmentsMutable int64
+ NumTotalDocs int64
+ FreeMmap int64
}
// namespaceIndexInsertQueue is a queue used in front of the indexing component
@@ -654,7 +712,17 @@ type namespaceIndexInsertQueue interface {
// inserts to the index asynchronously. It executes the provided callbacks
// based on the result of the execution. The returned wait group can be used
// if the insert is required to be synchronous.
- InsertBatch(batch *index.WriteBatch) (*sync.WaitGroup, error)
+ InsertBatch(
+ batch *index.WriteBatch,
+ ) (*sync.WaitGroup, error)
+
+ // InsertPending inserts the provided documents to the index queue which processes
+ // inserts to the index asynchronously. It executes the provided callbacks
+ // based on the result of the execution. The returned wait group can be used
+ // if the insert is required to be synchronous.
+ InsertPending(
+ pending []writes.PendingIndexInsert,
+ ) (*sync.WaitGroup, error)
}
// databaseBootstrapManager manages the bootstrap process.
@@ -664,7 +732,7 @@ type databaseBootstrapManager interface {
// LastBootstrapCompletionTime returns the last bootstrap completion time,
// if any.
- LastBootstrapCompletionTime() (time.Time, bool)
+ LastBootstrapCompletionTime() (xtime.UnixNano, bool)
// Bootstrap performs bootstrapping for all namespaces and shards owned.
Bootstrap() (BootstrapResult, error)
@@ -686,16 +754,21 @@ type databaseFlushManager interface {
// LastSuccessfulSnapshotStartTime returns the start time of the last
// successful snapshot, if any.
- LastSuccessfulSnapshotStartTime() (time.Time, bool)
+ LastSuccessfulSnapshotStartTime() (xtime.UnixNano, bool)
// Report reports runtime information.
Report()
}
// databaseCleanupManager manages cleaning up persistent storage space.
+// NB(bodu): We have to separate the flush methods since flushing is now split into
+// warm/cold flushes, and cleaning up certain types of data concurrently with either can be problematic.
type databaseCleanupManager interface {
- // Cleanup cleans up data not needed in the persistent storage.
- Cleanup(t time.Time) error
+ // WarmFlushCleanup cleans up data not needed in the persistent storage before a warm flush.
+ WarmFlushCleanup(t time.Time, isBootstrapped bool) error
+
+ // ColdFlushCleanup cleans up data not needed in the persistent storage before a cold flush.
+ ColdFlushCleanup(t time.Time, isBootstrapped bool) error
// Report reports runtime information.
Report()
@@ -703,9 +776,6 @@ type databaseCleanupManager interface {
// databaseFileSystemManager manages the database related filesystem activities.
type databaseFileSystemManager interface {
- // Cleanup cleans up data not needed in the persistent storage.
- Cleanup(t time.Time) error
-
// Flush flushes in-memory data to persistent storage.
Flush(t time.Time) error
@@ -732,7 +802,26 @@ type databaseFileSystemManager interface {
// LastSuccessfulSnapshotStartTime returns the start time of the last
// successful snapshot, if any.
- LastSuccessfulSnapshotStartTime() (time.Time, bool)
+ LastSuccessfulSnapshotStartTime() (xtime.UnixNano, bool)
+}
+
+// databaseColdFlushManager manages the database related cold flush activities.
+type databaseColdFlushManager interface {
+ databaseCleanupManager
+
+ // Disable disables the cold flush manager and prevents it from
+ // performing file operations, returns the current file operation status.
+ Disable() fileOpStatus
+
+ // Enable enables the cold flush manager to perform file operations.
+ Enable() fileOpStatus
+
+ // Status returns the file operation status.
+ Status() fileOpStatus
+
+ // Run attempts to perform all cold flush related operations,
+ // returning true if those operations are performed, and false otherwise.
+ Run(t time.Time) bool
}
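
A hedged sketch of how a background loop might drive the manager now that cold flushes run on their own cadence (mgr and interval are assumptions, and fileOpInProgress is assumed to be one of the package's fileOpStatus values):

    for range time.Tick(interval) {
        if mgr.Status() == fileOpInProgress {
            continue // a previous cold flush is still running
        }
        mgr.Run(time.Now())
    }
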
// databaseShardRepairer repairs in-memory data for a shard.
@@ -783,13 +872,13 @@ type databaseMediator interface {
// LastBootstrapCompletionTime returns the last bootstrap completion time,
// if any.
- LastBootstrapCompletionTime() (time.Time, bool)
+ LastBootstrapCompletionTime() (xtime.UnixNano, bool)
// Bootstrap bootstraps the database with file operations performed at the end.
Bootstrap() (BootstrapResult, error)
- // DisableFileOps disables file operations.
- DisableFileOps()
+	// DisableFileOpsAndWait disables file operations and waits until any
+	// in-flight file operations have completed.
+ DisableFileOpsAndWait()
// EnableFileOps enables file operations.
EnableFileOps()
@@ -808,7 +897,18 @@ type databaseMediator interface {
// LastSuccessfulSnapshotStartTime returns the start time of the last
// successful snapshot, if any.
- LastSuccessfulSnapshotStartTime() (time.Time, bool)
+ LastSuccessfulSnapshotStartTime() (xtime.UnixNano, bool)
+}
+
+// OnColdFlush can perform work each time a series is flushed.
+type OnColdFlush interface {
+ ColdFlushNamespace(ns Namespace) (OnColdFlushNamespace, error)
+}
+
+// OnColdFlushNamespace performs work on a per namespace level.
+type OnColdFlushNamespace interface {
+ persist.OnFlushSeries
+ Done() error
}
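
A minimal no-op implementation of the pair, using only the methods the interfaces declare (the type names are illustrative):

    type noOpColdFlush struct{}

    func (noOpColdFlush) ColdFlushNamespace(Namespace) (OnColdFlushNamespace, error) {
        return noOpColdFlushNamespace{}, nil
    }

    type noOpColdFlushNamespace struct{}

    func (noOpColdFlushNamespace) OnFlushNewSeries(persist.OnFlushNewSeriesEvent) error {
        return nil
    }

    func (noOpColdFlushNamespace) Done() error { return nil }
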
// Options represents the options for storage.
@@ -1007,10 +1107,10 @@ type Options interface {
QueryIDsWorkerPool() xsync.WorkerPool
// SetWriteBatchPool sets the WriteBatch pool.
- SetWriteBatchPool(value *ts.WriteBatchPool) Options
+ SetWriteBatchPool(value *writes.WriteBatchPool) Options
// WriteBatchPool returns the WriteBatch pool.
- WriteBatchPool() *ts.WriteBatchPool
+ WriteBatchPool() *writes.WriteBatchPool
// SetBufferBucketPool sets the BufferBucket pool.
SetBufferBucketPool(value *series.BufferBucketPool) Options
@@ -1048,6 +1148,12 @@ type Options interface {
// BlockLeaseManager returns the block leaser.
BlockLeaseManager() block.LeaseManager
+ // SetOnColdFlush sets the on cold flush processor.
+ SetOnColdFlush(value OnColdFlush) Options
+
+ // OnColdFlush returns the on cold flush processor.
+ OnColdFlush() OnColdFlush
+
// SetMemoryTracker sets the MemoryTracker.
SetMemoryTracker(memTracker MemoryTracker) Options
@@ -1059,6 +1165,26 @@ type Options interface {
// MmapReporter returns the mmap reporter.
MmapReporter() mmap.Reporter
+
+	// SetDoNotIndexWithFieldsMap sets a map of fields; metrics whose fields
+	// match entries in the map will not be indexed.
+	SetDoNotIndexWithFieldsMap(value map[string]string) Options
+
+	// DoNotIndexWithFieldsMap returns the map of fields; metrics whose fields
+	// match entries in the map will not be indexed.
+ DoNotIndexWithFieldsMap() map[string]string
+
+	// SetNamespaceRuntimeOptionsManagerRegistry sets the namespace runtime options manager registry.
+ SetNamespaceRuntimeOptionsManagerRegistry(value namespace.RuntimeOptionsManagerRegistry) Options
+
+	// NamespaceRuntimeOptionsManagerRegistry returns the namespace runtime options manager registry.
+ NamespaceRuntimeOptionsManagerRegistry() namespace.RuntimeOptionsManagerRegistry
+
+	// SetMediatorTickInterval sets the ticking interval for the mediator.
+ SetMediatorTickInterval(value time.Duration) Options
+
+ // MediatorTickInterval returns the ticking interval for the mediator.
+ MediatorTickInterval() time.Duration
}
// MemoryTracker tracks memory.
diff --git a/src/dbnode/tracepoint/tracepoint.go b/src/dbnode/tracepoint/tracepoint.go
index c1f6461d2a..2a3dab06ce 100644
--- a/src/dbnode/tracepoint/tracepoint.go
+++ b/src/dbnode/tracepoint/tracepoint.go
@@ -67,6 +67,18 @@ const (
// NSQueryIDs is the operation name for the dbNamespace QueryIDs path.
NSQueryIDs = "storage.dbNamespace.QueryIDs"
+ // NSPrepareBootstrap is the operation name for the dbNamespace PrepareBootstrap path.
+ NSPrepareBootstrap = "storage.dbNamespace.PrepareBootstrap"
+
+ // NSBootstrap is the operation name for the dbNamespace Bootstrap path.
+ NSBootstrap = "storage.dbNamespace.Bootstrap"
+
+ // ShardPrepareBootstrap is the operation name for the dbShard PrepareBootstrap path.
+ ShardPrepareBootstrap = "storage.dbShard.PrepareBootstrap"
+
+ // ShardBootstrap is the operation name for the dbShard Bootstrap path.
+ ShardBootstrap = "storage.dbShard.Bootstrap"
+
// NSIdxQuery is the operation name for the nsIndex Query path.
NSIdxQuery = "storage.nsIndex.Query"
@@ -87,4 +99,22 @@ const (
// BlockAggregate is the operation name for the index block aggregate path.
BlockAggregate = "storage/index.block.Aggregate"
+
+ // BootstrapProcessRun is the operation name for the bootstrap process Run path.
+ BootstrapProcessRun = "bootstrap.bootstrapProcess.Run"
+
+ // BootstrapperUninitializedSourceRead is the operation for the uninitializedTopologySource Read path.
+ BootstrapperUninitializedSourceRead = "bootstrapper.uninitialized.uninitializedTopologySource.Read"
+
+ // BootstrapperCommitLogSourceRead is the operation for the commit log Read path.
+ BootstrapperCommitLogSourceRead = "bootstrapper.commitlog.commitLogSource.Read"
+
+ // BootstrapperPeersSourceRead is the operation for the peers Read path.
+ BootstrapperPeersSourceRead = "bootstrapper.peers.peersSource.Read"
+
+ // BootstrapperFilesystemSourceRead is the operation for the filesystem Read path.
+ BootstrapperFilesystemSourceRead = "bootstrapper.fs.filesystemSource.Read"
+
+ // BootstrapperFilesystemSourceMigrator is the operation for filesystem migrator path.
+ BootstrapperFilesystemSourceMigrator = "bootstrapper.fs.filesystemSource.Migrator"
)
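
The new constants are consumed the same way as the existing ones; a minimal sketch using opentracing-go (the surrounding function and ctx are hypothetical):

    sp, ctx := opentracing.StartSpanFromContext(ctx, tracepoint.NSBootstrap)
    defer sp.Finish()
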
diff --git a/src/dbnode/ts/segment.go b/src/dbnode/ts/segment.go
index fd20c8979d..285fed4920 100644
--- a/src/dbnode/ts/segment.go
+++ b/src/dbnode/ts/segment.go
@@ -25,6 +25,8 @@ import (
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/pool"
+
+ "github.com/m3db/stackadler32"
)
// Segment represents a binary blob consisting of two byte slices and
@@ -32,12 +34,12 @@ import (
type Segment struct {
// Head is the head of the segment.
Head checked.Bytes
-
// Tail is the tail of the segment.
Tail checked.Bytes
-
// Flags declares whether to finalize the head and tail bytes when the segment is finalized.
Flags SegmentFlags
+ // checksum is the checksum for the segment.
+ checksum uint32
}
// SegmentFlags describes the option to finalize or not finalize
@@ -53,11 +55,29 @@ const (
FinalizeTail SegmentFlags = 1 << 2
)
+// CalculateChecksum calculates, caches, and returns the 32-bit checksum for
+// this segment, avoiding any allocations.
+func (s *Segment) CalculateChecksum() uint32 {
+ if s.checksum != 0 {
+ return s.checksum
+ }
+ d := stackadler32.NewDigest()
+ if s.Head != nil {
+ d = d.Update(s.Head.Bytes())
+ }
+ if s.Tail != nil {
+ d = d.Update(s.Tail.Bytes())
+ }
+ s.checksum = d.Sum32()
+ return s.checksum
+}
+
// NewSegment will create a new segment and increment the refs to
// head and tail if they are non-nil. When finalized the segment will
// also finalize the byte slices if FinalizeBytes is passed.
func NewSegment(
head, tail checked.Bytes,
+ checksum uint32,
flags SegmentFlags,
) Segment {
if head != nil {
@@ -67,9 +87,10 @@ func NewSegment(
tail.IncRef()
}
return Segment{
- Head: head,
- Tail: tail,
- Flags: flags,
+ Head: head,
+ Tail: tail,
+ Flags: flags,
+ checksum: checksum,
}
}
@@ -162,5 +183,6 @@ func (s *Segment) Clone(pool pool.CheckedBytesPool) Segment {
}
// NB: new segment is always finalizeable.
- return NewSegment(checkedHead, checkedTail, FinalizeHead&FinalizeTail)
+ return NewSegment(checkedHead, checkedTail,
+ s.CalculateChecksum(), FinalizeHead&FinalizeTail)
}
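
Call sites with no precomputed checksum can pass zero and let the segment compute it lazily, since zero doubles as the unset sentinel in CalculateChecksum; a sketch (head and tail are assumed checked.Bytes values):

    seg := ts.NewSegment(head, tail, 0, ts.FinalizeHead|ts.FinalizeTail)
    sum := seg.CalculateChecksum() // computed once, then cached on the segment
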
diff --git a/src/dbnode/ts/segment_test.go b/src/dbnode/ts/segment_test.go
index 1b97efce25..e633fab70c 100644
--- a/src/dbnode/ts/segment_test.go
+++ b/src/dbnode/ts/segment_test.go
@@ -50,12 +50,14 @@ var (
type byteFunc func(d []byte) checked.Bytes
+var testChecksum uint32 = 2
+
func testSegmentCloneWithPools(
t *testing.T,
checkd byteFunc,
pool pool.CheckedBytesPool,
) {
- seg := NewSegment(checkd(head), checkd(tail), FinalizeNone)
+ seg := NewSegment(checkd(head), checkd(tail), testChecksum, FinalizeNone)
assert.Equal(t, len(expected), seg.Len())
cloned := seg.Clone(pool)
@@ -69,6 +71,7 @@ func testSegmentCloneWithPools(
assert.Equal(t, len(expected), cloned.Len())
assert.True(t, cloned.Flags|FinalizeHead > 0)
assert.True(t, cloned.Flags|FinalizeTail > 0)
+ assert.Equal(t, testChecksum, cloned.checksum)
cloned.Finalize()
seg.Finalize()
diff --git a/src/dbnode/ts/types.go b/src/dbnode/ts/types.go
index d2cd83d187..0460d9a25e 100644
--- a/src/dbnode/ts/types.go
+++ b/src/dbnode/ts/types.go
@@ -27,48 +27,6 @@ import (
xtime "github.com/m3db/m3/src/x/time"
)
-// FinalizeEncodedTagsFn is a function that will be called for each encoded tags once
-// the WriteBatch itself is finalized.
-type FinalizeEncodedTagsFn func(b []byte)
-
-// FinalizeAnnotationFn is a function that will be called for each annotation once
-// the WriteBatch itself is finalized.
-type FinalizeAnnotationFn func(b []byte)
-
-// Write is a write for the commitlog.
-type Write struct {
- Series Series
- Datapoint Datapoint
- Unit xtime.Unit
- Annotation Annotation
-}
-
-// BatchWrite represents a write that was added to the
-// BatchWriter.
-type BatchWrite struct {
- // Used by the commitlog. If this is false, the commitlog should not write
- // the series at this index.
- SkipWrite bool
- // Used by the commitlog (series needed to be updated by the shard
- // object first, cannot use the Series provided by the caller as it
- // is missing important fields like Tags.)
- Write Write
- // Not used by the commitlog, provided by the caller (since the request
- // is usually coming from over the wire) and is superseded by the Tags
- // in Write.Series which will get set by the Shard object.
- TagIter ident.TagIterator
- // EncodedTags is used by the commit log, but also held onto as a reference
- // here so that it can be returned to the pool after the write to commit log
- // completes (since the Write.Series gets overwritten in SetOutcome so can't
- // use the reference there for returning to the pool).
- EncodedTags EncodedTags
- // Used to help the caller tie errors back to an index in their
- // own collection.
- OriginalIndex int
- // Used by the commitlog.
- Err error
-}
-
// Series describes a series.
type Series struct {
// UniqueIndex is the unique index assigned to this series (only valid
@@ -81,9 +39,6 @@ type Series struct {
// ID is the series identifier.
ID ident.ID
- // Tags are the series tags.
- Tags ident.Tags
-
// EncodedTags are the series encoded tags, if set then call sites can
// avoid needing to encoded the tags from the series tags provided.
EncodedTags EncodedTags
@@ -94,8 +49,9 @@ type Series struct {
// A Datapoint is a single data value reported at a given time.
type Datapoint struct {
- Timestamp time.Time
- Value float64
+ Timestamp time.Time
+ TimestampNanos xtime.UnixNano
+ Value float64
}
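
With the added TimestampNanos field, call sites constructing datapoints by hand should populate both representations, mirroring newBatchWriterWrite in write_batch.go; a sketch:

    now := time.Now()
    dp := ts.Datapoint{
        Timestamp:      now,
        TimestampNanos: xtime.ToUnixNano(now),
        Value:          42,
    }
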
// Equal returns whether one Datapoint is equal to another
@@ -108,48 +64,3 @@ type EncodedTags []byte
// Annotation represents information used to annotate datapoints.
type Annotation []byte
-
-// WriteBatch is the interface that supports adding writes to the batch,
-// as well as iterating through the batched writes and resetting the
-// struct (for pooling).
-type WriteBatch interface {
- BatchWriter
- // Can't use a real iterator pattern here as it slows things down.
- Iter() []BatchWrite
- SetOutcome(idx int, series Series, err error)
- SetSkipWrite(idx int)
- Reset(batchSize int, ns ident.ID)
- Finalize()
-
- // Returns the WriteBatch's internal capacity. Used by the pool to throw
- // away batches that have grown too large.
- cap() int
-}
-
-// BatchWriter is the interface that is used for preparing a batch of
-// writes.
-type BatchWriter interface {
- Add(
- originalIndex int,
- id ident.ID,
- timestamp time.Time,
- value float64,
- unit xtime.Unit,
- annotation []byte,
- ) error
-
- AddTagged(
- originalIndex int,
- id ident.ID,
- tags ident.TagIterator,
- encodedTags EncodedTags,
- timestamp time.Time,
- value float64,
- unit xtime.Unit,
- annotation []byte,
- ) error
-
- SetFinalizeEncodedTagsFn(f FinalizeEncodedTagsFn)
-
- SetFinalizeAnnotationFn(f FinalizeAnnotationFn)
-}
diff --git a/src/dbnode/ts/writes/types.go b/src/dbnode/ts/writes/types.go
new file mode 100644
index 0000000000..bb8ac839f5
--- /dev/null
+++ b/src/dbnode/ts/writes/types.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package writes
+
+import (
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/storage/index"
+ "github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/x/ident"
+ xtime "github.com/m3db/m3/src/x/time"
+)
+
+// FinalizeEncodedTagsFn is a function that will be called for each encoded tags once
+// the WriteBatch itself is finalized.
+type FinalizeEncodedTagsFn func(b []byte)
+
+// FinalizeAnnotationFn is a function that will be called for each annotation once
+// the WriteBatch itself is finalized.
+type FinalizeAnnotationFn func(b []byte)
+
+// Write is a write for the commitlog.
+type Write struct {
+ Series ts.Series
+ Datapoint ts.Datapoint
+ Unit xtime.Unit
+ Annotation ts.Annotation
+}
+
+// PendingIndexInsert is a pending index insert.
+type PendingIndexInsert struct {
+ Entry index.WriteBatchEntry
+ Document doc.Document
+}
+
+// BatchWrite represents a write that was added to the
+// BatchWriter.
+type BatchWrite struct {
+ // Used by the commitlog. If this is false, the commitlog should not write
+ // the series at this index.
+ SkipWrite bool
+	// PendingIndex indicates whether the write has a pending index insert.
+ PendingIndex bool
+	// Used by the commitlog (the series needs to be updated by the shard
+	// object first; we cannot use the Series provided by the caller as it
+	// is missing important fields like Tags).
+ Write Write
+ // Not used by the commitlog, provided by the caller (since the request
+ // is usually coming from over the wire) and is superseded by the Tags
+ // in Write.Series which will get set by the Shard object.
+ TagIter ident.TagIterator
+ // EncodedTags is used by the commit log, but also held onto as a reference
+ // here so that it can be returned to the pool after the write to commit log
+ // completes (since the Write.Series gets overwritten in SetOutcome so can't
+ // use the reference there for returning to the pool).
+ EncodedTags ts.EncodedTags
+ // Used to help the caller tie errors back to an index in their
+ // own collection.
+ OriginalIndex int
+ // Used by the commitlog.
+ Err error
+}
+
+// WriteBatch is the interface that supports adding writes to the batch,
+// as well as iterating through the batched writes and resetting the
+// struct (for pooling).
+type WriteBatch interface {
+ BatchWriter
+ // Can't use a real iterator pattern here as it slows things down.
+ Iter() []BatchWrite
+ SetPendingIndex(idx int, pending PendingIndexInsert)
+ PendingIndex() []PendingIndexInsert
+ SetError(idx int, err error)
+ SetSeries(idx int, series ts.Series)
+ SetSkipWrite(idx int)
+ Reset(batchSize int, ns ident.ID)
+ Finalize()
+
+ // Returns the WriteBatch's internal capacity. Used by the pool to throw
+ // away batches that have grown too large.
+ cap() int
+}
+
+// BatchWriter is the interface that is used for preparing a batch of
+// writes.
+type BatchWriter interface {
+ Add(
+ originalIndex int,
+ id ident.ID,
+ timestamp time.Time,
+ value float64,
+ unit xtime.Unit,
+ annotation []byte,
+ ) error
+
+ AddTagged(
+ originalIndex int,
+ id ident.ID,
+ tags ident.TagIterator,
+ encodedTags ts.EncodedTags,
+ timestamp time.Time,
+ value float64,
+ unit xtime.Unit,
+ annotation []byte,
+ ) error
+
+ SetFinalizeEncodedTagsFn(f FinalizeEncodedTagsFn)
+
+ SetFinalizeAnnotationFn(f FinalizeAnnotationFn)
+}
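
The relocated interfaces keep the same call pattern as before; a sketch of a batched write against the Database interface shown earlier (db, ns, ids, ctx, and errHandler are assumptions):

    writer, err := db.BatchWriter(ns, len(ids))
    if err != nil {
        return err
    }
    for i, id := range ids {
        if err := writer.Add(i, id, time.Now(), 1.0, xtime.Second, nil); err != nil {
            return err
        }
    }
    return db.WriteBatch(ctx, ns, writer, errHandler)
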
diff --git a/src/dbnode/ts/write_batch.go b/src/dbnode/ts/writes/write_batch.go
similarity index 82%
rename from src/dbnode/ts/write_batch.go
rename to src/dbnode/ts/writes/write_batch.go
index 987987fec0..455e00896a 100644
--- a/src/dbnode/ts/write_batch.go
+++ b/src/dbnode/ts/writes/write_batch.go
@@ -18,12 +18,13 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package ts
+package writes
import (
"errors"
"time"
+ "github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
)
@@ -33,8 +34,9 @@ var (
)
type writeBatch struct {
- writes []BatchWrite
- ns ident.ID
+ writes []BatchWrite
+ pendingIndex []PendingIndexInsert
+ ns ident.ID
// Enables callers to pool encoded tags by allowing them to
// provide a function to finalize all encoded tags once the
// writeBatch itself gets finalized.
@@ -53,9 +55,10 @@ func NewWriteBatch(
finalizeFn func(WriteBatch),
) WriteBatch {
return &writeBatch{
- writes: make([]BatchWrite, 0, batchSize),
- ns: ns,
- finalizeFn: finalizeFn,
+ writes: make([]BatchWrite, 0, batchSize),
+ pendingIndex: make([]PendingIndexInsert, 0, batchSize),
+ ns: ns,
+ finalizeFn: finalizeFn,
}
}
@@ -80,7 +83,7 @@ func (b *writeBatch) AddTagged(
originalIndex int,
id ident.ID,
tagIter ident.TagIterator,
- encodedTags EncodedTags,
+ encodedTags ts.EncodedTags,
timestamp time.Time,
value float64,
unit xtime.Unit,
@@ -116,11 +119,15 @@ func (b *writeBatch) Iter() []BatchWrite {
return b.writes
}
-func (b *writeBatch) SetOutcome(idx int, series Series, err error) {
+func (b *writeBatch) SetSeries(idx int, series ts.Series) {
b.writes[idx].SkipWrite = false
b.writes[idx].Write.Series = series
// Make sure that the EncodedTags does not get clobbered
b.writes[idx].Write.Series.EncodedTags = b.writes[idx].EncodedTags
+}
+
+func (b *writeBatch) SetError(idx int, err error) {
+ b.writes[idx].SkipWrite = true
b.writes[idx].Err = err
}
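
SetOutcome's two responsibilities are now split; callers that previously passed both a series and an error choose one path per write. A sketch of the migration:

    // Before: batch.SetOutcome(i, series, err)
    if err != nil {
        batch.SetError(i, err) // also marks the write as skipped
    } else {
        batch.SetSeries(i, series)
    }
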
@@ -128,6 +135,15 @@ func (b *writeBatch) SetSkipWrite(idx int) {
b.writes[idx].SkipWrite = true
}
+func (b *writeBatch) SetPendingIndex(idx int, pending PendingIndexInsert) {
+ b.writes[idx].PendingIndex = true
+ b.pendingIndex = append(b.pendingIndex, pending)
+}
+
+func (b *writeBatch) PendingIndex() []PendingIndexInsert {
+ return b.pendingIndex
+}
+
// Set the function that will be called to finalize annotations when a WriteBatch
// is finalized, allowing the caller to pool them.
func (b *writeBatch) SetFinalizeEncodedTagsFn(f FinalizeEncodedTagsFn) {
@@ -174,6 +190,13 @@ func (b *writeBatch) Finalize() {
}
b.writes = b.writes[:0]
+ var zeroedIndex PendingIndexInsert
+ for i := range b.pendingIndex {
+		// Remove any remaining pointers so they can be garbage collected.
+ b.pendingIndex[i] = zeroedIndex
+ }
+ b.pendingIndex = b.pendingIndex[:0]
+
b.finalizeFn(b)
}
@@ -186,7 +209,7 @@ func newBatchWriterWrite(
namespace ident.ID,
id ident.ID,
tagIter ident.TagIterator,
- encodedTags EncodedTags,
+ encodedTags ts.EncodedTags,
timestamp time.Time,
value float64,
unit xtime.Unit,
@@ -199,14 +222,15 @@ func newBatchWriterWrite(
}
return BatchWrite{
Write: Write{
- Series: Series{
+ Series: ts.Series{
ID: id,
EncodedTags: encodedTags,
Namespace: namespace,
},
- Datapoint: Datapoint{
- Timestamp: timestamp,
- Value: value,
+ Datapoint: ts.Datapoint{
+ Timestamp: timestamp,
+ TimestampNanos: xtime.ToUnixNano(timestamp),
+ Value: value,
},
Unit: unit,
Annotation: annotation,
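
To summarize the API change in this file: `SetOutcome(idx, series, err)` is split into `SetSeries(idx, series)` for the success path and `SetError(idx, err)` for the failure path, with `SetError` also marking the write as skipped. A hedged sketch of the new calling convention; `resolve` is a hypothetical lookup function:

```go
package example

import (
	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/dbnode/ts/writes"
	"github.com/m3db/m3/src/x/ident"
)

// assignOutcomes resolves each write in a batch. Failures record the
// error via SetError (which also sets SkipWrite); successes record the
// resolved series via SetSeries.
func assignOutcomes(
	batch writes.WriteBatch,
	resolve func(ident.ID) (ts.Series, error),
) {
	for i, w := range batch.Iter() {
		series, err := resolve(w.Write.Series.ID)
		if err != nil {
			batch.SetError(i, err)
			continue
		}
		batch.SetSeries(i, series)
	}
}
```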
diff --git a/src/dbnode/ts/write_batch_mock.go b/src/dbnode/ts/writes/write_batch_mock.go
similarity index 80%
rename from src/dbnode/ts/write_batch_mock.go
rename to src/dbnode/ts/writes/write_batch_mock.go
index f8a5ff682f..b4c6a15c1c 100644
--- a/src/dbnode/ts/write_batch_mock.go
+++ b/src/dbnode/ts/writes/write_batch_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/m3db/m3/src/dbnode/ts/types.go
+// Source: github.com/m3db/m3/src/dbnode/ts/writes/types.go
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -21,13 +21,14 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// Package ts is a generated GoMock package.
-package ts
+// Package writes is a generated GoMock package.
+package writes
import (
"reflect"
"time"
+ "github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/x/ident"
time0 "github.com/m3db/m3/src/x/time"
@@ -72,7 +73,7 @@ func (mr *MockWriteBatchMockRecorder) Add(originalIndex, id, timestamp, value, u
}
// AddTagged mocks base method
-func (m *MockWriteBatch) AddTagged(originalIndex int, id ident.ID, tags ident.TagIterator, encodedTags EncodedTags, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error {
+func (m *MockWriteBatch) AddTagged(originalIndex int, id ident.ID, tags ident.TagIterator, encodedTags ts.EncodedTags, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddTagged", originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation)
ret0, _ := ret[0].(error)
@@ -123,16 +124,54 @@ func (mr *MockWriteBatchMockRecorder) Iter() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iter", reflect.TypeOf((*MockWriteBatch)(nil).Iter))
}
-// SetOutcome mocks base method
-func (m *MockWriteBatch) SetOutcome(idx int, series Series, err error) {
+// SetPendingIndex mocks base method
+func (m *MockWriteBatch) SetPendingIndex(idx int, pending PendingIndexInsert) {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "SetOutcome", idx, series, err)
+ m.ctrl.Call(m, "SetPendingIndex", idx, pending)
}
-// SetOutcome indicates an expected call of SetOutcome
-func (mr *MockWriteBatchMockRecorder) SetOutcome(idx, series, err interface{}) *gomock.Call {
+// SetPendingIndex indicates an expected call of SetPendingIndex
+func (mr *MockWriteBatchMockRecorder) SetPendingIndex(idx, pending interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetOutcome", reflect.TypeOf((*MockWriteBatch)(nil).SetOutcome), idx, series, err)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPendingIndex", reflect.TypeOf((*MockWriteBatch)(nil).SetPendingIndex), idx, pending)
+}
+
+// PendingIndex mocks base method
+func (m *MockWriteBatch) PendingIndex() []PendingIndexInsert {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PendingIndex")
+ ret0, _ := ret[0].([]PendingIndexInsert)
+ return ret0
+}
+
+// PendingIndex indicates an expected call of PendingIndex
+func (mr *MockWriteBatchMockRecorder) PendingIndex() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PendingIndex", reflect.TypeOf((*MockWriteBatch)(nil).PendingIndex))
+}
+
+// SetError mocks base method
+func (m *MockWriteBatch) SetError(idx int, err error) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "SetError", idx, err)
+}
+
+// SetError indicates an expected call of SetError
+func (mr *MockWriteBatchMockRecorder) SetError(idx, err interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetError", reflect.TypeOf((*MockWriteBatch)(nil).SetError), idx, err)
+}
+
+// SetSeries mocks base method
+func (m *MockWriteBatch) SetSeries(idx int, series ts.Series) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "SetSeries", idx, series)
+}
+
+// SetSeries indicates an expected call of SetSeries
+func (mr *MockWriteBatchMockRecorder) SetSeries(idx, series interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSeries", reflect.TypeOf((*MockWriteBatch)(nil).SetSeries), idx, series)
}
// SetSkipWrite mocks base method
@@ -223,7 +262,7 @@ func (mr *MockBatchWriterMockRecorder) Add(originalIndex, id, timestamp, value,
}
// AddTagged mocks base method
-func (m *MockBatchWriter) AddTagged(originalIndex int, id ident.ID, tags ident.TagIterator, encodedTags EncodedTags, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error {
+func (m *MockBatchWriter) AddTagged(originalIndex int, id ident.ID, tags ident.TagIterator, encodedTags ts.EncodedTags, timestamp time.Time, value float64, unit time0.Unit, annotation []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddTagged", originalIndex, id, tags, encodedTags, timestamp, value, unit, annotation)
ret0, _ := ret[0].(error)
diff --git a/src/dbnode/ts/write_batch_pool.go b/src/dbnode/ts/writes/write_batch_pool.go
similarity index 99%
rename from src/dbnode/ts/write_batch_pool.go
rename to src/dbnode/ts/writes/write_batch_pool.go
index a29b9f049d..1aec4272c7 100644
--- a/src/dbnode/ts/write_batch_pool.go
+++ b/src/dbnode/ts/writes/write_batch_pool.go
@@ -18,7 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package ts
+package writes
import (
"github.com/m3db/m3/src/x/pool"
diff --git a/src/dbnode/ts/write_batch_test.go b/src/dbnode/ts/writes/write_batch_test.go
similarity index 96%
rename from src/dbnode/ts/write_batch_test.go
rename to src/dbnode/ts/writes/write_batch_test.go
index 12b76a2c2f..7f5e7976ee 100644
--- a/src/dbnode/ts/write_batch_test.go
+++ b/src/dbnode/ts/writes/write_batch_test.go
@@ -18,7 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package ts
+package writes
import (
"bytes"
@@ -184,14 +184,14 @@ func TestBatchWriterSetSeries(t *testing.T) {
)
newSeries.ID = ident.StringID(fmt.Sprint(i))
- var err error
if i == len(iter)-1 {
- // Set skip for this to true; it should revert to not skipping after
- // SetOutcome called below.
+ // Set an error for the last write; SetError marks it to be skipped.
- err = errors.New("some-error")
- writeBatch.SetSkipWrite(i)
+ err := errors.New("some-error")
+ writeBatch.SetError(i, err)
+ } else {
+ writeBatch.SetSeries(i, newSeries)
}
- writeBatch.SetOutcome(i, newSeries, err)
}
iter = writeBatch.Iter()
@@ -205,14 +205,16 @@ func TestBatchWriterSetSeries(t *testing.T) {
currSeries = currWrite.Series
i = j + 1
)
- require.Equal(t, fmt.Sprint(i), string(currSeries.ID.String()))
- require.True(t, ident.StringID(fmt.Sprint(i)).Equal(currSeries.ID))
- require.False(t, curr.SkipWrite)
if i == len(iter)-1 {
require.Equal(t, errors.New("some-error"), curr.Err)
- } else {
- require.NoError(t, curr.Err)
+ require.True(t, curr.SkipWrite)
+ continue
}
+
+ require.Equal(t, fmt.Sprint(i), string(currSeries.ID.String()))
+ require.False(t, curr.SkipWrite)
+
+ require.NoError(t, curr.Err)
}
}
diff --git a/src/dbnode/x/tchannel/options.go b/src/dbnode/x/tchannel/options.go
index d8e29537c7..d3f9775c58 100644
--- a/src/dbnode/x/tchannel/options.go
+++ b/src/dbnode/x/tchannel/options.go
@@ -29,6 +29,9 @@ import (
const (
defaultIdleCheckInterval = 5 * time.Minute
defaultMaxIdleTime = 5 * time.Minute
+ // defaultSendBufferSize sets the default send buffer size;
+ // tchannel's own default would buffer only 512 frames.
+ defaultSendBufferSize = 4096
)
// NewDefaultChannelOptions returns the default tchannel options used.
@@ -37,5 +40,8 @@ func NewDefaultChannelOptions() *tchannel.ChannelOptions {
Logger: NewNoopLogger(),
MaxIdleTime: defaultMaxIdleTime,
IdleCheckInterval: defaultIdleCheckInterval,
+ DefaultConnectionOptions: tchannel.ConnectionOptions{
+ SendBufferSize: defaultSendBufferSize,
+ },
}
}
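
The larger default send buffer (4096 frames versus tchannel's 512) reduces backpressure on hot connections. A sketch of overriding it further, assuming this package is imported as `xtchannel` and using tchannel-go's `NewChannel`; the service name and buffer value are stand-ins:

```go
package main

import (
	tchannel "github.com/uber/tchannel-go"

	xtchannel "github.com/m3db/m3/src/dbnode/x/tchannel"
)

func main() {
	opts := xtchannel.NewDefaultChannelOptions()
	// Bump the send buffer beyond the new 4096-frame default for a
	// hypothetical very high-throughput deployment.
	opts.DefaultConnectionOptions.SendBufferSize = 8192

	ch, err := tchannel.NewChannel("m3dbnode", opts)
	if err != nil {
		panic(err)
	}
	defer ch.Close()
}
```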
diff --git a/src/dbnode/x/xio/block_reader_test.go b/src/dbnode/x/xio/block_reader_test.go
index 2ed8da4a8a..fd7371a7cb 100644
--- a/src/dbnode/x/xio/block_reader_test.go
+++ b/src/dbnode/x/xio/block_reader_test.go
@@ -210,7 +210,7 @@ func TestBlockIsNotEmpty(t *testing.T) {
func TestFilterEmptyBlockReadersSliceOfSlicesInPlace(t *testing.T) {
var (
head = checked.NewBytes([]byte("some-data"), checked.NewBytesOptions())
- segment = ts.NewSegment(head, nil, 0)
+ segment = ts.NewSegment(head, nil, 0, 0)
segmentReader = NewSegmentReader(segment)
)
notEmpty := BlockReader{
@@ -231,7 +231,7 @@ func TestFilterEmptyBlockReadersSliceOfSlicesInPlace(t *testing.T) {
func TestFilterEmptyBlockReadersInPlace(t *testing.T) {
var (
head = checked.NewBytes([]byte("some-data"), checked.NewBytesOptions())
- segment = ts.NewSegment(head, nil, 0)
+ segment = ts.NewSegment(head, nil, 0, 0)
segmentReader = NewSegmentReader(segment)
)
notEmpty := BlockReader{
diff --git a/src/dbnode/x/xio/io_mock.go b/src/dbnode/x/xio/io_mock.go
index 71e175f939..daa99ba481 100644
--- a/src/dbnode/x/xio/io_mock.go
+++ b/src/dbnode/x/xio/io_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/dbnode/x/xio (interfaces: SegmentReader,SegmentReaderPool)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/dbnode/x/xio/reader_slice_of_slices_from_block_readers_iterator.go b/src/dbnode/x/xio/reader_slice_of_slices_from_block_readers_iterator.go
index 1914d24448..9df35045c3 100644
--- a/src/dbnode/x/xio/reader_slice_of_slices_from_block_readers_iterator.go
+++ b/src/dbnode/x/xio/reader_slice_of_slices_from_block_readers_iterator.go
@@ -41,7 +41,7 @@ func NewReaderSliceOfSlicesFromBlockReadersIterator(
}
func (it *readerSliceOfSlicesIterator) Next() bool {
- if !(it.idx+1 < it.len) {
+ if it.idx >= it.len-1 {
return false
}
it.idx++
@@ -71,7 +71,7 @@ func (it *readerSliceOfSlicesIterator) CurrentReaderAt(idx int) BlockReader {
func (it *readerSliceOfSlicesIterator) Reset(blocks [][]BlockReader) {
it.blocks = blocks
- it.idx = -1
+ it.resetIndex()
it.len = len(blocks)
it.closed = false
}
@@ -104,3 +104,11 @@ func (it *readerSliceOfSlicesIterator) Size() (int, error) {
}
return size, nil
}
+
+func (it *readerSliceOfSlicesIterator) Rewind() {
+ it.resetIndex()
+}
+
+func (it *readerSliceOfSlicesIterator) resetIndex() {
+ it.idx = -1
+}
diff --git a/src/dbnode/x/xio/reader_slice_of_slices_from_block_readers_iterator_test.go b/src/dbnode/x/xio/reader_slice_of_slices_from_block_readers_iterator_test.go
index 49b4dc6f50..21a1a82edf 100644
--- a/src/dbnode/x/xio/reader_slice_of_slices_from_block_readers_iterator_test.go
+++ b/src/dbnode/x/xio/reader_slice_of_slices_from_block_readers_iterator_test.go
@@ -42,6 +42,32 @@ func TestReaderSliceOfSlicesFromBlockReadersIterator(t *testing.T) {
}
iter := NewReaderSliceOfSlicesFromBlockReadersIterator(readers)
+ validateIterReaders(t, iter, readers)
+}
+
+func TestRewind(t *testing.T) {
+ var a, b, c, d, e, f BlockReader
+ all := []BlockReader{a, b, c, d, e, f}
+ for i := range all {
+ all[i] = BlockReader{
+ SegmentReader: nullSegmentReader{},
+ }
+ }
+
+ readers := [][]BlockReader{
+ []BlockReader{a, b, c},
+ []BlockReader{d},
+ []BlockReader{e, f},
+ }
+
+ iter := NewReaderSliceOfSlicesFromBlockReadersIterator(readers)
+ validateIterReaders(t, iter, readers)
+
+ iter.Rewind()
+ validateIterReaders(t, iter, readers)
+}
+
+func validateIterReaders(t *testing.T, iter ReaderSliceOfSlicesFromBlockReadersIterator, readers [][]BlockReader) {
for i := range readers {
assert.True(t, iter.Next())
l, _, _ := iter.CurrentReaders()
diff --git a/src/dbnode/x/xio/segment_reader.go b/src/dbnode/x/xio/segment_reader.go
index eb364ab803..352c8feeea 100644
--- a/src/dbnode/x/xio/segment_reader.go
+++ b/src/dbnode/x/xio/segment_reader.go
@@ -28,9 +28,11 @@ import (
)
type segmentReader struct {
- segment ts.Segment
- si int
- pool SegmentReaderPool
+ segment ts.Segment
+ lazyHead []byte
+ lazyTail []byte
+ si int
+ pool SegmentReaderPool
}
// NewSegmentReader creates a new segment reader along with a specified segment.
@@ -48,20 +50,21 @@ func (sr *segmentReader) Read(b []byte) (int, error) {
if len(b) == 0 {
return 0, nil
}
- var head, tail []byte
- if b := sr.segment.Head; b != nil {
- head = b.Bytes()
+
+ if b := sr.segment.Head; b != nil && len(sr.lazyHead) == 0 {
+ sr.lazyHead = b.Bytes()
}
- if b := sr.segment.Tail; b != nil {
- tail = b.Bytes()
+ if b := sr.segment.Tail; b != nil && len(sr.lazyTail) == 0 {
+ sr.lazyTail = b.Bytes()
}
- nh, nt := len(head), len(tail)
+
+ nh, nt := len(sr.lazyHead), len(sr.lazyTail)
if sr.si >= nh+nt {
return 0, io.EOF
}
n := 0
if sr.si < nh {
- nRead := copy(b, head[sr.si:])
+ nRead := copy(b, sr.lazyHead[sr.si:])
sr.si += nRead
n += nRead
if n == len(b) {
@@ -69,7 +72,7 @@ func (sr *segmentReader) Read(b []byte) (int, error) {
}
}
if sr.si < nh+nt {
- nRead := copy(b[n:], tail[sr.si-nh:])
+ nRead := copy(b[n:], sr.lazyTail[sr.si-nh:])
sr.si += nRead
n += nRead
}
@@ -86,11 +89,14 @@ func (sr *segmentReader) Segment() (ts.Segment, error) {
func (sr *segmentReader) Reset(segment ts.Segment) {
sr.segment = segment
sr.si = 0
+ sr.lazyHead = sr.lazyHead[:0]
+ sr.lazyTail = sr.lazyTail[:0]
}
func (sr *segmentReader) Finalize() {
- // Finalize the segment
sr.segment.Finalize()
+ sr.lazyHead = nil
+ sr.lazyTail = nil
if pool := sr.pool; pool != nil {
pool.Put(sr)
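
The reader now caches the head/tail byte slices on first `Read` and drops them on `Reset`/`Finalize`, avoiding repeated `checked.Bytes.Bytes()` calls per read. A sketch of the lifecycle this supports, under the assumption that the caller already holds the segments:

```go
package example

import (
	"io"

	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/dbnode/x/xio"
)

// drainSegments reads every segment through a single reader, exercising
// the lazy head/tail caching added above.
func drainSegments(segments []ts.Segment) error {
	if len(segments) == 0 {
		return nil
	}
	r := xio.NewSegmentReader(segments[0])
	var buf [4096]byte
	for i := 0; ; i++ {
		for {
			// The first Read per segment populates the lazy head/tail caches.
			_, err := r.Read(buf[:])
			if err == io.EOF {
				break
			}
			if err != nil {
				return err
			}
		}
		if i+1 == len(segments) {
			break
		}
		// Reset truncates the caches; the next Read re-resolves the bytes.
		r.Reset(segments[i+1])
	}
	r.Finalize() // drops the caches and returns the reader to its pool, if any
	return nil
}
```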
diff --git a/src/dbnode/x/xio/segment_reader_test.go b/src/dbnode/x/xio/segment_reader_test.go
index 7a6c779c81..ef0f704fb0 100644
--- a/src/dbnode/x/xio/segment_reader_test.go
+++ b/src/dbnode/x/xio/segment_reader_test.go
@@ -57,7 +57,8 @@ func testSegmentReader(
checkd byteFunc,
pool pool.CheckedBytesPool,
) {
- segment := ts.NewSegment(checkd(head), checkd(tail), ts.FinalizeNone)
+ checksum := uint32(10)
+ segment := ts.NewSegment(checkd(head), checkd(tail), checksum, ts.FinalizeNone)
r := NewSegmentReader(segment)
var b [100]byte
n, err := r.Read(b[:])
@@ -73,6 +74,7 @@ func testSegmentReader(
require.NoError(t, err)
require.Equal(t, head, seg.Head.Bytes())
require.Equal(t, tail, seg.Tail.Bytes())
+ require.Equal(t, checksum, seg.CalculateChecksum())
// Ensure cloned segment reader does not share original head and tail.
cloned, err := r.Clone(pool)
@@ -85,6 +87,7 @@ func testSegmentReader(
require.NoError(t, err)
require.Equal(t, head, seg.Head.Bytes())
require.Equal(t, tail, seg.Tail.Bytes())
+ require.Equal(t, checksum, seg.CalculateChecksum())
cloned.Finalize()
segment.Finalize()
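
A quick illustration of the new `NewSegment` signature, which now takes the precomputed checksum up front; per the assertions in the test above, `CalculateChecksum` returns the supplied value. The byte contents and checksum here are stand-ins:

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/dbnode/ts"
	"github.com/m3db/m3/src/x/checked"
)

func main() {
	head := checked.NewBytes([]byte("head-bytes"), checked.NewBytesOptions())
	tail := checked.NewBytes([]byte("tail-bytes"), checked.NewBytesOptions())

	// The checksum is now an explicit constructor argument.
	segment := ts.NewSegment(head, tail, 42, ts.FinalizeNone)

	fmt.Println(segment.CalculateChecksum()) // 42
}
```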
diff --git a/src/dbnode/x/xio/types.go b/src/dbnode/x/xio/types.go
index cf06d9af63..09c18dca6d 100644
--- a/src/dbnode/x/xio/types.go
+++ b/src/dbnode/x/xio/types.go
@@ -86,6 +86,10 @@ type ReaderSliceOfSlicesIterator interface {
// Size gives the size of bytes in this iterator.
Size() (int, error)
+
+ // Rewind returns the iterator to the beginning.
+ // This operation is invalid if any of the block readers have been read.
+ Rewind()
}
// ReaderSliceOfSlicesFromBlockReadersIterator is an iterator
diff --git a/src/m3em/README.md b/src/m3em/README.md
index 46c35bb866..80fd2d28bb 100644
--- a/src/m3em/README.md
+++ b/src/m3em/README.md
@@ -1,17 +1,9 @@
-m3em [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+m3em
==============================================================================================
`m3em` (pronounced `meme`) is an acronym for M3 Environment Manager. [ccm](https://github.com/pcmanus/ccm)`:C* :: m3em:m3db`. Unlike `ccm`, `m3em` permits remote host operations.
-The goal of `m3em` is to make it easy to create, manage and destroy services across hosts. It is meant for testing clustered services like [m3db](https://github.com/m3db/m3) and [m3aggregator](https://github.com/m3db/m3aggregator) .
-
-[doc-img]: https://godoc.org/github.com/m3db/m3em?status.svg
-[doc]: https://godoc.org/github.com/m3db/m3em
-[ci-img]: https://travis-ci.org/m3db/m3em.svg?branch=master
-[ci]: https://travis-ci.org/m3db/m3em
-[cov-img]: https://coveralls.io/repos/m3db/m3em/badge.svg?branch=master&service=github
-[cov]: https://coveralls.io/github/m3db/m3em?branch=master
-
+The goal of `m3em` is to make it easy to create, manage and destroy services across hosts. It is meant for testing clustered services like [m3db](https://github.com/m3db/m3) and [m3aggregator](https://github.com/m3db/m3).
## Components
There are two primary components in m3em:
@@ -57,7 +49,3 @@ agent:
EOF
$ /remote-path/m3em_agent -f m3em.agent.yaml
```
-
-
-
-This project is released under the [Apache License, Version 2.0](LICENSE).
diff --git a/src/m3em/generated/proto/m3em/m3em_mock.go b/src/m3em/generated/proto/m3em/m3em_mock.go
index 0b2266d000..24d6d67813 100644
--- a/src/m3em/generated/proto/m3em/m3em_mock.go
+++ b/src/m3em/generated/proto/m3em/m3em_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/m3em/generated/proto/m3em (interfaces: OperatorClient,Operator_PushFileClient,Operator_PullFileClient,Operator_PullFileServer)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/m3em/node/node_mock.go b/src/m3em/node/node_mock.go
index 44e471afef..4c22d30694 100644
--- a/src/m3em/node/node_mock.go
+++ b/src/m3em/node/node_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/m3em/node (interfaces: ServiceNode,Options)
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -214,6 +214,20 @@ func (mr *MockServiceNodeMockRecorder) IsolationGroup() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsolationGroup", reflect.TypeOf((*MockServiceNode)(nil).IsolationGroup))
}
+// Metadata mocks base method
+func (m *MockServiceNode) Metadata() placement.InstanceMetadata {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Metadata")
+ ret0, _ := ret[0].(placement.InstanceMetadata)
+ return ret0
+}
+
+// Metadata indicates an expected call of Metadata
+func (mr *MockServiceNodeMockRecorder) Metadata() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Metadata", reflect.TypeOf((*MockServiceNode)(nil).Metadata))
+}
+
// Port mocks base method
func (m *MockServiceNode) Port() uint32 {
m.ctrl.T.Helper()
@@ -313,6 +327,20 @@ func (mr *MockServiceNodeMockRecorder) SetIsolationGroup(arg0 interface{}) *gomo
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIsolationGroup", reflect.TypeOf((*MockServiceNode)(nil).SetIsolationGroup), arg0)
}
+// SetMetadata mocks base method
+func (m *MockServiceNode) SetMetadata(arg0 placement.InstanceMetadata) placement.Instance {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetMetadata", arg0)
+ ret0, _ := ret[0].(placement.Instance)
+ return ret0
+}
+
+// SetMetadata indicates an expected call of SetMetadata
+func (mr *MockServiceNodeMockRecorder) SetMetadata(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMetadata", reflect.TypeOf((*MockServiceNode)(nil).SetMetadata), arg0)
+}
+
// SetPort mocks base method
func (m *MockServiceNode) SetPort(arg0 uint32) placement.Instance {
m.ctrl.T.Helper()
diff --git a/src/m3ninx/doc/document.go b/src/m3ninx/doc/document.go
index fa7a5facf0..164c0f210e 100644
--- a/src/m3ninx/doc/document.go
+++ b/src/m3ninx/doc/document.go
@@ -30,7 +30,8 @@ import (
var (
errReservedFieldName = fmt.Errorf("'%s' is a reserved field name", IDReservedFieldName)
- errEmptyDocument = errors.New("document cannot be empty")
+ // ErrEmptyDocument is an error for an empty document.
+ ErrEmptyDocument = errors.New("document cannot be empty")
)
// IDReservedFieldName is the field name reserved for IDs.
@@ -152,13 +153,18 @@ func (d Document) Equal(other Document) bool {
// Validate returns an error if the document is invalid, or nil otherwise.
func (d Document) Validate() error {
if len(d.Fields) == 0 && !d.HasID() {
- return errEmptyDocument
+ return ErrEmptyDocument
+ }
+
+ if !utf8.Valid(d.ID) {
+ return fmt.Errorf("document has invalid ID: id=%v, id_hex=%x", d.ID, d.ID)
}
for _, f := range d.Fields {
// TODO: Should we enforce uniqueness of field names?
if !utf8.Valid(f.Name) {
- return fmt.Errorf("document contains invalid field name: %v", f.Name)
+ return fmt.Errorf("document has invalid field name: name=%v, name_hex=%x",
+ f.Name, f.Name)
}
if bytes.Equal(f.Name, IDReservedFieldName) {
@@ -166,7 +172,8 @@ func (d Document) Validate() error {
}
if !utf8.Valid(f.Value) {
- return fmt.Errorf("document contains invalid field value: %v", f.Value)
+ return fmt.Errorf("document has invalid field value: value=%v, value_hex=%x",
+ f.Value, f.Value)
}
}
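
The stricter validation now rejects non-UTF-8 IDs as well as field names and values, and the errors include a hex dump for debugging. A small sketch of both outcomes:

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/m3ninx/doc"
)

func main() {
	ok := doc.Document{
		ID: []byte("series-1"),
		Fields: []doc.Field{
			{Name: []byte("city"), Value: []byte("oakland")},
		},
	}
	fmt.Println(ok.Validate()) // <nil>

	bad := doc.Document{ID: []byte{0xff, 0xfe}} // not valid UTF-8
	fmt.Println(bad.Validate())
	// document has invalid ID: id=[255 254], id_hex=fffe
}
```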
diff --git a/src/m3ninx/index/batch.go b/src/m3ninx/index/batch.go
index 82803a6349..93162037aa 100644
--- a/src/m3ninx/index/batch.go
+++ b/src/m3ninx/index/batch.go
@@ -24,6 +24,7 @@ import (
"bytes"
"errors"
"fmt"
+ "sync"
"github.com/m3db/m3/src/m3ninx/doc"
)
@@ -79,6 +80,8 @@ func NewBatch(docs []doc.Document, opts ...BatchOption) Batch {
// BatchPartialError indicates an error was encountered inserting some documents in a batch.
// It is not safe for concurrent use.
type BatchPartialError struct {
+ sync.Mutex
+
errs []BatchError
}
@@ -105,10 +108,10 @@ func (e *BatchPartialError) Error() string {
return b.String()
}
-// FilterDuplicateIDErrors returns a new BatchPartialError (or nil), without
-// any DuplicateIDError(s).
-// NB(prateek): it mutates the order of errors in the original error to avoid
-// allocations.
+// FilterDuplicateIDErrors returns a new BatchPartialError (or nil), without any DuplicateIDError(s).
+// NB(prateek): it mutates the order of errors in the original error to avoid allocations.
+// NB(bodu): we return `error` here because a nil *BatchPartialError stored in an
+// `error` interface is non-nil (Go's typed-nil pitfall), so callers comparing the
+// returned concrete type against nil would misbehave.
func (e *BatchPartialError) FilterDuplicateIDErrors() error {
// cheap to do the copy as it's just pointers for the slices
var (
@@ -138,6 +141,16 @@ func (e *BatchPartialError) Add(err BatchError) {
e.errs = append(e.errs, err)
}
+// AddWithLock adds an error to e with a lock. Any nil errors are ignored.
+func (e *BatchPartialError) AddWithLock(err BatchError) {
+ if err.Err == nil {
+ return
+ }
+ e.Lock()
+ e.errs = append(e.errs, err)
+ e.Unlock()
+}
+
// Errs returns the errors with the indexes of the documents in the batch
// which were not indexed.
func (e *BatchPartialError) Errs() []BatchError {
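
`AddWithLock` exists so the new concurrent index workers can collect errors into one shared `BatchPartialError` without an external mutex. A hedged sketch of that pattern; `process` is a hypothetical per-document function:

```go
package example

import (
	"sync"

	"github.com/m3db/m3/src/m3ninx/doc"
	"github.com/m3db/m3/src/m3ninx/index"
)

// indexAll processes documents concurrently, funneling failures into a
// single BatchPartialError keyed by each document's batch index.
func indexAll(docs []doc.Document, process func(doc.Document) error) error {
	var (
		batchErr = index.NewBatchPartialError()
		wg       sync.WaitGroup
	)
	for i, d := range docs {
		wg.Add(1)
		go func(i int, d doc.Document) {
			defer wg.Done()
			if err := process(d); err != nil {
				// Safe from multiple goroutines, unlike Add.
				batchErr.AddWithLock(index.BatchError{Err: err, Idx: i})
			}
		}(i, d)
	}
	wg.Wait()
	if !batchErr.IsEmpty() {
		return batchErr
	}
	return nil
}
```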
diff --git a/src/m3ninx/index/segment/builder/builder.go b/src/m3ninx/index/segment/builder/builder.go
index 8367c5f6cf..ac1cd597e5 100644
--- a/src/m3ninx/index/segment/builder/builder.go
+++ b/src/m3ninx/index/segment/builder/builder.go
@@ -23,54 +23,270 @@ package builder
import (
"errors"
"fmt"
+ "runtime"
+ "sync"
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/index"
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/postings"
"github.com/m3db/m3/src/m3ninx/util"
+
+ "github.com/cespare/xxhash/v2"
+ "github.com/twotwotwo/sorts"
)
var (
errDocNotFound = errors.New("doc not found")
+ errClosed = errors.New("builder closed")
+)
+
+const (
+ // Buffer the work slightly to avoid blocking the main thread.
+ indexQueueSize = 2 << 9 // 1024
+ entriesPerIndexJob = 32
)
+var (
+ globalIndexWorkers = &indexWorkers{}
+ fieldsMapSetOptions = fieldsMapSetUnsafeOptions{
+ // Builder takes ownership of keys and docs so it's ok
+ // to avoid copying and finalizing keys.
+ NoCopyKey: true,
+ NoFinalizeKey: true,
+ }
+)
+
+type indexWorkers struct {
+ sync.RWMutex
+ builders int
+ queues []chan indexJob
+}
+
+type indexJob struct {
+ wg *sync.WaitGroup
+
+ opts Options
+
+ entries [entriesPerIndexJob]indexJobEntry
+ usedEntries int
+
+ shard int
+ shardedFields *shardedFields
+
+ batchErr *index.BatchPartialError
+}
+
+type indexJobEntry struct {
+ id postings.ID
+ field doc.Field
+ docIdx int
+}
+
+func (w *indexWorkers) registerBuilder() {
+ w.Lock()
+ defer w.Unlock()
+
+ preIncBuilders := w.builders
+ w.builders++
+
+ if preIncBuilders != 0 {
+ return // Already initialized.
+ }
+
+ // Need to initialize structures and prepare one worker
+ // queue per CPU, even if we don't use all of them.
+ n := runtime.NumCPU()
+ if cap(w.queues) == 0 {
+ w.queues = make([]chan indexJob, 0, n)
+ } else {
+ // Reuse existing queues slice.
+ w.queues = w.queues[:0]
+ }
+
+ // Start the workers.
+ for i := 0; i < n; i++ {
+ indexQueue := make(chan indexJob, indexQueueSize)
+ w.queues = append(w.queues, indexQueue)
+ go w.indexWorker(indexQueue)
+ }
+}
+
+func (w *indexWorkers) indexWorker(indexQueue <-chan indexJob) {
+ for job := range indexQueue {
+ for i := 0; i < job.usedEntries; i++ {
+ entry := job.entries[i]
+ terms, ok := job.shardedFields.fields.ShardedGet(job.shard, entry.field.Name)
+ if !ok {
+ // NB(bodu): Each shard's map is written only by its single worker goroutine, so this insert cannot race.
+ terms = newTerms(job.opts)
+ job.shardedFields.fields.ShardedSetUnsafe(job.shard, entry.field.Name,
+ terms, fieldsMapSetOptions)
+ }
+
+ // If empty field, track insertion of this key into the fields
+ // collection for correct response when retrieving all fields.
+ newField := terms.size() == 0
+ // NB(bodu): The bulk of the CPU time during insertion is spent inside terms.post().
+ err := terms.post(entry.field.Value, entry.id)
+ if err != nil {
+ job.batchErr.AddWithLock(index.BatchError{Err: err, Idx: entry.docIdx})
+ }
+ if err == nil && newField {
+ newEntry := uniqueField{
+ field: entry.field.Name,
+ postingsList: terms.postingsListUnion,
+ }
+ job.shardedFields.uniqueFields[job.shard] =
+ append(job.shardedFields.uniqueFields[job.shard], newEntry)
+ }
+ }
+
+ job.wg.Done()
+ }
+}
+
+func (w *indexWorkers) indexJob(job indexJob) {
+ w.queues[job.shard] <- job
+}
+
+func (w *indexWorkers) unregisterBuilder() {
+ w.Lock()
+ defer w.Unlock()
+
+ w.builders--
+
+ if w.builders != 0 {
+ return // Still have registered builders, cannot spin down yet.
+ }
+
+ // Close the workers.
+ for i := range w.queues {
+ close(w.queues[i])
+ w.queues[i] = nil
+ }
+ w.queues = w.queues[:0]
+}
+
+type builderStatus struct {
+ sync.RWMutex
+ closed bool
+}
+
type builder struct {
opts Options
newUUIDFn util.NewUUIDFn
- offset postings.ID
+ batchSizeOne index.Batch
+ docs []doc.Document
+ idSet *IDsMap
+ shardedJobs []indexJob
+ shardedFields *shardedFields
+ concurrency int
+
+ status builderStatus
+}
- batchSizeOne index.Batch
- docs []doc.Document
- idSet *IDsMap
- fields *fieldsMap
- uniqueFields [][]byte
+type shardedFields struct {
+ fields *shardedFieldsMap
+ uniqueFields [][]uniqueField
}
// NewBuilderFromDocuments returns a builder from documents; it is
// not thread safe and is optimized for insertion speed and a
// final build step when documents are indexed.
-func NewBuilderFromDocuments(opts Options) (segment.DocumentsBuilder, error) {
- return &builder{
+func NewBuilderFromDocuments(opts Options) (segment.CloseableDocumentsBuilder, error) {
+ b := &builder{
opts: opts,
newUUIDFn: opts.NewUUIDFn(),
batchSizeOne: index.Batch{
- Docs: make([]doc.Document, 1),
- AllowPartialUpdates: false,
+ Docs: make([]doc.Document, 1),
},
idSet: NewIDsMap(IDsMapOptions{
InitialSize: opts.InitialCapacity(),
}),
- fields: newFieldsMap(fieldsMapOptions{
- InitialSize: opts.InitialCapacity(),
- }),
- uniqueFields: make([][]byte, 0, opts.InitialCapacity()),
- }, nil
+ shardedFields: &shardedFields{},
+ }
+ // Indicate we need to spin up workers if we haven't already.
+ globalIndexWorkers.registerBuilder()
+ b.SetIndexConcurrency(opts.Concurrency())
+ return b, nil
+}
+
+func (b *builder) SetIndexConcurrency(value int) {
+ b.status.Lock()
+ defer b.status.Unlock()
+
+ if b.concurrency == value {
+ return // No-op
+ }
+
+ b.concurrency = value
+
+ // Nothing to migrate; jobs are only used during a batch insertion.
+ b.shardedJobs = make([]indexJob, b.concurrency)
+
+ // Take refs to existing fields to migrate.
+ existingUniqueFields := b.shardedFields.uniqueFields
+ existingFields := b.shardedFields.fields
+
+ b.shardedFields.uniqueFields = make([][]uniqueField, 0, b.concurrency)
+ b.shardedFields.fields = newShardedFieldsMap(b.concurrency, b.opts.InitialCapacity())
+
+ for i := 0; i < b.concurrency; i++ {
+ // Give each shard a fraction of the configured initial capacity.
+ shardInitialCapacity := b.opts.InitialCapacity()
+ if shardInitialCapacity > 0 {
+ shardInitialCapacity /= b.concurrency
+ }
+
+ shardUniqueFields := make([]uniqueField, 0, shardInitialCapacity)
+ b.shardedFields.uniqueFields =
+ append(b.shardedFields.uniqueFields, shardUniqueFields)
+ }
+
+ // Migrate data from existing unique fields.
+ if existingUniqueFields != nil {
+ for _, fields := range existingUniqueFields {
+ for _, field := range fields {
+ // Calculate the new shard for the field.
+ newShard := b.calculateShardWithRLock(field.field)
+
+ // Append to the correct shard.
+ b.shardedFields.uniqueFields[newShard] =
+ append(b.shardedFields.uniqueFields[newShard], field)
+ }
+ }
+ }
+
+ // Migrate from fields.
+ if existingFields != nil {
+ for _, fields := range existingFields.data {
+ for _, entry := range fields.Iter() {
+ field := entry.Key()
+ terms := entry.Value()
+
+ // Calculate the new shard for the field.
+ newShard := b.calculateShardWithRLock(field)
+
+ // Set with new correct shard.
+ b.shardedFields.fields.ShardedSetUnsafe(newShard, field,
+ terms, fieldsMapSetOptions)
+ }
+ }
+ }
}
-func (b *builder) Reset(offset postings.ID) {
- b.offset = offset
+func (b *builder) IndexConcurrency() int {
+ b.status.RLock()
+ defer b.status.RUnlock()
+
+ return b.concurrency
+}
+
+func (b *builder) Reset() {
+ b.status.Lock()
+ defer b.status.Unlock()
// Reset the documents slice.
var empty doc.Document
@@ -83,23 +299,32 @@ func (b *builder) Reset(offset postings.ID) {
b.idSet.Reset()
// Keep fields around, just reset the terms set for each one.
- for _, entry := range b.fields.Iter() {
- entry.Value().reset()
- }
+ b.shardedFields.fields.ResetTermsSets()
// Reset the unique fields slice
- for i := range b.uniqueFields {
- b.uniqueFields[i] = nil
+ var emptyField uniqueField
+ for i, shardUniqueFields := range b.shardedFields.uniqueFields {
+ for i := range shardUniqueFields {
+ shardUniqueFields[i] = emptyField
+ }
+ b.shardedFields.uniqueFields[i] = shardUniqueFields[:0]
}
- b.uniqueFields = b.uniqueFields[:0]
}
func (b *builder) Insert(d doc.Document) ([]byte, error) {
+ b.status.Lock()
+ defer b.status.Unlock()
+
// Use a preallocated slice to make insert able to avoid alloc
// a slice to call insert batch with.
b.batchSizeOne.Docs[0] = d
- err := b.InsertBatch(b.batchSizeOne)
+ err := b.insertBatchWithLock(b.batchSizeOne)
if err != nil {
+ if errs := err.Errs(); len(errs) == 1 {
+ // Return concrete error instead of the batch partial error.
+ return nil, errs[0].Err
+ }
+ // Fall back to returning the batch partial error if it's not what we expect.
return nil, err
}
last := b.docs[len(b.docs)-1]
@@ -107,15 +332,44 @@ func (b *builder) Insert(d doc.Document) ([]byte, error) {
}
func (b *builder) InsertBatch(batch index.Batch) error {
+ b.status.Lock()
+ defer b.status.Unlock()
+
+ if b.status.closed {
+ return errClosed
+ }
+
+ // NB(r): This check is required, otherwise a nil *index.BatchPartialError
+ // is returned as a non-nil "error" interface value even though the
+ // underlying error is nil (Go's typed-nil pitfall).
+ if err := b.insertBatchWithLock(batch); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *builder) resetShardedJobs() {
+ // Reset sharded jobs using memset optimization.
+ var jobZeroed indexJob
+ for i := range b.shardedJobs {
+ b.shardedJobs[i] = jobZeroed
+ }
+}
+
+func (b *builder) insertBatchWithLock(batch index.Batch) *index.BatchPartialError {
// NB(r): This is all kept in a single method to make the
- // insertion path fast.
+ // insertion path free of excess function call overhead.
+ wg := &sync.WaitGroup{}
batchErr := index.NewBatchPartialError()
+
+ // Reset shared resources now, and again at cleanup, to remove refs.
+ b.resetShardedJobs()
+ defer b.resetShardedJobs()
+
+ // Enqueue docs for indexing.
for i, d := range batch.Docs {
// Validate doc
if err := d.Validate(); err != nil {
- if !batch.AllowPartialUpdates {
- return err
- }
batchErr.Add(index.BatchError{Err: err, Idx: i})
continue
}
@@ -124,9 +378,6 @@ func (b *builder) InsertBatch(batch index.Batch) error {
if !d.HasID() {
id, err := b.newUUIDFn()
if err != nil {
- if !batch.AllowPartialUpdates {
- return err
- }
batchErr.Add(index.BatchError{Err: err, Idx: i})
continue
}
@@ -139,9 +390,6 @@ func (b *builder) InsertBatch(batch index.Batch) error {
// Avoid duplicates.
if _, ok := b.idSet.Get(d.ID); ok {
- if !batch.AllowPartialUpdates {
- return index.ErrDuplicateID
- }
batchErr.Add(index.BatchError{Err: index.ErrDuplicateID, Idx: i})
continue
}
@@ -158,60 +406,90 @@ func (b *builder) InsertBatch(batch index.Batch) error {
// Index the terms.
for _, f := range d.Fields {
- if err := b.index(postings.ID(postingsListID), f); err != nil {
- if !batch.AllowPartialUpdates {
- return err
- }
- batchErr.Add(index.BatchError{Err: err, Idx: i})
- }
+ b.queueIndexJobEntryWithLock(wg, postings.ID(postingsListID), f, i, batchErr)
}
- if err := b.index(postings.ID(postingsListID), doc.Field{
+ b.queueIndexJobEntryWithLock(wg, postings.ID(postingsListID), doc.Field{
Name: doc.IDReservedFieldName,
Value: d.ID,
- }); err != nil {
- if !batch.AllowPartialUpdates {
- return err
- }
- batchErr.Add(index.BatchError{Err: err, Idx: i})
+ }, i, batchErr)
+ }
+
+ // Enqueue any partially filled sharded jobs.
+ for shard := 0; shard < b.concurrency; shard++ {
+ if b.shardedJobs[shard].usedEntries > 0 {
+ b.flushShardedIndexJobWithLock(shard, wg, batchErr)
}
}
+ // Wait for all the concurrent indexing jobs to finish.
+ wg.Wait()
+
if !batchErr.IsEmpty() {
return batchErr
}
return nil
}
-func (b *builder) index(id postings.ID, f doc.Field) error {
- terms, ok := b.fields.Get(f.Name)
- if !ok {
- terms = newTerms(b.opts)
- b.fields.SetUnsafe(f.Name, terms, fieldsMapSetUnsafeOptions{
- NoCopyKey: true,
- NoFinalizeKey: true,
- })
+func (b *builder) queueIndexJobEntryWithLock(
+ wg *sync.WaitGroup,
+ id postings.ID,
+ field doc.Field,
+ docIdx int,
+ batchErr *index.BatchPartialError,
+) {
+ shard := b.calculateShardWithRLock(field.Name)
+ entryIndex := b.shardedJobs[shard].usedEntries
+ b.shardedJobs[shard].usedEntries++
+ b.shardedJobs[shard].entries[entryIndex].id = id
+ b.shardedJobs[shard].entries[entryIndex].field = field
+ b.shardedJobs[shard].entries[entryIndex].docIdx = docIdx
+
+ numEntries := b.shardedJobs[shard].usedEntries
+ if numEntries != entriesPerIndexJob {
+ return
}
- // If empty field, track insertion of this key into the fields
- // collection for correct response when retrieving all fields.
- newField := terms.size() == 0
- if err := terms.post(f.Value, id); err != nil {
- return err
- }
- if newField {
- b.uniqueFields = append(b.uniqueFields, f.Name)
- }
- return nil
+ // Ready to flush this job since all entries are used.
+ b.flushShardedIndexJobWithLock(shard, wg, batchErr)
+
+ // Reset for reuse.
+ b.shardedJobs[shard] = indexJob{}
+}
+
+func (b *builder) flushShardedIndexJobWithLock(
+ shard int,
+ wg *sync.WaitGroup,
+ batchErr *index.BatchPartialError,
+) {
+ // Set common fields.
+ b.shardedJobs[shard].shard = shard
+ b.shardedJobs[shard].wg = wg
+ b.shardedJobs[shard].batchErr = batchErr
+ b.shardedJobs[shard].shardedFields = b.shardedFields
+ b.shardedJobs[shard].opts = b.opts
+
+ // Enqueue job.
+ wg.Add(1)
+ globalIndexWorkers.indexJob(b.shardedJobs[shard])
+}
+
+func (b *builder) calculateShardWithRLock(field []byte) int {
+ return int(xxhash.Sum64(field) % uint64(b.concurrency))
}
func (b *builder) AllDocs() (index.IDDocIterator, error) {
- rangeIter := postings.NewRangeIterator(b.offset,
- b.offset+postings.ID(len(b.docs)))
+ b.status.RLock()
+ defer b.status.RUnlock()
+
+ rangeIter := postings.NewRangeIterator(0, postings.ID(len(b.docs)))
return index.NewIDDocIterator(b, rangeIter), nil
}
func (b *builder) Doc(id postings.ID) (doc.Document, error) {
- idx := int(id - b.offset)
+ b.status.RLock()
+ defer b.status.RUnlock()
+
+ idx := int(id)
if idx < 0 || idx >= len(b.docs) {
return doc.Document{}, errDocNotFound
}
@@ -220,10 +498,13 @@ func (b *builder) Doc(id postings.ID) (doc.Document, error) {
}
func (b *builder) Docs() []doc.Document {
+ b.status.RLock()
+ defer b.status.RUnlock()
+
return b.docs
}
-func (b *builder) FieldsIterable() segment.FieldsIterable {
+func (b *builder) FieldsIterable() segment.FieldsPostingsListIterable {
return b
}
@@ -231,12 +512,23 @@ func (b *builder) TermsIterable() segment.TermsIterable {
return b
}
-func (b *builder) Fields() (segment.FieldsIterator, error) {
- return NewOrderedBytesSliceIter(b.uniqueFields), nil
+func (b *builder) FieldsPostingsList() (segment.FieldsPostingsListIterator, error) {
+ // NB(r): Need the write lock since newOrderedFieldsPostingsListIter sorts
+ // the sharded fields and SetIndexConcurrency mutates them.
+ b.status.Lock()
+ defer b.status.Unlock()
+
+ return newOrderedFieldsPostingsListIter(b.shardedFields.uniqueFields), nil
}
func (b *builder) Terms(field []byte) (segment.TermsIterator, error) {
- terms, ok := b.fields.Get(field)
+ // NB(r): Need the write lock since a sort may be required below
+ // and SetIndexConcurrency mutates the sharded fields.
+ b.status.Lock()
+ defer b.status.Unlock()
+
+ shard := b.calculateShardWithRLock(field)
+ terms, ok := b.shardedFields.fields.ShardedGet(shard, field)
if !ok {
return nil, fmt.Errorf("field not found: %s", string(field))
}
@@ -247,3 +539,27 @@ func (b *builder) Terms(field []byte) (segment.TermsIterator, error) {
return newTermsIter(terms.uniqueTerms), nil
}
+
+func (b *builder) Close() error {
+ b.status.Lock()
+ defer b.status.Unlock()
+
+ b.status.closed = true
+ // Indicate workers can possibly spin down if no builders remain open.
+ globalIndexWorkers.unregisterBuilder()
+ return nil
+}
+
+var (
+ sortConcurrencyLock sync.RWMutex
+)
+
+// SetSortConcurrency sets the sort concurrency used when building
+// segments. Unfortunately this must be set globally, since
+// github.com/twotwotwo/sorts does not provide a way to set
+// parallelism per sort call.
+func SetSortConcurrency(value int) {
+ sortConcurrencyLock.Lock()
+ sorts.MaxProcs = value
+ sortConcurrencyLock.Unlock()
+}
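
Putting the new builder API together: `Reset` no longer takes a postings offset, builders must be `Close`d so the shared worker pool can spin down, and sort parallelism is a global knob. A sketch of the lifecycle, assuming `NewOptions` constructs default builder options (a hypothetical name):

```go
package builder

import (
	"runtime"

	"github.com/m3db/m3/src/m3ninx/doc"
	"github.com/m3db/m3/src/m3ninx/index"
)

// buildDocs sketches the new builder lifecycle.
func buildDocs(docs []doc.Document) error {
	b, err := NewBuilderFromDocuments(NewOptions()) // NewOptions assumed
	if err != nil {
		return err
	}
	// Close unregisters the builder; the global index workers stop
	// once no builders remain open.
	defer b.Close()

	SetSortConcurrency(runtime.NumCPU()) // global: affects all builders

	if err := b.InsertBatch(index.Batch{Docs: docs}); err != nil {
		return err
	}

	// Reset (no offset argument anymore) readies the builder for reuse.
	b.Reset()
	return nil
}
```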
diff --git a/src/m3ninx/index/segment/builder/builder_test.go b/src/m3ninx/index/segment/builder/builder_test.go
index 2449db5e72..2a62ae45c1 100644
--- a/src/m3ninx/index/segment/builder/builder_test.go
+++ b/src/m3ninx/index/segment/builder/builder_test.go
@@ -27,6 +27,7 @@ import (
"unsafe"
"github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/m3ninx/index"
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/stretchr/testify/require"
@@ -79,9 +80,12 @@ var (
func TestBuilderFields(t *testing.T) {
builder, err := NewBuilderFromDocuments(testOptions)
require.NoError(t, err)
+ defer func() {
+ require.NoError(t, builder.Close())
+ }()
for i := 0; i < 10; i++ {
- builder.Reset(0)
+ builder.Reset()
knownsFields := map[string]struct{}{}
for _, d := range testDocuments {
@@ -92,7 +96,7 @@ func TestBuilderFields(t *testing.T) {
require.NoError(t, err)
}
- fieldsIter, err := builder.Fields()
+ fieldsIter, err := builder.FieldsPostingsList()
require.NoError(t, err)
fields := toSlice(t, fieldsIter)
@@ -106,9 +110,12 @@ func TestBuilderFields(t *testing.T) {
func TestBuilderTerms(t *testing.T) {
builder, err := NewBuilderFromDocuments(testOptions)
require.NoError(t, err)
+ defer func() {
+ require.NoError(t, builder.Close())
+ }()
for i := 0; i < 10; i++ {
- builder.Reset(0)
+ builder.Reset()
knownsFields := map[string]map[string]struct{}{}
for _, d := range testDocuments {
@@ -136,10 +143,28 @@ func TestBuilderTerms(t *testing.T) {
}
}
-func toSlice(t *testing.T, iter segment.OrderedBytesIterator) [][]byte {
+// Test that calling Insert(...) API returns correct concrete errors
+// instead of partial batch error type.
+func TestBuilderInsertDuplicateReturnsErrDuplicateID(t *testing.T) {
+ builder, err := NewBuilderFromDocuments(testOptions)
+ require.NoError(t, err)
+ defer func() {
+ require.NoError(t, builder.Close())
+ }()
+
+ _, err = builder.Insert(testDocuments[2])
+ require.NoError(t, err)
+
+ _, err = builder.Insert(testDocuments[2])
+ require.Error(t, err)
+ require.Equal(t, index.ErrDuplicateID, err)
+}
+
+func toSlice(t *testing.T, iter segment.FieldsPostingsListIterator) [][]byte {
elems := [][]byte{}
for iter.Next() {
- elems = append(elems, iter.Current())
+ b, _ := iter.Current()
+ elems = append(elems, b)
}
require.NoError(t, iter.Err())
require.NoError(t, iter.Close())
@@ -175,10 +200,10 @@ func toTermPostings(t *testing.T, iter segment.TermsIterator) termPostings {
// nolint: unused
func printBuilder(t *testing.T, b segment.Builder) {
fmt.Printf("print builder %x\n", unsafe.Pointer(b.(*builder)))
- fieldsIter, err := b.Fields()
+ fieldsIter, err := b.FieldsPostingsList()
require.NoError(t, err)
for fieldsIter.Next() {
- curr := fieldsIter.Current()
+ curr, _ := fieldsIter.Current()
fmt.Printf("builder field: %v\n", string(curr))
termsIter, err := b.Terms(curr)
require.NoError(t, err)
diff --git a/src/m3ninx/index/segment/builder/bytes_slice_iter.go b/src/m3ninx/index/segment/builder/bytes_slice_iter.go
index 336f1e0e1a..f627388d89 100644
--- a/src/m3ninx/index/segment/builder/bytes_slice_iter.go
+++ b/src/m3ninx/index/segment/builder/bytes_slice_iter.go
@@ -22,72 +22,123 @@ package builder
import (
"bytes"
- "sort"
"github.com/m3db/m3/src/m3ninx/index/segment"
+ "github.com/m3db/m3/src/m3ninx/postings"
+
+ "github.com/twotwotwo/sorts"
)
-// OrderedBytesSliceIter is a new ordered bytes slice iterator.
-type OrderedBytesSliceIter struct {
+type uniqueField struct {
+ field []byte
+ postingsList postings.List
+}
+
+// orderedFieldsPostingsListIter is a new ordered fields/postings list iterator.
+type orderedFieldsPostingsListIter struct {
err error
done bool
- currentIdx int
- current []byte
- backingSlice [][]byte
+ currentIdx int
+ current uniqueField
+ backingSlices *sortableSliceOfSliceOfUniqueFieldsAsc
}
-var _ segment.FieldsIterator = &OrderedBytesSliceIter{}
+var _ segment.FieldsPostingsListIterator = &orderedFieldsPostingsListIter{}
-// NewOrderedBytesSliceIter sorts a slice of bytes and then
+// newOrderedFieldsPostingsListIter sorts a slice of slices of unique fields and then
// returns an iterator over them.
-func NewOrderedBytesSliceIter(
- maybeUnorderedSlice [][]byte,
-) *OrderedBytesSliceIter {
- sortSliceOfByteSlices(maybeUnorderedSlice)
- return &OrderedBytesSliceIter{
- currentIdx: -1,
- backingSlice: maybeUnorderedSlice,
+func newOrderedFieldsPostingsListIter(
+ maybeUnorderedFields [][]uniqueField,
+) *orderedFieldsPostingsListIter {
+ sortable := &sortableSliceOfSliceOfUniqueFieldsAsc{data: maybeUnorderedFields}
+ // NB(r): See SetSortConcurrency why this RLock is required.
+ sortConcurrencyLock.RLock()
+ sorts.ByBytes(sortable)
+ sortConcurrencyLock.RUnlock()
+ return &orderedFieldsPostingsListIter{
+ currentIdx: -1,
+ backingSlices: sortable,
}
}
// Next returns true if there is a next result.
-func (b *OrderedBytesSliceIter) Next() bool {
+func (b *orderedFieldsPostingsListIter) Next() bool {
if b.done || b.err != nil {
return false
}
b.currentIdx++
- if b.currentIdx >= len(b.backingSlice) {
+ if b.currentIdx >= b.backingSlices.Len() {
b.done = true
return false
}
- b.current = b.backingSlice[b.currentIdx]
+ iOuter, iInner := b.backingSlices.getIndices(b.currentIdx)
+ b.current = b.backingSlices.data[iOuter][iInner]
return true
}
// Current returns the current entry.
-func (b *OrderedBytesSliceIter) Current() []byte {
- return b.current
+func (b *orderedFieldsPostingsListIter) Current() ([]byte, postings.List) {
+ return b.current.field, b.current.postingsList
}
// Err returns an error if an error occurred iterating.
-func (b *OrderedBytesSliceIter) Err() error {
+func (b *orderedFieldsPostingsListIter) Err() error {
return nil
}
// Len returns the length of the slice.
-func (b *OrderedBytesSliceIter) Len() int {
- return len(b.backingSlice)
+func (b *orderedFieldsPostingsListIter) Len() int {
+ return b.backingSlices.Len()
}
// Close releases resources.
-func (b *OrderedBytesSliceIter) Close() error {
- b.current = nil
+func (b *orderedFieldsPostingsListIter) Close() error {
+ b.current = uniqueField{}
return nil
}
-func sortSliceOfByteSlices(b [][]byte) {
- sort.Slice(b, func(i, j int) bool {
- return bytes.Compare(b[i], b[j]) < 0
- })
+type sortableSliceOfSliceOfUniqueFieldsAsc struct {
+ data [][]uniqueField
+ length int
+}
+
+func (s *sortableSliceOfSliceOfUniqueFieldsAsc) Len() int {
+ if s.length > 0 {
+ return s.length
+ }
+
+ totalLen := 0
+ for _, innerSlice := range s.data {
+ totalLen += len(innerSlice)
+ }
+ s.length = totalLen
+
+ return s.length
+}
+
+func (s *sortableSliceOfSliceOfUniqueFieldsAsc) Less(i, j int) bool {
+ iOuter, iInner := s.getIndices(i)
+ jOuter, jInner := s.getIndices(j)
+ return bytes.Compare(s.data[iOuter][iInner].field, s.data[jOuter][jInner].field) < 0
+}
+
+func (s *sortableSliceOfSliceOfUniqueFieldsAsc) Swap(i, j int) {
+ iOuter, iInner := s.getIndices(i)
+ jOuter, jInner := s.getIndices(j)
+ s.data[iOuter][iInner], s.data[jOuter][jInner] = s.data[jOuter][jInner], s.data[iOuter][iInner]
+}
+
+func (s *sortableSliceOfSliceOfUniqueFieldsAsc) Key(i int) []byte {
+ iOuter, iInner := s.getIndices(i)
+ return s.data[iOuter][iInner].field
+}
+
+func (s *sortableSliceOfSliceOfUniqueFieldsAsc) getIndices(idx int) (int, int) {
+ currentSliceIdx := 0
+ for idx >= len(s.data[currentSliceIdx]) {
+ idx -= len(s.data[currentSliceIdx])
+ currentSliceIdx++
+ }
+ return currentSliceIdx, idx
}
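
The iterator sorts the sharded `[][]uniqueField` in place by presenting it to `sorts.ByBytes` as one flat sequence; `getIndices` maps a flat index back to (outer, inner) coordinates by walking the shards, and since the shard count is small (bounded by the index concurrency) the linear walk is cheap. A standalone sketch of the same mapping, with hypothetical data:

```go
package main

import "fmt"

// flatToNested maps a flat index over a slice of slices back to
// (outer, inner) coordinates, mirroring getIndices above. Callers
// must keep idx below the total element count.
func flatToNested(data [][]int, idx int) (int, int) {
	outer := 0
	for idx >= len(data[outer]) {
		idx -= len(data[outer])
		outer++
	}
	return outer, idx
}

func main() {
	data := [][]int{{10, 11}, {12}, {13, 14}}
	outer, inner := flatToNested(data, 3)
	fmt.Println(outer, inner, data[outer][inner]) // 2 0 13
}
```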
diff --git a/src/m3ninx/index/segment/builder/fields_map_new.go b/src/m3ninx/index/segment/builder/fields_map_new.go
index d9d4953c91..015a1e21eb 100644
--- a/src/m3ninx/index/segment/builder/fields_map_new.go
+++ b/src/m3ninx/index/segment/builder/fields_map_new.go
@@ -29,7 +29,7 @@ import (
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/m3ninx/index/segment/builder/ids_map_new.go b/src/m3ninx/index/segment/builder/ids_map_new.go
index 9fb57a4dfa..757181e444 100644
--- a/src/m3ninx/index/segment/builder/ids_map_new.go
+++ b/src/m3ninx/index/segment/builder/ids_map_new.go
@@ -29,7 +29,7 @@ import (
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/m3ninx/index/segment/builder/multi_segments_builder.go b/src/m3ninx/index/segment/builder/multi_segments_builder.go
index 6f29c1b572..459b93f524 100644
--- a/src/m3ninx/index/segment/builder/multi_segments_builder.go
+++ b/src/m3ninx/index/segment/builder/multi_segments_builder.go
@@ -36,7 +36,6 @@ type builderFromSegments struct {
idSet *IDsMap
segments []segmentMetadata
termsIter *termsIterFromSegments
- offset postings.ID
segmentsOffset postings.ID
}
@@ -60,9 +59,7 @@ func NewBuilderFromSegments(opts Options) segment.SegmentsBuilder {
}
}
-func (b *builderFromSegments) Reset(offset postings.ID) {
- b.offset = offset
-
+func (b *builderFromSegments) Reset() {
// Reset the documents slice
var emptyDoc doc.Document
for i := range b.docs {
@@ -152,13 +149,12 @@ func (b *builderFromSegments) Docs() []doc.Document {
}
func (b *builderFromSegments) AllDocs() (index.IDDocIterator, error) {
- rangeIter := postings.NewRangeIterator(b.offset,
- b.offset+postings.ID(len(b.docs)))
+ rangeIter := postings.NewRangeIterator(0, postings.ID(len(b.docs)))
return index.NewIDDocIterator(b, rangeIter), nil
}
func (b *builderFromSegments) Doc(id postings.ID) (doc.Document, error) {
- idx := int(id - b.offset)
+ idx := int(id)
if idx < 0 || idx >= len(b.docs) {
return doc.Document{}, errDocNotFound
}
@@ -178,6 +174,10 @@ func (b *builderFromSegments) Fields() (segment.FieldsIterator, error) {
return newFieldIterFromSegments(b.segments)
}
+func (b *builderFromSegments) FieldsPostingsList() (segment.FieldsPostingsListIterator, error) {
+ return newFieldPostingsListIterFromSegments(b.segments)
+}
+
func (b *builderFromSegments) Terms(field []byte) (segment.TermsIterator, error) {
if err := b.termsIter.setField(field); err != nil {
return nil, err
diff --git a/src/m3ninx/index/segment/builder/multi_segments_field_iter_test.go b/src/m3ninx/index/segment/builder/multi_segments_field_iter_test.go
index 31b07c59e5..f7fc85d2a2 100644
--- a/src/m3ninx/index/segment/builder/multi_segments_field_iter_test.go
+++ b/src/m3ninx/index/segment/builder/multi_segments_field_iter_test.go
@@ -153,7 +153,7 @@ func newTestSegmentWithDocs(
t *testing.T,
docs []doc.Document,
) segment.Segment {
- seg, err := mem.NewSegment(0, testMemOptions)
+ seg, err := mem.NewSegment(testMemOptions)
require.NoError(t, err)
defer func() {
diff --git a/src/m3ninx/index/segment/builder/multi_segments_field_postings_list_iter.go b/src/m3ninx/index/segment/builder/multi_segments_field_postings_list_iter.go
new file mode 100644
index 0000000000..08517b224f
--- /dev/null
+++ b/src/m3ninx/index/segment/builder/multi_segments_field_postings_list_iter.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package builder
+
+import (
+ "github.com/m3db/m3/src/m3ninx/index/segment"
+ xerrors "github.com/m3db/m3/src/x/errors"
+)
+
+// Ensure for our use case that the multi key iterator we return
+// matches the signature for the fields iterator.
+var _ segment.FieldsPostingsListIterator = &multiKeyPostingsListIterator{}
+
+func newFieldPostingsListIterFromSegments(
+ segments []segmentMetadata,
+) (segment.FieldsPostingsListIterator, error) {
+ multiIter := newMultiKeyPostingsListIterator()
+ for _, seg := range segments {
+ iter, err := seg.segment.FieldsIterable().Fields()
+ if err != nil {
+ return nil, err
+ }
+ if !iter.Next() {
+ // Don't consume this iterator if no results.
+ if err := xerrors.FirstError(iter.Err(), iter.Close()); err != nil {
+ return nil, err
+ }
+ continue
+ }
+
+ multiIter.add(&fieldsKeyIter{
+ iter: iter,
+ segment: seg,
+ })
+ }
+
+ return multiIter, nil
+}
+
+// fieldsKeyIter needs to be a keyIterator and contains a terms iterator
+var _ keyIterator = &fieldsKeyIter{}
+
+type fieldsKeyIter struct {
+ iter segment.FieldsIterator
+ segment segmentMetadata
+}
+
+func (i *fieldsKeyIter) Next() bool {
+ return i.iter.Next()
+}
+
+func (i *fieldsKeyIter) Current() []byte {
+ return i.iter.Current()
+}
+
+func (i *fieldsKeyIter) Err() error {
+ return i.iter.Err()
+}
+
+func (i *fieldsKeyIter) Close() error {
+ return i.iter.Close()
+}
diff --git a/src/m3ninx/index/segment/builder/multi_segments_field_postings_list_iter_test.go b/src/m3ninx/index/segment/builder/multi_segments_field_postings_list_iter_test.go
new file mode 100644
index 0000000000..651a4ed346
--- /dev/null
+++ b/src/m3ninx/index/segment/builder/multi_segments_field_postings_list_iter_test.go
@@ -0,0 +1,139 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package builder
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/m3ninx/index/segment"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestFieldPostingsListIterFromSegments(t *testing.T) {
+ segments := []segment.Segment{
+ newTestSegmentWithDocs(t, []doc.Document{
+ {
+ ID: []byte("bux_0"),
+ Fields: []doc.Field{
+ {Name: []byte("fruit"), Value: []byte("apple")},
+ {Name: []byte("vegetable"), Value: []byte("carrot")},
+ {Name: []byte("infrequent"), Value: []byte("val0")},
+ },
+ },
+ {
+ ID: []byte("bar_0"),
+ Fields: []doc.Field{
+ {Name: []byte("cat"), Value: []byte("rhymes")},
+ {Name: []byte("hat"), Value: []byte("with")},
+ {Name: []byte("bat"), Value: []byte("pat")},
+ },
+ },
+ }),
+ newTestSegmentWithDocs(t, []doc.Document{
+ {
+ ID: []byte("foo_0"),
+ Fields: []doc.Field{
+ {Name: []byte("fruit"), Value: []byte("apple")},
+ {Name: []byte("vegetable"), Value: []byte("carrot")},
+ {Name: []byte("infrequent"), Value: []byte("val0")},
+ },
+ },
+ {
+ ID: []byte("bux_1"),
+ Fields: []doc.Field{
+ {Name: []byte("delta"), Value: []byte("22")},
+ {Name: []byte("gamma"), Value: []byte("33")},
+ {Name: []byte("theta"), Value: []byte("44")},
+ },
+ },
+ }),
+ newTestSegmentWithDocs(t, []doc.Document{
+ {
+ ID: []byte("bar_1"),
+ Fields: []doc.Field{
+ {Name: []byte("cat"), Value: []byte("rhymes")},
+ {Name: []byte("hat"), Value: []byte("with")},
+ {Name: []byte("bat"), Value: []byte("pat")},
+ },
+ },
+ {
+ ID: []byte("foo_1"),
+ Fields: []doc.Field{
+ {Name: []byte("fruit"), Value: []byte("apple")},
+ {Name: []byte("vegetable"), Value: []byte("carrot")},
+ {Name: []byte("infrequent"), Value: []byte("val1")},
+ },
+ },
+ {
+ ID: []byte("baz_0"),
+ Fields: []doc.Field{
+ {Name: []byte("fruit"), Value: []byte("watermelon")},
+ {Name: []byte("color"), Value: []byte("green")},
+ {Name: []byte("alpha"), Value: []byte("0.5")},
+ },
+ },
+ {
+ ID: []byte("bux_2"),
+ Fields: []doc.Field{
+ {Name: []byte("delta"), Value: []byte("22")},
+ {Name: []byte("gamma"), Value: []byte("33")},
+ {Name: []byte("theta"), Value: []byte("44")},
+ },
+ },
+ }),
+ }
+ builder := NewBuilderFromSegments(testOptions)
+ builder.Reset()
+
+ b, ok := builder.(*builderFromSegments)
+ require.True(t, ok)
+ require.NoError(t, builder.AddSegments(segments))
+ iter, err := b.FieldsPostingsList()
+ require.NoError(t, err)
+ // For each field, check that its postings list contains a document's
+ // postings ID if and only if the document has that field.
+ for iter.Next() {
+ field, pl := iter.Current()
+ docIter, err := b.AllDocs()
+ require.NoError(t, err)
+ for docIter.Next() {
+ doc := docIter.Current()
+ pID := docIter.PostingsID()
+ found := checkIfFieldExistsInDoc(field, doc)
+ require.Equal(t, found, pl.Contains(pID))
+ }
+ }
+}
+
+func checkIfFieldExistsInDoc(
+ field []byte,
+ doc doc.Document,
+) bool {
+ for _, f := range doc.Fields {
+ if bytes.Equal(field, f.Name) {
+ return true
+ }
+ }
+ return false
+}
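
The new test above asserts the core property of field-level postings lists: a field's list contains a document's postings ID exactly when that document carries the field. A standalone restatement of that property, with plain maps standing in for segments and postings lists (hypothetical types, not the m3ninx API):

```go
// fieldPostingsConsistent is a sketch of the property the test asserts:
// postings-list membership for a field must equal field presence per doc.
func fieldPostingsConsistent(
	fieldDocs map[string]map[int]bool, // field -> postings IDs in its list
	docFields map[int]map[string]bool, // postings ID -> fields on the doc
) bool {
	for field, ids := range fieldDocs {
		for id, fields := range docFields {
			if ids[id] != fields[field] {
				return false
			}
		}
	}
	return true
}
```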
diff --git a/src/m3ninx/index/segment/builder/multi_segments_multi_key_postings_list_iter.go b/src/m3ninx/index/segment/builder/multi_segments_multi_key_postings_list_iter.go
new file mode 100644
index 0000000000..206be79fd3
--- /dev/null
+++ b/src/m3ninx/index/segment/builder/multi_segments_multi_key_postings_list_iter.go
@@ -0,0 +1,247 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package builder
+
+import (
+ "bytes"
+
+ "github.com/m3db/m3/src/m3ninx/index"
+ "github.com/m3db/m3/src/m3ninx/index/segment"
+ "github.com/m3db/m3/src/m3ninx/postings"
+ "github.com/m3db/m3/src/m3ninx/postings/roaring"
+ xerrors "github.com/m3db/m3/src/x/errors"
+ bitmap "github.com/m3dbx/pilosa/roaring"
+)
+
+var _ segment.FieldsPostingsListIterator = &multiKeyPostingsListIterator{}
+
+type multiKeyPostingsListIterator struct {
+ err error
+ firstNext bool
+ closeIters []keyIterator
+ iters []keyIterator
+ currIters []keyIterator
+ currReaders []index.Reader
+ currFieldPostingsList postings.MutableList
+ bitmapIter *bitmap.Iterator
+}
+
+func newMultiKeyPostingsListIterator() *multiKeyPostingsListIterator {
+ b := bitmap.NewBitmapWithDefaultPooling(defaultBitmapContainerPooling)
+ i := &multiKeyPostingsListIterator{
+ currFieldPostingsList: roaring.NewPostingsListFromBitmap(b),
+ bitmapIter: &bitmap.Iterator{},
+ }
+ i.reset()
+ return i
+}
+
+func (i *multiKeyPostingsListIterator) reset() {
+ i.firstNext = true
+ i.currFieldPostingsList.Reset()
+
+ for j := range i.closeIters {
+ i.closeIters[j] = nil
+ }
+ i.closeIters = i.closeIters[:0]
+
+ for j := range i.iters {
+ i.iters[j] = nil
+ }
+ i.iters = i.iters[:0]
+
+ for j := range i.currIters {
+ i.currIters[j] = nil
+ }
+ i.currIters = i.currIters[:0]
+}
+
+func (i *multiKeyPostingsListIterator) add(iter keyIterator) {
+ i.closeIters = append(i.closeIters, iter)
+ i.iters = append(i.iters, iter)
+ i.tryAddCurr(iter)
+}
+
+func (i *multiKeyPostingsListIterator) Next() bool {
+ if i.err != nil {
+ return false
+ }
+ if len(i.iters) == 0 {
+ return false
+ }
+
+ if i.firstNext {
+ i.firstNext = false
+ return true
+ }
+
+ for _, currIter := range i.currIters {
+ currNext := currIter.Next()
+ if currNext {
+ // This iterator has a next value, keep advancing the other matching iterators.
+ continue
+ }
+
+ // Remove iter
+ n := len(i.iters)
+ idx := -1
+ for j, iter := range i.iters {
+ if iter == currIter {
+ idx = j
+ break
+ }
+ }
+ i.iters[idx] = i.iters[n-1]
+ i.iters[n-1] = nil
+ i.iters = i.iters[:n-1]
+ }
+ if len(i.iters) == 0 {
+ return false
+ }
+
+ // Re-evaluate current value
+ i.currEvaluate()
+
+ // NB(bodu): Build the postings list for this field, closing any readers
+ // opened along the way once done.
+ defer func() {
+ for idx, reader := range i.currReaders {
+ if err := reader.Close(); err != nil {
+ i.err = err
+ }
+ i.currReaders[idx] = nil
+ }
+ i.currReaders = i.currReaders[:0]
+ }()
+
+ i.currFieldPostingsList.Reset()
+ currField := i.currIters[0].Current()
+
+ for _, iter := range i.currIters {
+ fieldsKeyIter := iter.(*fieldsKeyIter)
+ reader, err := fieldsKeyIter.segment.segment.Reader()
+ if err != nil {
+ i.err = err
+ return false
+ }
+ i.currReaders = append(i.currReaders, reader)
+
+ pl, err := reader.MatchField(currField)
+ if err != nil {
+ i.err = err
+ return false
+ }
+
+ if fieldsKeyIter.segment.offset == 0 {
+ // No offset, which means this is the first segment we are combining,
+ // so we can union its postings list directly.
+ i.currFieldPostingsList.Union(pl)
+ continue
+ }
+
+ // We have to take the offset and any duplicates into account.
+ var (
+ iter = i.bitmapIter
+ duplicates = fieldsKeyIter.segment.duplicatesAsc
+ negativeOffset postings.ID
+ )
+ bitmap, ok := roaring.BitmapFromPostingsList(pl)
+ if !ok {
+ i.err = errPostingsListNotRoaring
+ return false
+ }
+
+ iter.Reset(bitmap)
+ for v, eof := iter.Next(); !eof; v, eof = iter.Next() {
+ curr := postings.ID(v)
+ for len(duplicates) > 0 && curr > duplicates[0] {
+ duplicates = duplicates[1:]
+ negativeOffset++
+ }
+ if len(duplicates) > 0 && curr == duplicates[0] {
+ duplicates = duplicates[1:]
+ negativeOffset++
+ // Also skip this value, since it is itself a duplicate.
+ continue
+ }
+ value := curr + fieldsKeyIter.segment.offset - negativeOffset
+ if err := i.currFieldPostingsList.Insert(value); err != nil {
+ i.err = err
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func (i *multiKeyPostingsListIterator) currEvaluate() {
+ i.currIters = i.currIters[:0]
+ for _, iter := range i.iters {
+ i.tryAddCurr(iter)
+ }
+}
+
+func (i *multiKeyPostingsListIterator) tryAddCurr(iter keyIterator) {
+ var (
+ hasCurr = len(i.currIters) > 0
+ cmp int
+ )
+ if hasCurr {
+ curr, _ := i.Current()
+ cmp = bytes.Compare(iter.Current(), curr)
+ }
+ if !hasCurr || cmp < 0 {
+ // Set the current lowest key value
+ i.currIters = i.currIters[:0]
+ i.currIters = append(i.currIters, iter)
+ } else if hasCurr && cmp == 0 {
+ // Set a matching duplicate curr iter
+ i.currIters = append(i.currIters, iter)
+ }
+}
+
+func (i *multiKeyPostingsListIterator) Current() ([]byte, postings.List) {
+ return i.currIters[0].Current(), i.currFieldPostingsList
+}
+
+func (i *multiKeyPostingsListIterator) CurrentIters() []keyIterator {
+ return i.currIters
+}
+
+func (i *multiKeyPostingsListIterator) Err() error {
+ multiErr := xerrors.NewMultiError()
+ for _, iter := range i.closeIters {
+ multiErr = multiErr.Add(iter.Err())
+ }
+ if i.err != nil {
+ multiErr = multiErr.Add(i.err)
+ }
+ return multiErr.FinalError()
+}
+
+func (i *multiKeyPostingsListIterator) Close() error {
+ multiErr := xerrors.NewMultiError()
+ for _, iter := range i.closeIters {
+ multiErr = multiErr.Add(iter.Close())
+ }
+ // Free resources
+ i.reset()
+ return multiErr.FinalError()
+}
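
The offset arithmetic in Next mirrors the existing terms iterator: postings IDs from a source segment are shifted up by that segment's base offset, and IDs of documents already present in earlier segments (the ascending duplicatesAsc list) are skipped, with each skipped duplicate shrinking the shift applied to subsequent IDs. A standalone sketch of just that remapping, under the assumption the inputs are plain sorted slices:

```go
// remapPostingsIDs shifts source-segment postings IDs by offset while
// dropping duplicates and compacting the IDs after each dropped one.
// It mirrors the loop in multiKeyPostingsListIterator.Next above.
func remapPostingsIDs(src []uint64, offset uint64, duplicatesAsc []uint64) []uint64 {
	var (
		out            []uint64
		negativeOffset uint64
	)
	for _, curr := range src {
		for len(duplicatesAsc) > 0 && curr > duplicatesAsc[0] {
			duplicatesAsc = duplicatesAsc[1:]
			negativeOffset++
		}
		if len(duplicatesAsc) > 0 && curr == duplicatesAsc[0] {
			duplicatesAsc = duplicatesAsc[1:]
			negativeOffset++
			continue // the ID itself is a duplicate, skip it
		}
		out = append(out, curr+offset-negativeOffset)
	}
	return out
}

// For example: remapPostingsIDs([]uint64{0, 1, 2, 3}, 10, []uint64{1})
// returns []uint64{10, 11, 12} -- ID 1 is dropped as a duplicate and the
// IDs after it shift down by one before the base offset is applied.
```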
diff --git a/src/m3ninx/index/segment/builder/multi_segments_terms_iter.go b/src/m3ninx/index/segment/builder/multi_segments_terms_iter.go
index 0bfe8daa16..d27eb3ec8d 100644
--- a/src/m3ninx/index/segment/builder/multi_segments_terms_iter.go
+++ b/src/m3ninx/index/segment/builder/multi_segments_terms_iter.go
@@ -27,7 +27,7 @@ import (
"github.com/m3db/m3/src/m3ninx/postings"
"github.com/m3db/m3/src/m3ninx/postings/roaring"
xerrors "github.com/m3db/m3/src/x/errors"
- bitmap "github.com/m3db/pilosa/roaring"
+ bitmap "github.com/m3dbx/pilosa/roaring"
)
const (
@@ -174,7 +174,10 @@ func (i *termsIterFromSegments) Next() bool {
continue
}
value := curr + termsKeyIter.segment.offset - negativeOffset
- _ = i.currPostingsList.Insert(value)
+ if err := i.currPostingsList.Insert(value); err != nil {
+ i.err = err
+ return false
+ }
}
}
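
Since Insert failures now stop iteration, consumers must check Err after the Next loop or risk treating a partially built postings list as complete. The consuming pattern looks like:

```go
for iter.Next() {
	term, pl := iter.Current()
	consume(term, pl) // hypothetical consumer
}
// Insert errors now surface here instead of being silently dropped.
if err := iter.Err(); err != nil {
	return err
}
```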
diff --git a/src/m3ninx/index/segment/builder/multi_segments_terms_iter_test.go b/src/m3ninx/index/segment/builder/multi_segments_terms_iter_test.go
index 0adb11fed5..65219d12c5 100644
--- a/src/m3ninx/index/segment/builder/multi_segments_terms_iter_test.go
+++ b/src/m3ninx/index/segment/builder/multi_segments_terms_iter_test.go
@@ -80,7 +80,7 @@ func TestTermsIterFromSegmentsDeduplicates(t *testing.T) {
}
builder := NewBuilderFromSegments(testOptions)
- builder.Reset(0)
+ builder.Reset()
require.NoError(t, builder.AddSegments(segments))
iter, err := builder.Terms([]byte("fruit"))
require.NoError(t, err)
diff --git a/src/m3ninx/index/segment/builder/options.go b/src/m3ninx/index/segment/builder/options.go
index d96800c209..e2b69b0719 100644
--- a/src/m3ninx/index/segment/builder/options.go
+++ b/src/m3ninx/index/segment/builder/options.go
@@ -21,6 +21,8 @@
package builder
import (
+ "runtime"
+
"github.com/m3db/m3/src/m3ninx/postings"
"github.com/m3db/m3/src/m3ninx/postings/roaring"
"github.com/m3db/m3/src/m3ninx/util"
@@ -30,6 +32,10 @@ const (
defaultInitialCapacity = 128
)
+var (
+ defaultConcurrency = runtime.NumCPU()
+)
+
// Options is a collection of options for segment building.
type Options interface {
// SetNewUUIDFn sets the function used to generate new UUIDs.
@@ -49,12 +55,19 @@ type Options interface {
// PostingsListPool returns the postings list pool.
PostingsListPool() postings.Pool
+
+ // SetConcurrency sets the indexing concurrency.
+ SetConcurrency(value int) Options
+
+ // Concurrency returns the indexing concurrency.
+ Concurrency() int
}
type opts struct {
newUUIDFn util.NewUUIDFn
initialCapacity int
postingsPool postings.Pool
+ concurrency int
}
// NewOptions returns new options.
@@ -63,6 +76,7 @@ func NewOptions() Options {
newUUIDFn: util.NewUUID,
initialCapacity: defaultInitialCapacity,
postingsPool: postings.NewPool(nil, roaring.NewPostingsList),
+ concurrency: defaultConcurrency,
}
}
@@ -95,3 +109,13 @@ func (o *opts) SetPostingsListPool(v postings.Pool) Options {
func (o *opts) PostingsListPool() postings.Pool {
return o.postingsPool
}
+
+func (o *opts) SetConcurrency(v int) Options {
+ opts := *o
+ opts.concurrency = v
+ return &opts
+}
+
+func (o *opts) Concurrency() int {
+ return o.concurrency
+}
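
Because SetConcurrency copies the options struct like the other setters, it composes in the usual chained style. A usage sketch, assuming the package's existing NewBuilderFromDocuments constructor (not shown in this diff):

```go
// Build with explicit indexing concurrency; 4 is illustrative, the
// default is runtime.NumCPU().
opts := NewOptions().SetConcurrency(4)
b, err := NewBuilderFromDocuments(opts)
if err != nil {
	// handle error
}
_ = b
```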
diff --git a/src/m3ninx/index/segment/builder/postings_map_new.go b/src/m3ninx/index/segment/builder/postings_map_new.go
index 8984fff77d..5fb871a420 100644
--- a/src/m3ninx/index/segment/builder/postings_map_new.go
+++ b/src/m3ninx/index/segment/builder/postings_map_new.go
@@ -29,7 +29,7 @@ import (
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/m3ninx/index/segment/builder/sharded_fields_map.go b/src/m3ninx/index/segment/builder/sharded_fields_map.go
new file mode 100644
index 0000000000..ffa452c94c
--- /dev/null
+++ b/src/m3ninx/index/segment/builder/sharded_fields_map.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package builder
+
+type shardedFieldsMap struct {
+ data []*fieldsMap
+}
+
+func newShardedFieldsMap(
+ numShards int,
+ shardInitialCapacity int,
+) *shardedFieldsMap {
+ data := make([]*fieldsMap, 0, numShards)
+ for i := 0; i < numShards; i++ {
+ data = append(data, newFieldsMap(fieldsMapOptions{
+ InitialSize: shardInitialCapacity,
+ }))
+ }
+ return &shardedFieldsMap{
+ data: data,
+ }
+}
+
+func (s *shardedFieldsMap) ShardedGet(
+ shard int,
+ k []byte,
+) (*terms, bool) {
+ return s.data[shard].Get(k)
+}
+
+func (s *shardedFieldsMap) ShardedSetUnsafe(
+ shard int,
+ k []byte,
+ v *terms,
+ opts fieldsMapSetUnsafeOptions,
+) {
+ s.data[shard].SetUnsafe(k, v, opts)
+}
+
+// ResetTermsSets keeps fields around but resets the terms set for each one.
+func (s *shardedFieldsMap) ResetTermsSets() {
+ for _, fieldMap := range s.data {
+ for _, entry := range fieldMap.Iter() {
+ entry.Value().reset()
+ }
+ }
+}
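
The sharded map only exposes Get/Set keyed by an externally computed shard; how a caller maps a field name to a shard is not shown in this diff. A plausible sketch, assuming shards are derived by hashing the field name (xxhash is already a dependency here) modulo the shard count:

```go
import "github.com/cespare/xxhash/v2"

// shardForField is a hypothetical helper (not part of this diff): map a
// field name to a shard so concurrent workers touch disjoint fieldsMaps.
func shardForField(field []byte, numShards int) int {
	return int(xxhash.Sum64(field) % uint64(numShards))
}
```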
diff --git a/src/m3ninx/index/segment/builder/terms.go b/src/m3ninx/index/segment/builder/terms.go
index a7c652f6f0..4fde080672 100644
--- a/src/m3ninx/index/segment/builder/terms.go
+++ b/src/m3ninx/index/segment/builder/terms.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -31,6 +31,7 @@ type terms struct {
opts Options
pool postings.Pool
postings *PostingsMap
+ postingsListUnion postings.MutableList
uniqueTerms []termElem
uniqueTermsIsSorted bool
}
@@ -41,10 +42,12 @@ type termElem struct {
}
func newTerms(opts Options) *terms {
+ pool := opts.PostingsListPool()
return &terms{
- opts: opts,
- pool: opts.PostingsListPool(),
- postings: NewPostingsMap(PostingsMapOptions{}),
+ opts: opts,
+ pool: pool,
+ postingsListUnion: pool.Get(),
+ postings: NewPostingsMap(PostingsMapOptions{}),
}
}
@@ -69,6 +72,9 @@ func (t *terms) post(term []byte, id postings.ID) error {
if err := postingsList.Insert(id); err != nil {
return err
}
+ if err := t.postingsListUnion.Insert(id); err != nil {
+ return err
+ }
if newTerm {
t.uniqueTerms = append(t.uniqueTerms, termElem{
term: term,
@@ -90,7 +96,11 @@ func (t *terms) sortIfRequired() {
return
}
+ // NB(r): See SetSortConcurrency for why this RLock is required.
+ sortConcurrencyLock.RLock()
sorts.ByBytes(t)
+ sortConcurrencyLock.RUnlock()
+
t.uniqueTermsIsSorted = true
}
@@ -100,6 +110,7 @@ func (t *terms) reset() {
t.pool.Put(entry.Value())
}
t.postings.Reset()
+ t.postingsListUnion.Reset()
// Reset the unique terms slice
var emptyTerm termElem
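
Maintaining postingsListUnion inside post means each terms instance always holds the union of every postings ID inserted for its field, kept in lockstep with the per-term lists. This is what lets the FST writer (see writer.go below) drop its write-time UnionMany pass. A conceptual sketch, with maps in place of pooled roaring bitmaps:

```go
// fieldPostings sketches terms above: the per-term postings and the
// field-level union are updated together at insert time, so the builder
// can hand the writer a ready-made field postings list.
type fieldPostings struct {
	perTerm map[string]map[int]struct{} // term -> postings IDs
	union   map[int]struct{}            // all postings IDs for the field
}

func (f *fieldPostings) post(term string, id int) {
	if f.perTerm[term] == nil {
		f.perTerm[term] = make(map[int]struct{})
	}
	f.perTerm[term][id] = struct{}{}
	f.union[id] = struct{}{}
}
```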
diff --git a/src/m3ninx/index/segment/builder/terms_test.go b/src/m3ninx/index/segment/builder/terms_test.go
new file mode 100644
index 0000000000..0a17d7a062
--- /dev/null
+++ b/src/m3ninx/index/segment/builder/terms_test.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package builder
+
+import (
+ "testing"
+
+ "github.com/m3db/m3/src/m3ninx/postings"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestTermsReuse(t *testing.T) {
+ terms := newTerms(NewOptions())
+
+ require.NoError(t, terms.post([]byte("term"), postings.ID(1)))
+ require.Equal(t, terms.size(), 1)
+ require.Equal(t, terms.postings.Len(), 1)
+ require.Equal(t, terms.postingsListUnion.Len(), 1)
+
+ terms.reset()
+ require.Equal(t, terms.size(), 0)
+ require.Equal(t, terms.postings.Len(), 0)
+ require.Equal(t, terms.postingsListUnion.Len(), 0)
+}
diff --git a/src/m3ninx/index/segment/fst/docs_writer.go b/src/m3ninx/index/segment/fst/docs_writer.go
new file mode 100644
index 0000000000..1fdfe6a011
--- /dev/null
+++ b/src/m3ninx/index/segment/fst/docs_writer.go
@@ -0,0 +1,95 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fst
+
+import (
+ "io"
+
+ "github.com/m3db/m3/src/m3ninx/index"
+ "github.com/m3db/m3/src/m3ninx/index/segment/fst/encoding/docs"
+)
+
+// DocumentsWriter writes out documents data given a doc iterator.
+type DocumentsWriter struct {
+ iter index.IDDocIterator
+ sizeHint int
+ docDataWriter *docs.DataWriter
+ docIndexWriter *docs.IndexWriter
+ docOffsets []docOffset
+}
+
+// NewDocumentsWriter creates a new documents writer.
+func NewDocumentsWriter() (*DocumentsWriter, error) {
+ return &DocumentsWriter{
+ docDataWriter: docs.NewDataWriter(nil),
+ docIndexWriter: docs.NewIndexWriter(nil),
+ docOffsets: make([]docOffset, 0, defaultInitialDocOffsetsSize),
+ }, nil
+}
+
+// DocumentsWriterOptions is a set of options to pass to the documents writer.
+type DocumentsWriterOptions struct {
+ // Iter is the ID and document iterator, required.
+ Iter index.IDDocIterator
+ // SizeHint is the size hint, optional.
+ SizeHint int
+}
+
+// Reset the documents writer for writing out.
+func (w *DocumentsWriter) Reset(opts DocumentsWriterOptions) {
+ w.iter = opts.Iter
+ w.sizeHint = opts.SizeHint
+ w.docDataWriter.Reset(nil)
+ w.docIndexWriter.Reset(nil)
+ w.docOffsets = w.docOffsets[:0]
+}
+
+// WriteDocumentsData writes out the documents data.
+func (w *DocumentsWriter) WriteDocumentsData(iow io.Writer) error {
+ w.docDataWriter.Reset(iow)
+
+ var currOffset uint64
+ if cap(w.docOffsets) < w.sizeHint {
+ w.docOffsets = make([]docOffset, 0, w.sizeHint)
+ }
+ for w.iter.Next() {
+ id, doc := w.iter.PostingsID(), w.iter.Current()
+ n, err := w.docDataWriter.Write(doc)
+ if err != nil {
+ return err
+ }
+ w.docOffsets = append(w.docOffsets, docOffset{ID: id, offset: currOffset})
+ currOffset += uint64(n)
+ }
+
+ return nil
+}
+
+// WriteDocumentsIndex writes out the documents index data.
+func (w *DocumentsWriter) WriteDocumentsIndex(iow io.Writer) error {
+ w.docIndexWriter.Reset(iow)
+ for _, do := range w.docOffsets {
+ if err := w.docIndexWriter.Write(do.ID, do.offset); err != nil {
+ return err
+ }
+ }
+ return nil
+}
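
The extracted DocumentsWriter is consumed in two ordered phases, matching the inline code it replaces in writer.go: the data pass records per-document offsets that the index pass then replays. A caller sketch, with the iterator and destination writers supplied by the caller:

```go
func writeDocs(iter index.IDDocIterator, numDocs int, dataW, indexW io.Writer) error {
	w, err := fst.NewDocumentsWriter()
	if err != nil {
		return err
	}
	w.Reset(fst.DocumentsWriterOptions{Iter: iter, SizeHint: numDocs})
	// Data must be written first: it records the per-document offsets
	// that WriteDocumentsIndex then replays.
	if err := w.WriteDocumentsData(dataW); err != nil {
		return err
	}
	return w.WriteDocumentsIndex(indexW)
}
```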
diff --git a/src/m3ninx/index/segment/fst/encoding/docs/slice.go b/src/m3ninx/index/segment/fst/encoding/docs/slice.go
index ea3620612d..ccf7fd48b6 100644
--- a/src/m3ninx/index/segment/fst/encoding/docs/slice.go
+++ b/src/m3ninx/index/segment/fst/encoding/docs/slice.go
@@ -24,6 +24,7 @@ import (
"errors"
"github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/m3ninx/index"
"github.com/m3db/m3/src/m3ninx/postings"
)
@@ -31,21 +32,18 @@ var (
errDocNotFound = errors.New("doc not found")
)
+var _ Reader = (*SliceReader)(nil)
+var _ index.DocRetriever = (*SliceReader)(nil)
+
// SliceReader is a docs slice reader for use with documents
// stored in memory.
type SliceReader struct {
- offset postings.ID
- docs []doc.Document
+ docs []doc.Document
}
// NewSliceReader returns a new docs slice reader.
-func NewSliceReader(offset postings.ID, docs []doc.Document) *SliceReader {
- return &SliceReader{offset: offset, docs: docs}
-}
-
-// Base returns the postings ID base offset of the slice reader.
-func (r *SliceReader) Base() postings.ID {
- return r.offset
+func NewSliceReader(docs []doc.Document) *SliceReader {
+ return &SliceReader{docs: docs}
}
// Len returns the number of documents in the slice reader.
@@ -55,10 +53,21 @@ func (r *SliceReader) Len() int {
// Read returns a document from the docs slice reader.
func (r *SliceReader) Read(id postings.ID) (doc.Document, error) {
- idx := int(id - r.offset)
+ idx := int(id)
if idx < 0 || idx >= len(r.docs) {
return doc.Document{}, errDocNotFound
}
return r.docs[idx], nil
}
+
+// Doc implements DocRetriever and reads the document with postings ID.
+func (r *SliceReader) Doc(id postings.ID) (doc.Document, error) {
+ return r.Read(id)
+}
+
+// Iter returns a docs iterator.
+func (r *SliceReader) Iter() index.IDDocIterator {
+ postingsIter := postings.NewRangeIterator(0, postings.ID(r.Len()))
+ return index.NewIDDocIterator(r, postingsIter)
+}
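
With the base offset removed, SliceReader addresses documents from postings ID 0, matching the segment-local ID space used throughout this change. A brief usage sketch (documents is a caller-supplied []doc.Document):

```go
reader := docs.NewSliceReader(documents)
first, err := reader.Read(postings.ID(0)) // first doc; IDs are zero-based now
if err != nil {
	// handle error
}
_ = first
iter := reader.Iter() // yields IDs in [0, Len()) with their documents
_ = iter
```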
diff --git a/src/m3ninx/index/segment/fst/encoding/docs/types.go b/src/m3ninx/index/segment/fst/encoding/docs/types.go
new file mode 100644
index 0000000000..cf531d2608
--- /dev/null
+++ b/src/m3ninx/index/segment/fst/encoding/docs/types.go
@@ -0,0 +1,37 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package docs
+
+import (
+ "github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/m3ninx/index"
+ "github.com/m3db/m3/src/m3ninx/postings"
+)
+
+// Reader is a document reader from an encoded source.
+type Reader interface {
+ // Len is the number of documents contained by the reader.
+ Len() int
+ // Read reads a document with the given postings ID.
+ Read(id postings.ID) (doc.Document, error)
+ // Iter returns a document iterator.
+ Iter() index.IDDocIterator
+}
diff --git a/src/m3ninx/index/segment/fst/fst_mock.go b/src/m3ninx/index/segment/fst/fst_mock.go
index ed19e11d35..85c5e98722 100644
--- a/src/m3ninx/index/segment/fst/fst_mock.go
+++ b/src/m3ninx/index/segment/fst/fst_mock.go
@@ -32,6 +32,7 @@ import (
"github.com/m3db/m3/src/m3ninx/index"
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/postings"
+ "github.com/m3db/m3/src/x/context"
"github.com/golang/mock/gomock"
)
@@ -386,10 +387,10 @@ func (mr *MockSegmentMockRecorder) MatchTerm(arg0, arg1 interface{}) *gomock.Cal
}
// Reader mocks base method
-func (m *MockSegment) Reader() (index.Reader, error) {
+func (m *MockSegment) Reader() (segment.Reader, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reader")
- ret0, _ := ret[0].(index.Reader)
+ ret0, _ := ret[0].(segment.Reader)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -400,6 +401,21 @@ func (mr *MockSegmentMockRecorder) Reader() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockSegment)(nil).Reader))
}
+// SegmentData mocks base method
+func (m *MockSegment) SegmentData(arg0 context.Context) (SegmentData, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SegmentData", arg0)
+ ret0, _ := ret[0].(SegmentData)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SegmentData indicates an expected call of SegmentData
+func (mr *MockSegmentMockRecorder) SegmentData(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SegmentData", reflect.TypeOf((*MockSegment)(nil).SegmentData), arg0)
+}
+
// Size mocks base method
func (m *MockSegment) Size() int64 {
m.ctrl.T.Helper()
diff --git a/src/m3ninx/index/segment/fst/fst_terms_iterator.go b/src/m3ninx/index/segment/fst/fst_terms_iterator.go
index d280bb5d5c..39327dc673 100644
--- a/src/m3ninx/index/segment/fst/fst_terms_iterator.go
+++ b/src/m3ninx/index/segment/fst/fst_terms_iterator.go
@@ -23,10 +23,12 @@ package fst
import (
sgmt "github.com/m3db/m3/src/m3ninx/index/segment"
xerrors "github.com/m3db/m3/src/x/errors"
- "github.com/m3db/vellum"
+
+ "github.com/m3dbx/vellum"
)
type fstTermsIterOpts struct {
+ seg *fsSegment
fst *vellum.FST
finalizeFST bool
}
@@ -67,11 +69,7 @@ func (f *fstTermsIter) clear() {
func (f *fstTermsIter) reset(opts fstTermsIterOpts) {
f.clear()
-
f.opts = opts
- if err := f.iter.Reset(opts.fst, nil, nil, nil); err != nil {
- f.handleIterErr(err)
- }
}
func (f *fstTermsIter) handleIterErr(err error) {
@@ -87,8 +85,19 @@ func (f *fstTermsIter) Next() bool {
return false
}
+ f.opts.seg.RLock()
+ defer f.opts.seg.RUnlock()
+ if f.opts.seg.finalized {
+ f.err = errReaderFinalized
+ return false
+ }
+
if f.firstNext {
f.firstNext = false
+ if err := f.iter.Reset(f.opts.fst, nil, nil, nil); err != nil {
+ f.handleIterErr(err)
+ return false
+ }
} else {
if err := f.iter.Next(); err != nil {
f.handleIterErr(err)
diff --git a/src/m3ninx/index/segment/fst/fst_terms_postings_iterator.go b/src/m3ninx/index/segment/fst/fst_terms_postings_iterator.go
index 09cf5bb58b..b0c2c25224 100644
--- a/src/m3ninx/index/segment/fst/fst_terms_postings_iterator.go
+++ b/src/m3ninx/index/segment/fst/fst_terms_postings_iterator.go
@@ -24,7 +24,7 @@ import (
sgmt "github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/postings"
postingsroaring "github.com/m3db/m3/src/m3ninx/postings/roaring"
- "github.com/m3db/pilosa/roaring"
+ "github.com/m3dbx/pilosa/roaring"
)
// postingsIterRoaringPoolingConfig uses a configuration that avoids allocating
@@ -34,19 +34,15 @@ var postingsIterRoaringPoolingConfig = roaring.ContainerPoolingConfiguration{
MaxArraySize: 0,
MaxRunsSize: 0,
AllocateBitmap: false,
- MaxCapacity: 128,
+ MaxCapacity: 0,
MaxKeysAndContainersSliceLength: 128 * 10,
}
-type postingsListRetriever interface {
- UnmarshalPostingsListBitmap(b *roaring.Bitmap, offset uint64) error
-}
-
type fstTermsPostingsIter struct {
bitmap *roaring.Bitmap
postings postings.List
- retriever postingsListRetriever
+ seg *fsSegment
termsIter *fstTermsIter
currTerm []byte
err error
@@ -66,19 +62,19 @@ var _ sgmt.TermsIterator = &fstTermsPostingsIter{}
func (f *fstTermsPostingsIter) clear() {
f.bitmap.Reset()
- f.retriever = nil
+ f.seg = nil
f.termsIter = nil
f.currTerm = nil
f.err = nil
}
func (f *fstTermsPostingsIter) reset(
- retriever postingsListRetriever,
+ seg *fsSegment,
termsIter *fstTermsIter,
) {
f.clear()
- f.retriever = retriever
+ f.seg = seg
f.termsIter = termsIter
}
@@ -93,8 +89,12 @@ func (f *fstTermsPostingsIter) Next() bool {
}
f.currTerm = f.termsIter.Current()
- f.err = f.retriever.UnmarshalPostingsListBitmap(f.bitmap,
- f.termsIter.CurrentOffset())
+ currOffset := f.termsIter.CurrentOffset()
+
+ f.seg.RLock()
+ f.err = f.seg.unmarshalPostingsListBitmapNotClosedMaybeFinalizedWithLock(f.bitmap,
+ currOffset)
+ f.seg.RUnlock()
return f.err == nil
}
diff --git a/src/m3ninx/index/segment/fst/fst_writer.go b/src/m3ninx/index/segment/fst/fst_writer.go
index aeb46128d2..4514f860cc 100644
--- a/src/m3ninx/index/segment/fst/fst_writer.go
+++ b/src/m3ninx/index/segment/fst/fst_writer.go
@@ -24,7 +24,7 @@ import (
"errors"
"io"
- "github.com/m3db/vellum"
+ "github.com/m3dbx/vellum"
)
var (
diff --git a/src/m3ninx/index/segment/fst/regexp/regexp.go b/src/m3ninx/index/segment/fst/regexp/regexp.go
index 1166b92e46..cd21d99f9f 100644
--- a/src/m3ninx/index/segment/fst/regexp/regexp.go
+++ b/src/m3ninx/index/segment/fst/regexp/regexp.go
@@ -25,7 +25,7 @@ package regexp
import (
"regexp/syntax"
- vregexp "github.com/m3db/vellum/regexp"
+ vregexp "github.com/m3dbx/vellum/regexp"
)
// ParseRegexp parses the provided regexp pattern into an equivalent matching automaton, and
diff --git a/src/m3ninx/index/segment/fst/segment.go b/src/m3ninx/index/segment/fst/segment.go
index f57d76c1b5..9940e9ef47 100644
--- a/src/m3ninx/index/segment/fst/segment.go
+++ b/src/m3ninx/index/segment/fst/segment.go
@@ -41,12 +41,13 @@ import (
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/mmap"
- pilosaroaring "github.com/m3db/pilosa/roaring"
- "github.com/m3db/vellum"
+ pilosaroaring "github.com/m3dbx/pilosa/roaring"
+ "github.com/m3dbx/vellum"
)
var (
errReaderClosed = errors.New("segment is closed")
+ errReaderFinalized = errors.New("segment is finalized")
errReaderNilRegexp = errors.New("nil regexp provided")
errUnsupportedMajorVersion = errors.New("unsupported major version")
errDocumentsDataUnset = errors.New("documents data bytes are not set")
@@ -71,7 +72,7 @@ type SegmentData struct {
// the docs data and docs idx data if the documents
// already reside in memory and we want to use the
// in memory references instead.
- DocsReader *docs.SliceReader
+ DocsReader docs.Reader
Closer io.Closer
}
@@ -130,38 +131,27 @@ func NewSegment(data SegmentData, opts Options) (Segment, error) {
}
var (
- docsSliceReader = data.DocsReader
- docsDataReader *docs.DataReader
- docsIndexReader *docs.IndexReader
- startInclusive postings.ID
- endExclusive postings.ID
+ docsThirdPartyReader = data.DocsReader
+ docsDataReader *docs.DataReader
+ docsIndexReader *docs.IndexReader
)
- if docsSliceReader != nil {
- startInclusive = docsSliceReader.Base()
- endExclusive = startInclusive + postings.ID(docsSliceReader.Len())
- } else {
+ if docsThirdPartyReader == nil {
docsDataReader = docs.NewDataReader(data.DocsData.Bytes)
docsIndexReader, err = docs.NewIndexReader(data.DocsIdxData.Bytes)
if err != nil {
return nil, fmt.Errorf("unable to load documents index: %v", err)
}
-
- // NB(jeromefroe): Currently we assume the postings IDs are contiguous.
- startInclusive = docsIndexReader.Base()
- endExclusive = startInclusive + postings.ID(docsIndexReader.Len())
}
s := &fsSegment{
- fieldsFST: fieldsFST,
- docsDataReader: docsDataReader,
- docsIndexReader: docsIndexReader,
- docsSliceReader: docsSliceReader,
+ fieldsFST: fieldsFST,
+ docsDataReader: docsDataReader,
+ docsIndexReader: docsIndexReader,
+ docsThirdPartyReader: docsThirdPartyReader,
- data: data,
- opts: opts,
- numDocs: metadata.NumDocs,
- startInclusive: startInclusive,
- endExclusive: endExclusive,
+ data: data,
+ opts: opts,
+ numDocs: metadata.NumDocs,
}
// NB(r): The segment uses the context finalization to finalize
@@ -179,18 +169,30 @@ var _ segment.ImmutableSegment = (*fsSegment)(nil)
type fsSegment struct {
sync.RWMutex
- ctx context.Context
- closed bool
- fieldsFST *vellum.FST
- docsDataReader *docs.DataReader
- docsIndexReader *docs.IndexReader
- docsSliceReader *docs.SliceReader
- data SegmentData
- opts Options
+ ctx context.Context
+ closed bool
+ finalized bool
+ fieldsFST *vellum.FST
+ docsDataReader *docs.DataReader
+ docsIndexReader *docs.IndexReader
+ docsThirdPartyReader docs.Reader
+ data SegmentData
+ opts Options
+
+ numDocs int64
+}
+
+func (r *fsSegment) SegmentData(ctx context.Context) (SegmentData, error) {
+ r.RLock()
+ defer r.RUnlock()
+ if r.closed {
+ return SegmentData{}, errReaderClosed
+ }
- numDocs int64
- startInclusive postings.ID
- endExclusive postings.ID
+ // NB(r): Ensure that we do not release mmaps and other resources
+ // until all readers have been closed.
+ r.ctx.DependsOn(ctx)
+ return r.data, nil
}
func (r *fsSegment) Size() int64 {
@@ -236,14 +238,14 @@ func (r *fsSegment) ContainsField(field []byte) (bool, error) {
return r.fieldsFST.Contains(field)
}
-func (r *fsSegment) Reader() (index.Reader, error) {
+func (r *fsSegment) Reader() (sgmt.Reader, error) {
r.RLock()
defer r.RUnlock()
if r.closed {
return nil, errReaderClosed
}
- reader := newReader(r)
+ reader := newReader(r, r.opts)
// NB(r): Ensure that we do not release, mmaps, etc
// until all readers have been closed.
@@ -267,10 +269,13 @@ func (r *fsSegment) Close() error {
}
func (r *fsSegment) Finalize() {
+ r.Lock()
r.fieldsFST.Close()
if r.data.Closer != nil {
r.data.Closer.Close()
}
+ r.finalized = true
+ r.Unlock()
}
func (r *fsSegment) FieldsIterable() sgmt.FieldsIterable {
@@ -286,6 +291,7 @@ func (r *fsSegment) Fields() (sgmt.FieldsIterator, error) {
iter := newFSTTermsIter()
iter.reset(fstTermsIterOpts{
+ seg: r,
fst: r.fieldsFST,
finalizeFST: false,
})
@@ -337,12 +343,31 @@ type termsIterable struct {
postingsIter *fstTermsPostingsIter
}
+func newTermsIterable(r *fsSegment) *termsIterable {
+ return &termsIterable{
+ r: r,
+ fieldsIter: newFSTTermsIter(),
+ postingsIter: newFSTTermsPostingsIter(),
+ }
+}
+
func (i *termsIterable) Terms(field []byte) (sgmt.TermsIterator, error) {
i.r.RLock()
defer i.r.RUnlock()
if i.r.closed {
return nil, errReaderClosed
}
+ return i.termsNotClosedMaybeFinalizedWithRLock(field)
+}
+
+func (i *termsIterable) termsNotClosedMaybeFinalizedWithRLock(
+ field []byte,
+) (sgmt.TermsIterator, error) {
+ // NB(r): Not closed, but could be finalized (i.e. a closed segment
+ // reader calling terms after this segment is finalized).
+ if i.r.finalized {
+ return nil, errReaderFinalized
+ }
termsFST, exists, err := i.r.retrieveTermsFSTWithRLock(field)
if err != nil {
@@ -354,6 +379,7 @@ func (i *termsIterable) Terms(field []byte) (sgmt.TermsIterator, error) {
}
i.fieldsIter.reset(fstTermsIterOpts{
+ seg: i.r,
fst: termsFST,
finalizeFST: true,
})
@@ -368,6 +394,14 @@ func (r *fsSegment) UnmarshalPostingsListBitmap(b *pilosaroaring.Bitmap, offset
return errReaderClosed
}
+ return r.unmarshalPostingsListBitmapNotClosedMaybeFinalizedWithLock(b, offset)
+}
+
+func (r *fsSegment) unmarshalPostingsListBitmapNotClosedMaybeFinalizedWithLock(b *pilosaroaring.Bitmap, offset uint64) error {
+ if r.finalized {
+ return errReaderFinalized
+ }
+
postingsBytes, err := r.retrieveBytesWithRLock(r.data.PostingsData.Bytes, offset)
if err != nil {
return fmt.Errorf("unable to retrieve postings data: %v", err)
@@ -383,9 +417,21 @@ func (r *fsSegment) MatchField(field []byte) (postings.List, error) {
if r.closed {
return nil, errReaderClosed
}
+ return r.matchFieldNotClosedMaybeFinalizedWithRLock(field)
+}
+
+func (r *fsSegment) matchFieldNotClosedMaybeFinalizedWithRLock(
+ field []byte,
+) (postings.List, error) {
+ // NB(r): Not closed, but could be finalized (i.e. a closed segment
+ // reader calling match field after this segment is finalized).
+ if r.finalized {
+ return nil, errReaderFinalized
+ }
+
if !r.data.Version.supportsFieldPostingsList() {
// i.e. don't have the field level postings list, so fall back to regexp
- return r.matchRegexpWithRLock(field, index.DotStarCompiledRegex())
+ return r.matchRegexpNotClosedMaybeFinalizedWithRLock(field, index.DotStarCompiledRegex())
}
termsFSTOffset, exists, err := r.fieldsFST.Get(field)
@@ -417,6 +463,17 @@ func (r *fsSegment) MatchTerm(field []byte, term []byte) (postings.List, error)
if r.closed {
return nil, errReaderClosed
}
+ return r.matchTermNotClosedMaybeFinalizedWithRLock(field, term)
+}
+
+func (r *fsSegment) matchTermNotClosedMaybeFinalizedWithRLock(
+ field, term []byte,
+) (postings.List, error) {
+ // NB(r): Not closed, but could be finalized (i.e. a closed segment
+ // reader calling match term after this segment is finalized).
+ if r.finalized {
+ return nil, errReaderFinalized
+ }
termsFST, exists, err := r.retrieveTermsFSTWithRLock(field)
if err != nil {
@@ -453,17 +510,27 @@ func (r *fsSegment) MatchTerm(field []byte, term []byte) (postings.List, error)
return pl, nil
}
-func (r *fsSegment) MatchRegexp(field []byte, compiled index.CompiledRegex) (postings.List, error) {
+func (r *fsSegment) MatchRegexp(
+ field []byte,
+ compiled index.CompiledRegex,
+) (postings.List, error) {
r.RLock()
- pl, err := r.matchRegexpWithRLock(field, compiled)
- r.RUnlock()
- return pl, err
-}
-
-func (r *fsSegment) matchRegexpWithRLock(field []byte, compiled index.CompiledRegex) (postings.List, error) {
+ defer r.RUnlock()
if r.closed {
return nil, errReaderClosed
}
+ return r.matchRegexpNotClosedMaybeFinalizedWithRLock(field, compiled)
+}
+
+func (r *fsSegment) matchRegexpNotClosedMaybeFinalizedWithRLock(
+ field []byte,
+ compiled index.CompiledRegex,
+) (postings.List, error) {
+ // NB(r): Not closed, but could be finalized (i.e. a closed segment
+ // reader calling match regexp after this segment is finalized).
+ if r.finalized {
+ return nil, errReaderFinalized
+ }
re := compiled.FST
if re == nil {
@@ -532,9 +599,18 @@ func (r *fsSegment) MatchAll() (postings.MutableList, error) {
if r.closed {
return nil, errReaderClosed
}
+ return r.matchAllNotClosedMaybeFinalizedWithRLock()
+}
+
+func (r *fsSegment) matchAllNotClosedMaybeFinalizedWithRLock() (postings.MutableList, error) {
+ // NB(r): Not closed, but could be finalized (i.e. a closed segment
+ // reader calling match all after this segment is finalized).
+ if r.finalized {
+ return nil, errReaderFinalized
+ }
pl := r.opts.PostingsListPool().Get()
- err := pl.AddRange(r.startInclusive, r.endExclusive)
+ err := pl.AddRange(0, postings.ID(r.numDocs))
if err != nil {
return nil, err
}
@@ -548,10 +624,19 @@ func (r *fsSegment) Doc(id postings.ID) (doc.Document, error) {
if r.closed {
return doc.Document{}, errReaderClosed
}
+ return r.docNotClosedMaybeFinalizedWithRLock(id)
+}
+
+func (r *fsSegment) docNotClosedMaybeFinalizedWithRLock(id postings.ID) (doc.Document, error) {
+ // NB(r): Not closed, but could be finalized (i.e. a closed segment
+ // reader calling doc after this segment is finalized).
+ if r.finalized {
+ return doc.Document{}, errReaderFinalized
+ }
// If using docs slice reader, return from the in memory slice reader
- if r.docsSliceReader != nil {
- return r.docsSliceReader.Read(id)
+ if r.docsThirdPartyReader != nil {
+ return r.docsThirdPartyReader.Read(id)
}
offset, err := r.docsIndexReader.Read(id)
@@ -568,8 +653,20 @@ func (r *fsSegment) Docs(pl postings.List) (doc.Iterator, error) {
if r.closed {
return nil, errReaderClosed
}
+ return r.docsNotClosedMaybeFinalizedWithRLock(r, pl)
+}
+
+func (r *fsSegment) docsNotClosedMaybeFinalizedWithRLock(
+ retriever index.DocRetriever,
+ pl postings.List,
+) (doc.Iterator, error) {
+ // NB(r): Not closed, but could be finalized (i.e. a closed segment
+ // reader calling docs after this segment is finalized).
+ if r.finalized {
+ return nil, errReaderFinalized
+ }
- return index.NewIDDocIterator(r, pl.Iterator()), nil
+ return index.NewIDDocIterator(retriever, pl.Iterator()), nil
}
func (r *fsSegment) AllDocs() (index.IDDocIterator, error) {
@@ -578,8 +675,20 @@ func (r *fsSegment) AllDocs() (index.IDDocIterator, error) {
if r.closed {
return nil, errReaderClosed
}
- pi := postings.NewRangeIterator(r.startInclusive, r.endExclusive)
- return index.NewIDDocIterator(r, pi), nil
+ return r.allDocsNotClosedMaybeFinalizedWithRLock(r)
+}
+
+func (r *fsSegment) allDocsNotClosedMaybeFinalizedWithRLock(
+ retriever index.DocRetriever,
+) (index.IDDocIterator, error) {
+ // NB(r): Not closed, but could be finalized (i.e. a closed segment
+ // reader calling all docs after this segment is finalized).
+ if r.finalized {
+ return nil, errReaderFinalized
+ }
+
+ pi := postings.NewRangeIterator(0, postings.ID(r.numDocs))
+ return index.NewIDDocIterator(retriever, pi), nil
}
func (r *fsSegment) retrievePostingsListWithRLock(postingsOffset uint64) (postings.List, error) {
@@ -740,109 +849,164 @@ func (r *fsSegment) retrieveBytesWithRLock(base []byte, offset uint64) ([]byte,
return base[payloadStart:payloadEnd], nil
}
-var _ index.Reader = &fsSegmentReader{}
+var _ sgmt.Reader = (*fsSegmentReader)(nil)
+// fsSegmentReader is not safe for concurrent use and relies on the underlying
+// segment for synchronization.
type fsSegmentReader struct {
- sync.RWMutex
- closed bool
- ctx context.Context
- fsSegment *fsSegment
+ closed bool
+ ctx context.Context
+ fsSegment *fsSegment
+ termsIterable *termsIterable
}
func newReader(
fsSegment *fsSegment,
+ opts Options,
) *fsSegmentReader {
return &fsSegmentReader{
- ctx: context.NewContext(),
+ ctx: opts.ContextPool().Get(),
fsSegment: fsSegment,
}
}
+func (sr *fsSegmentReader) Fields() (sgmt.FieldsIterator, error) {
+ if sr.closed {
+ return nil, errReaderClosed
+ }
+
+ iter := newFSTTermsIter()
+ iter.reset(fstTermsIterOpts{
+ seg: sr.fsSegment,
+ fst: sr.fsSegment.fieldsFST,
+ finalizeFST: false,
+ })
+ return iter, nil
+}
+
+func (sr *fsSegmentReader) ContainsField(field []byte) (bool, error) {
+ if sr.closed {
+ return false, errReaderClosed
+ }
+
+ sr.fsSegment.RLock()
+ defer sr.fsSegment.RUnlock()
+ if sr.fsSegment.finalized {
+ return false, errReaderFinalized
+ }
+
+ return sr.fsSegment.fieldsFST.Contains(field)
+}
+
+func (sr *fsSegmentReader) Terms(field []byte) (sgmt.TermsIterator, error) {
+ if sr.closed {
+ return nil, errReaderClosed
+ }
+ if sr.termsIterable == nil {
+ sr.termsIterable = newTermsIterable(sr.fsSegment)
+ }
+ sr.fsSegment.RLock()
+ iter, err := sr.termsIterable.termsNotClosedMaybeFinalizedWithRLock(field)
+ sr.fsSegment.RUnlock()
+ return iter, err
+}
+
func (sr *fsSegmentReader) MatchField(field []byte) (postings.List, error) {
- sr.RLock()
if sr.closed {
- sr.RUnlock()
return nil, errReaderClosed
}
- pl, err := sr.fsSegment.MatchField(field)
- sr.RUnlock()
+ // NB(r): We are allowed to call match field after Close is called on
+ // the segment, but not after it is finalized.
+ sr.fsSegment.RLock()
+ pl, err := sr.fsSegment.matchFieldNotClosedMaybeFinalizedWithRLock(field)
+ sr.fsSegment.RUnlock()
return pl, err
}
func (sr *fsSegmentReader) MatchTerm(field []byte, term []byte) (postings.List, error) {
- sr.RLock()
if sr.closed {
- sr.RUnlock()
return nil, errReaderClosed
}
- pl, err := sr.fsSegment.MatchTerm(field, term)
- sr.RUnlock()
+ // NB(r): We are allowed to call match term after Close is called on
+ // the segment, but not after it is finalized.
+ sr.fsSegment.RLock()
+ pl, err := sr.fsSegment.matchTermNotClosedMaybeFinalizedWithRLock(field, term)
+ sr.fsSegment.RUnlock()
return pl, err
}
-func (sr *fsSegmentReader) MatchRegexp(field []byte, compiled index.CompiledRegex) (postings.List, error) {
- sr.RLock()
+func (sr *fsSegmentReader) MatchRegexp(
+ field []byte,
+ compiled index.CompiledRegex,
+) (postings.List, error) {
if sr.closed {
- sr.RUnlock()
return nil, errReaderClosed
}
- pl, err := sr.fsSegment.MatchRegexp(field, compiled)
- sr.RUnlock()
+ // NB(r): We are allowed to call match regexp after Close is called on
+ // the segment, but not after it is finalized.
+ sr.fsSegment.RLock()
+ pl, err := sr.fsSegment.matchRegexpNotClosedMaybeFinalizedWithRLock(field, compiled)
+ sr.fsSegment.RUnlock()
return pl, err
}
func (sr *fsSegmentReader) MatchAll() (postings.MutableList, error) {
- sr.RLock()
if sr.closed {
- sr.RUnlock()
return nil, errReaderClosed
}
- pl, err := sr.fsSegment.MatchAll()
- sr.RUnlock()
+ // NB(r): We are allowed to call match all after Close is called on
+ // the segment, but not after it is finalized.
+ sr.fsSegment.RLock()
+ pl, err := sr.fsSegment.matchAllNotClosedMaybeFinalizedWithRLock()
+ sr.fsSegment.RUnlock()
return pl, err
}
func (sr *fsSegmentReader) Doc(id postings.ID) (doc.Document, error) {
- sr.RLock()
if sr.closed {
- sr.RUnlock()
return doc.Document{}, errReaderClosed
}
- pl, err := sr.fsSegment.Doc(id)
- sr.RUnlock()
+ // NB(r): We are allowed to call doc after Close is called on
+ // the segment, but not after it is finalized.
+ sr.fsSegment.RLock()
+ pl, err := sr.fsSegment.docNotClosedMaybeFinalizedWithRLock(id)
+ sr.fsSegment.RUnlock()
return pl, err
}
func (sr *fsSegmentReader) Docs(pl postings.List) (doc.Iterator, error) {
- sr.RLock()
if sr.closed {
- sr.RUnlock()
return nil, errReaderClosed
}
- iter, err := sr.fsSegment.Docs(pl)
- sr.RUnlock()
+ // NB(r): We are allowed to call docs after Close is called on
+ // the segment, but not after it is finalized.
+ // Also make sure the doc retriever is the reader, not the segment, so
+ // that only the is-finalized check is performed, not the is-closed check.
+ sr.fsSegment.RLock()
+ iter, err := sr.fsSegment.docsNotClosedMaybeFinalizedWithRLock(sr, pl)
+ sr.fsSegment.RUnlock()
return iter, err
}
func (sr *fsSegmentReader) AllDocs() (index.IDDocIterator, error) {
- sr.RLock()
if sr.closed {
- sr.RUnlock()
return nil, errReaderClosed
}
- iter, err := sr.fsSegment.AllDocs()
- sr.RUnlock()
+ // NB(r): We are allowed to call all docs after Close is called on
+ // the segment, but not after it is finalized.
+ // Also make sure the doc retriever is the reader, not the segment, so
+ // that only the is-finalized check is performed, not the is-closed check.
+ sr.fsSegment.RLock()
+ iter, err := sr.fsSegment.allDocsNotClosedMaybeFinalizedWithRLock(sr)
+ sr.fsSegment.RUnlock()
return iter, err
}
func (sr *fsSegmentReader) Close() error {
- sr.Lock()
if sr.closed {
- sr.Unlock()
return errReaderClosed
}
sr.closed = true
- sr.Unlock()
// Close the context so that segment doesn't need to track this any longer.
sr.ctx.Close()
return nil
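
Taken together, the segment now distinguishes two terminal states: closed (Close was called; new readers are refused, but existing readers keep working) and finalized (the context finalizer ran; the FST and mmaps are released, so everything refuses). A compact paraphrase of the guard pattern each public method now follows (publicOp is a hypothetical stand-in):

```go
// Guard pattern, paraphrased from the methods above.
func (r *fsSegment) publicOp() error {
	r.RLock()
	defer r.RUnlock()
	if r.closed {
		return errReaderClosed // segment callers: refuse after Close
	}
	return r.opNotClosedMaybeFinalizedWithRLock()
}

func (r *fsSegment) opNotClosedMaybeFinalizedWithRLock() error {
	if r.finalized {
		return errReaderFinalized // reader callers: refuse only after Finalize
	}
	// ... actual work under the read lock ...
	return nil
}
```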
diff --git a/src/m3ninx/index/segment/fst/types.go b/src/m3ninx/index/segment/fst/types.go
index 51c1002c80..bba1320930 100644
--- a/src/m3ninx/index/segment/fst/types.go
+++ b/src/m3ninx/index/segment/fst/types.go
@@ -26,6 +26,7 @@ import (
"github.com/m3db/m3/src/m3ninx/index"
sgmt "github.com/m3db/m3/src/m3ninx/index/segment"
+ "github.com/m3db/m3/src/x/context"
)
const (
@@ -57,6 +58,11 @@ var (
type Segment interface {
sgmt.ImmutableSegment
index.Readable
+
+ // SegmentData returns the segment data used to create the segment.
+ // Note: The context must be closed when done with the data
+ // so that resources can be freed safely.
+ SegmentData(ctx context.Context) (SegmentData, error)
}
// Writer writes out a FST segment from the provided elements.
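
SegmentData ties the lifetime of the returned raw data to the caller's context via DependsOn, so mmapped bytes cannot be finalized out from under a consumer. A caller sketch using the x/context package from the imports:

```go
ctx := context.NewContext()
data, err := seg.SegmentData(ctx)
if err != nil {
	return err
}
// Use data.DocsData, data.PostingsData, etc. while ctx remains open.
ctx.Close() // drops the dependency so the segment may finalize
```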
diff --git a/src/m3ninx/index/segment/fst/writer.go b/src/m3ninx/index/segment/fst/writer.go
index d994fdab7f..c52b96943b 100644
--- a/src/m3ninx/index/segment/fst/writer.go
+++ b/src/m3ninx/index/segment/fst/writer.go
@@ -27,12 +27,9 @@ import (
"github.com/m3db/m3/src/m3ninx/generated/proto/fswriter"
sgmt "github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/fst/encoding"
- "github.com/m3db/m3/src/m3ninx/index/segment/fst/encoding/docs"
"github.com/m3db/m3/src/m3ninx/postings"
"github.com/m3db/m3/src/m3ninx/postings/pilosa"
- "github.com/m3db/m3/src/m3ninx/postings/roaring"
"github.com/m3db/m3/src/m3ninx/x"
- pilosaroaring "github.com/m3db/pilosa/roaring"
"github.com/golang/protobuf/proto"
)
@@ -54,24 +51,19 @@ type writer struct {
intEncoder *encoding.Encoder
postingsEncoder *pilosa.Encoder
fstWriter *fstWriter
- docDataWriter *docs.DataWriter
- docIndexWriter *docs.IndexWriter
+ docsWriter *DocumentsWriter
metadata []byte
docsDataFileWritten bool
postingsFileWritten bool
fstTermsFileWritten bool
- docOffsets []docOffset
fstTermsOffsets []uint64
termPostingsOffsets []uint64
// only used by versions >= 1.1
- fieldPostingsOffsets []uint64
- fieldsPilosaBitmap *pilosaroaring.Bitmap
- fieldsPostingsList postings.MutableList
- fieldsPostingsNeedsUnion []postings.List
- fieldData *fswriter.FieldData
- fieldBuffer proto.Buffer
+ fieldPostingsOffsets []uint64
+ fieldData *fswriter.FieldData
+ fieldBuffer proto.Buffer
}
// WriterOptions is a set of options used when writing an FST.
@@ -99,25 +91,22 @@ func newWriterWithVersion(opts WriterOptions, vers *Version) (Writer, error) {
return nil, err
}
- bitmap := pilosaroaring.NewBitmapWithDefaultPooling(defaultPilosaRoaringMaxContainerSize)
- pl := roaring.NewPostingsListFromBitmap(bitmap)
+ docsWriter, err := NewDocumentsWriter()
+ if err != nil {
+ return nil, err
+ }
return &writer{
version: v,
intEncoder: encoding.NewEncoder(defaultInitialIntEncoderSize),
postingsEncoder: pilosa.NewEncoder(),
fstWriter: newFSTWriter(opts),
- docDataWriter: docs.NewDataWriter(nil),
- docIndexWriter: docs.NewIndexWriter(nil),
- docOffsets: make([]docOffset, 0, defaultInitialDocOffsetsSize),
+ docsWriter: docsWriter,
fstTermsOffsets: make([]uint64, 0, defaultInitialFSTTermsOffsetsSize),
termPostingsOffsets: make([]uint64, 0, defaultInitialPostingsOffsetsSize),
- fieldPostingsOffsets: make([]uint64, 0, defaultInitialPostingsOffsetsSize),
- fieldsPilosaBitmap: bitmap,
- fieldsPostingsList: pl,
- fieldsPostingsNeedsUnion: make([]postings.List, 0, defaultInitialPostingsNeedsUnionSize),
- fieldData: &fswriter.FieldData{},
+ fieldPostingsOffsets: make([]uint64, 0, defaultInitialPostingsOffsetsSize),
+ fieldData: &fswriter.FieldData{},
}, nil
}
@@ -127,34 +116,20 @@ func (w *writer) clear() {
w.fstWriter.Reset(nil)
w.intEncoder.Reset()
w.postingsEncoder.Reset()
- w.docDataWriter.Reset(nil)
- w.docIndexWriter.Reset(nil)
+ w.docsWriter.Reset(DocumentsWriterOptions{})
w.metadata = nil
w.docsDataFileWritten = false
w.postingsFileWritten = false
w.fstTermsFileWritten = false
- // NB(r): Use a call to reset here instead of creating a new bitmaps
- // when roaring supports a call to reset.
- w.docOffsets = w.docOffsets[:0]
w.fstTermsOffsets = w.fstTermsOffsets[:0]
w.termPostingsOffsets = w.termPostingsOffsets[:0]
w.fieldPostingsOffsets = w.fieldPostingsOffsets[:0]
- w.fieldsPilosaBitmap.Reset()
- w.fieldsPostingsList.Reset()
- w.resetFieldsPostingsNeedsUnion()
w.fieldData.Reset()
w.fieldBuffer.Reset()
}
-func (w *writer) resetFieldsPostingsNeedsUnion() {
- for i := range w.fieldsPostingsNeedsUnion {
- w.fieldsPostingsNeedsUnion[i] = nil
- }
- w.fieldsPostingsNeedsUnion = w.fieldsPostingsNeedsUnion[:0]
-}
-
func (w *writer) Reset(b sgmt.Builder) error {
w.clear()
@@ -189,8 +164,6 @@ func (w *writer) Metadata() []byte {
}
func (w *writer) WriteDocumentsData(iow io.Writer) error {
- w.docDataWriter.Reset(iow)
-
iter, err := w.builder.AllDocs()
closer := x.NewSafeCloser(iter)
defer closer.Close()
@@ -198,18 +171,12 @@ func (w *writer) WriteDocumentsData(iow io.Writer) error {
return err
}
- var currOffset uint64
- if int64(cap(w.docOffsets)) < w.size {
- w.docOffsets = make([]docOffset, 0, w.size)
- }
- for iter.Next() {
- id, doc := iter.PostingsID(), iter.Current()
- n, err := w.docDataWriter.Write(doc)
- if err != nil {
- return err
- }
- w.docOffsets = append(w.docOffsets, docOffset{ID: id, offset: currOffset})
- currOffset += uint64(n)
+ w.docsWriter.Reset(DocumentsWriterOptions{
+ Iter: iter,
+ SizeHint: int(w.size),
+ })
+ if err := w.docsWriter.WriteDocumentsData(iow); err != nil {
+ return err
}
w.docsDataFileWritten = true
@@ -221,14 +188,7 @@ func (w *writer) WriteDocumentsIndex(iow io.Writer) error {
return fmt.Errorf("documents data file has to be written before documents index file")
}
- w.docIndexWriter.Reset(iow)
- for _, do := range w.docOffsets {
- if err := w.docIndexWriter.Write(do.ID, do.offset); err != nil {
- return err
- }
- }
-
- return nil
+ return w.docsWriter.WriteDocumentsIndex(iow)
}
func (w *writer) WritePostingsOffsets(iow io.Writer) error {
@@ -247,23 +207,20 @@ func (w *writer) WritePostingsOffsets(iow io.Writer) error {
}
// retrieve known fields
- fields, err := w.builder.Fields()
+ fields, err := w.builder.FieldsPostingsList()
if err != nil {
return err
}
// for each known field
for fields.Next() {
- f := fields.Current()
+ f, fieldPostingsList := fields.Current()
// retrieve known terms for current field
terms, err := w.builder.Terms(f)
if err != nil {
return err
}
- // Reset the fields postings lists needs union slice.
- w.resetFieldsPostingsNeedsUnion()
-
// for each term corresponding to the current field
for terms.Next() {
_, pl := terms.Current()
@@ -276,23 +233,12 @@ func (w *writer) WritePostingsOffsets(iow io.Writer) error {
currentOffset += n
// track current offset as the offset for the current field/term
w.termPostingsOffsets = append(w.termPostingsOffsets, currentOffset)
-
- // update field level postings list
- if writeFieldsPostingList {
- w.fieldsPostingsNeedsUnion = append(w.fieldsPostingsNeedsUnion, pl)
- }
}
// write the field level postings list
if writeFieldsPostingList {
- // Union in a single pass all the postings lists that needs a union.
- w.fieldsPostingsList.Reset()
- w.fieldsPostingsList.UnionMany(w.fieldsPostingsNeedsUnion)
- // Release refs to any postings lists we held refs to with the slice.
- w.resetFieldsPostingsNeedsUnion()
-
// Write the unioned postings list out.
- n, err := writePL(w.fieldsPostingsList)
+ n, err := writePL(fieldPostingsList)
if err != nil {
return err
}
@@ -334,7 +280,7 @@ func (w *writer) WriteFSTTerms(iow io.Writer) error {
)
// retrieve all known fields
- fields, err := w.builder.Fields()
+ fields, err := w.builder.FieldsPostingsList()
if err != nil {
return err
}
@@ -347,7 +293,7 @@ func (w *writer) WriteFSTTerms(iow io.Writer) error {
// build a fst for each field's terms
for fields.Next() {
- f := fields.Current()
+ f, _ := fields.Current()
// write fields level postings list if required
if writeFieldsPostingList {
@@ -470,14 +416,14 @@ func (w *writer) WriteFSTFields(iow io.Writer) error {
offsets := w.fstTermsOffsets
// retrieve all known fields
- fields, err := w.builder.Fields()
+ fields, err := w.builder.FieldsPostingsList()
if err != nil {
return err
}
// insert each field into fst
for fields.Next() {
- f := fields.Current()
+ f, _ := fields.Current()
// get offset for this field's term fst
if len(offsets) == 0 {
diff --git a/src/m3ninx/index/segment/fst/writer_reader_test.go b/src/m3ninx/index/segment/fst/writer_reader_test.go
index 52bcb605bc..5fe5a60a36 100644
--- a/src/m3ninx/index/segment/fst/writer_reader_test.go
+++ b/src/m3ninx/index/segment/fst/writer_reader_test.go
@@ -476,7 +476,6 @@ func TestFieldsEqualsParallel(t *testing.T) {
func TestPostingsListLifecycleSimple(t *testing.T) {
_, fstSeg := newTestSegments(t, fewTestDocuments)
-
require.NoError(t, fstSeg.Close())
_, err := fstSeg.FieldsIterable().Fields()
@@ -498,6 +497,77 @@ func TestPostingsListReaderLifecycle(t *testing.T) {
require.NoError(t, err)
}
+func TestSegmentReaderValidUntilClose(t *testing.T) {
+ _, fstSeg := newTestSegments(t, fewTestDocuments)
+
+ reader, err := fstSeg.Reader()
+ require.NoError(t, err)
+
+ // Close segment early, expect reader still valid until close.
+ err = fstSeg.Close()
+ require.NoError(t, err)
+
+ // Make sure all methods allow for calls until the reader is closed.
+ var (
+ list postings.List
+ )
+ list, err = reader.MatchField([]byte("fruit"))
+ require.NoError(t, err)
+ assertPostingsList(t, list, []postings.ID{0, 1, 2})
+
+ list, err = reader.MatchTerm([]byte("color"), []byte("yellow"))
+ require.NoError(t, err)
+ assertPostingsList(t, list, []postings.ID{0, 2})
+
+ re, err := index.CompileRegex([]byte("^.*apple$"))
+ require.NoError(t, err)
+ list, err = reader.MatchRegexp([]byte("fruit"), re)
+ require.NoError(t, err)
+ assertPostingsList(t, list, []postings.ID{1, 2})
+
+ list, err = reader.MatchAll()
+ require.NoError(t, err)
+ assertPostingsList(t, list, []postings.ID{0, 1, 2})
+
+ _, err = reader.Doc(0)
+ require.NoError(t, err)
+
+ _, err = reader.Docs(list)
+ require.NoError(t, err)
+
+ _, err = reader.AllDocs()
+ require.NoError(t, err)
+
+ // Test that returned iterators also work after the segment is closed.
+ re, err = index.CompileRegex([]byte("^.*apple$"))
+ require.NoError(t, err)
+ list, err = reader.MatchRegexp([]byte("fruit"), re)
+ require.NoError(t, err)
+ iter, err := reader.Docs(list)
+ require.NoError(t, err)
+ var docs int
+ for iter.Next() {
+ docs++
+ var fruitField doc.Field
+ for _, field := range iter.Current().Fields {
+ if bytes.Equal(field.Name, []byte("fruit")) {
+ fruitField = field
+ break
+ }
+ }
+ require.True(t, bytes.HasSuffix(fruitField.Value, []byte("apple")))
+ }
+ require.NoError(t, iter.Err())
+ require.NoError(t, iter.Close())
+
+ // Now close.
+ require.NoError(t, reader.Close())
+
+ // Make sure reader now starts returning errors.
+ _, err = reader.MatchTerm([]byte("color"), []byte("yellow"))
+ require.Error(t, err)
+}
+
func newTestSegments(t *testing.T, docs []doc.Document) (memSeg sgmt.MutableSegment, fstSeg sgmt.Segment) {
s := newTestMemSegment(t)
for _, d := range docs {
@@ -509,7 +579,7 @@ func newTestSegments(t *testing.T, docs []doc.Document) (memSeg sgmt.MutableSegm
func newTestMemSegment(t *testing.T) sgmt.MutableSegment {
opts := mem.NewOptions()
- s, err := mem.NewSegment(postings.ID(0), opts)
+ s, err := mem.NewSegment(opts)
require.NoError(t, err)
return s
}
@@ -535,6 +605,48 @@ func assertDocsEqual(t *testing.T, a, b doc.Iterator) {
}
}
+func assertPostingsList(t *testing.T, l postings.List, exp []postings.ID) {
+ it := l.Iterator()
+
+ defer func() {
+ require.False(t, it.Next(), "should exhaust just once")
+ require.NoError(t, it.Err(), "should not complete with error")
+ require.NoError(t, it.Close(), "should not encounter error on close")
+ }()
+
+ match := make(map[postings.ID]struct{}, len(exp))
+ for _, v := range exp {
+ match[v] = struct{}{}
+ }
+
+ for it.Next() {
+ curr := it.Current()
+
+ _, ok := match[curr]
+ if !ok {
+ require.Fail(t,
+ fmt.Sprintf("unexpected ID %d in postings iter", curr))
+ return
+ }
+
+ delete(match, curr)
+ }
+
+ if len(match) == 0 {
+ // Success.
+ return
+ }
+
+ remaining := make([]int, 0, len(match))
+ for id := range match {
+ remaining = append(remaining, int(id))
+ }
+
+ msg := fmt.Sprintf("unmatched expected IDs %v, not found in postings iter",
+ remaining)
+ require.Fail(t, msg)
+}
+
func collectDocs(iter doc.Iterator) ([]doc.Document, error) {
var docs []doc.Document
for iter.Next() {
diff --git a/src/m3ninx/index/segment/mem/fields_map_new.go b/src/m3ninx/index/segment/mem/fields_map_new.go
index 8330583f73..c13cc1737e 100644
--- a/src/m3ninx/index/segment/mem/fields_map_new.go
+++ b/src/m3ninx/index/segment/mem/fields_map_new.go
@@ -29,7 +29,7 @@ import (
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
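// The import bump above follows Go modules' semantic import versioning: major
// version 2 of cespare/xxhash lives at the /v2 module path while the package
// name stays xxhash, so existing call sites compile unchanged. A standalone
// sketch (not part of the patch):
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// Sum64 has the same signature in v1 and v2; only the import path moved.
	fmt.Println(xxhash.Sum64([]byte("m3ninx")))
}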
diff --git a/src/m3ninx/index/segment/mem/ids_map_new.go b/src/m3ninx/index/segment/mem/ids_map_new.go
index 78b644c16c..55c5fcd43c 100644
--- a/src/m3ninx/index/segment/mem/ids_map_new.go
+++ b/src/m3ninx/index/segment/mem/ids_map_new.go
@@ -23,7 +23,7 @@ package mem
import (
"bytes"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// newIDsMap returns a new set of IDs.
diff --git a/src/m3ninx/index/segment/mem/mem_mock.go b/src/m3ninx/index/segment/mem/mem_mock.go
index dfff8572f5..af7483beb4 100644
--- a/src/m3ninx/index/segment/mem/mem_mock.go
+++ b/src/m3ninx/index/segment/mem/mem_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/m3ninx/index/segment/mem (interfaces: ReadableSegment)
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -29,6 +29,7 @@ import (
"regexp"
"github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/postings"
"github.com/golang/mock/gomock"
@@ -57,6 +58,51 @@ func (m *MockReadableSegment) EXPECT() *MockReadableSegmentMockRecorder {
return m.recorder
}
+// ContainsField mocks base method
+func (m *MockReadableSegment) ContainsField(arg0 []byte) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ContainsField", arg0)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ContainsField indicates an expected call of ContainsField
+func (mr *MockReadableSegmentMockRecorder) ContainsField(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainsField", reflect.TypeOf((*MockReadableSegment)(nil).ContainsField), arg0)
+}
+
+// Fields mocks base method
+func (m *MockReadableSegment) Fields() (segment.FieldsIterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Fields")
+ ret0, _ := ret[0].(segment.FieldsIterator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Fields indicates an expected call of Fields
+func (mr *MockReadableSegmentMockRecorder) Fields() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockReadableSegment)(nil).Fields))
+}
+
+// Terms mocks base method
+func (m *MockReadableSegment) Terms(arg0 []byte) (segment.TermsIterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Terms", arg0)
+ ret0, _ := ret[0].(segment.TermsIterator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Terms indicates an expected call of Terms
+func (mr *MockReadableSegmentMockRecorder) Terms(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terms", reflect.TypeOf((*MockReadableSegment)(nil).Terms), arg0)
+}
+
// getDoc mocks base method
func (m *MockReadableSegment) getDoc(arg0 postings.ID) (doc.Document, error) {
m.ctrl.T.Helper()
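// A hedged sketch (hypothetical test, not part of the patch) of programming
// the newly generated ContainsField expectation on MockReadableSegment:
package mem_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"

	"github.com/m3db/m3/src/m3ninx/index/segment/mem"
)

func TestReadableSegmentMockSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	seg := mem.NewMockReadableSegment(ctrl)
	// Expect exactly one ContainsField call and stub its return values.
	seg.EXPECT().ContainsField([]byte("fruit")).Return(true, nil)

	ok, err := seg.ContainsField([]byte("fruit"))
	require.NoError(t, err)
	require.True(t, ok)
}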
diff --git a/src/m3ninx/index/segment/mem/merge_test.go b/src/m3ninx/index/segment/mem/merge_test.go
index 06f32cb3e2..0a89b41112 100644
--- a/src/m3ninx/index/segment/mem/merge_test.go
+++ b/src/m3ninx/index/segment/mem/merge_test.go
@@ -25,7 +25,6 @@ import (
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/index"
- "github.com/m3db/m3/src/m3ninx/postings"
"github.com/stretchr/testify/require"
)
@@ -76,19 +75,19 @@ func TestMemSegmentMerge(t *testing.T) {
rest := docs[1:]
opts := NewOptions()
- m1, err := NewSegment(postings.ID(0), opts)
+ m1, err := NewSegment(opts)
require.NoError(t, err)
_, err = m1.Insert(d)
require.NoError(t, err)
- m2, err := NewSegment(postings.ID(0), opts)
+ m2, err := NewSegment(opts)
require.NoError(t, err)
for _, d := range rest {
_, err = m2.Insert(d)
require.NoError(t, err)
}
- m3, err := NewSegment(postings.ID(0), opts)
+ m3, err := NewSegment(opts)
require.NoError(t, err)
require.NoError(t, Merge(m3, m1, m2))
diff --git a/src/m3ninx/index/segment/mem/postings_map_new.go b/src/m3ninx/index/segment/mem/postings_map_new.go
index e727f103e9..531e78c806 100644
--- a/src/m3ninx/index/segment/mem/postings_map_new.go
+++ b/src/m3ninx/index/segment/mem/postings_map_new.go
@@ -23,7 +23,7 @@ package mem
import (
"bytes"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// newPostingsMap returns a new []bytes->postings.MutableList map.
diff --git a/src/m3ninx/index/segment/mem/reader.go b/src/m3ninx/index/segment/mem/reader.go
index b46d2482ac..385bc24e4a 100644
--- a/src/m3ninx/index/segment/mem/reader.go
+++ b/src/m3ninx/index/segment/mem/reader.go
@@ -26,6 +26,7 @@ import (
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/index"
+ sgmt "github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/postings"
)
@@ -49,7 +50,7 @@ type readerDocRange struct {
endExclusive postings.ID
}
-func newReader(s ReadableSegment, l readerDocRange, p postings.Pool) index.Reader {
+func newReader(s ReadableSegment, l readerDocRange, p postings.Pool) sgmt.Reader {
return &reader{
segment: s,
limits: l,
@@ -57,6 +58,18 @@ func newReader(s ReadableSegment, l readerDocRange, p postings.Pool) index.Reade
}
}
+func (r *reader) Fields() (sgmt.FieldsIterator, error) {
+ return r.segment.Fields()
+}
+
+func (r *reader) ContainsField(field []byte) (bool, error) {
+ return r.segment.ContainsField(field)
+}
+
+func (r *reader) Terms(field []byte) (sgmt.TermsIterator, error) {
+ return r.segment.Terms(field)
+}
+
func (r *reader) MatchField(field []byte) (postings.List, error) {
// falling back to regexp .* as this segment implementation is only used in tests.
return r.MatchRegexp(field, index.DotStarCompiledRegex())
diff --git a/src/m3ninx/index/segment/mem/segment.go b/src/m3ninx/index/segment/mem/segment.go
index 17454bc86c..9a45d1b2ec 100644
--- a/src/m3ninx/index/segment/mem/segment.go
+++ b/src/m3ninx/index/segment/mem/segment.go
@@ -27,7 +27,7 @@ import (
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/index"
- sgmt "github.com/m3db/m3/src/m3ninx/index/segment"
+ "github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/postings"
"github.com/m3db/m3/src/m3ninx/util"
)
@@ -38,7 +38,7 @@ var (
)
// nolint: maligned
-type segment struct {
+type memSegment struct {
offset int
plPool postings.Pool
newUUIDFn util.NewUUIDFn
@@ -68,31 +68,37 @@ type segment struct {
-// NewSegment returns a new in-memory mutable segment. It will start assigning
-// postings IDs at the provided offset.
+// NewSegment returns a new in-memory mutable segment. Postings IDs are always
+// assigned starting at zero.
-func NewSegment(offset postings.ID, opts Options) (sgmt.MutableSegment, error) {
- s := &segment{
- offset: int(offset),
+func NewSegment(opts Options) (segment.MutableSegment, error) {
+ s := &memSegment{
plPool: opts.PostingsListPool(),
newUUIDFn: opts.NewUUIDFn(),
termsDict: newTermsDict(opts),
- readerID: postings.NewAtomicID(offset),
+ readerID: postings.NewAtomicID(0),
}
s.docs.data = make([]doc.Document, opts.InitialCapacity())
s.writer.idSet = newIDsMap(256)
- s.writer.nextID = offset
+ s.writer.nextID = 0
return s, nil
}
-func (s *segment) Reset(offset postings.ID) {
+func (s *memSegment) SetIndexConcurrency(value int) {
+ // No-op; the mem segment does not support concurrent indexing.
+}
+
+func (s *memSegment) IndexConcurrency() int {
+ return 1
+}
+
+func (s *memSegment) Reset() {
s.state.Lock()
defer s.state.Unlock()
s.state.sealed = false
- s.offset = int(offset)
s.termsDict.Reset()
- s.readerID = postings.NewAtomicID(offset)
+ s.readerID = postings.NewAtomicID(0)
var empty doc.Document
for i := range s.docs.data {
@@ -101,20 +107,13 @@ func (s *segment) Reset(offset postings.ID) {
s.docs.data = s.docs.data[:0]
s.writer.idSet.Reset()
- s.writer.nextID = offset
+ s.writer.nextID = 0
}
-func (s *segment) Offset() postings.ID {
- s.state.RLock()
- offset := postings.ID(s.offset)
- s.state.RUnlock()
- return offset
-}
-
-func (s *segment) Size() int64 {
+func (s *memSegment) Size() int64 {
s.state.RLock()
closed := s.state.closed
- size := int64(s.readerID.Load()) - int64(s.offset)
+ size := int64(s.readerID.Load())
s.state.RUnlock()
if closed {
return 0
@@ -122,7 +121,7 @@ func (s *segment) Size() int64 {
return size
}
-func (s *segment) Docs() []doc.Document {
+func (s *memSegment) Docs() []doc.Document {
s.state.RLock()
defer s.state.RUnlock()
@@ -132,11 +131,11 @@ func (s *segment) Docs() []doc.Document {
return s.docs.data[:s.readerID.Load()]
}
-func (s *segment) ContainsID(id []byte) (bool, error) {
+func (s *memSegment) ContainsID(id []byte) (bool, error) {
s.state.RLock()
if s.state.closed {
s.state.RUnlock()
- return false, sgmt.ErrClosed
+ return false, segment.ErrClosed
}
contains := s.containsIDWithStateLock(id)
@@ -144,15 +143,15 @@ func (s *segment) ContainsID(id []byte) (bool, error) {
return contains, nil
}
-func (s *segment) containsIDWithStateLock(id []byte) bool {
+func (s *memSegment) containsIDWithStateLock(id []byte) bool {
return s.termsDict.ContainsTerm(doc.IDReservedFieldName, id)
}
-func (s *segment) ContainsField(f []byte) (bool, error) {
+func (s *memSegment) ContainsField(f []byte) (bool, error) {
s.state.RLock()
if s.state.closed {
s.state.RUnlock()
- return false, sgmt.ErrClosed
+ return false, segment.ErrClosed
}
contains := s.termsDict.ContainsField(f)
@@ -160,11 +159,11 @@ func (s *segment) ContainsField(f []byte) (bool, error) {
return contains, nil
}
-func (s *segment) Insert(d doc.Document) ([]byte, error) {
+func (s *memSegment) Insert(d doc.Document) ([]byte, error) {
s.state.RLock()
defer s.state.RUnlock()
if s.state.closed {
- return nil, sgmt.ErrClosed
+ return nil, segment.ErrClosed
}
{
@@ -189,11 +188,11 @@ func (s *segment) Insert(d doc.Document) ([]byte, error) {
return d.ID, nil
}
-func (s *segment) InsertBatch(b index.Batch) error {
+func (s *memSegment) InsertBatch(b index.Batch) error {
s.state.RLock()
defer s.state.RUnlock()
if s.state.closed {
- return sgmt.ErrClosed
+ return segment.ErrClosed
}
batchErr := index.NewBatchPartialError()
@@ -232,7 +231,7 @@ func (s *segment) InsertBatch(b index.Batch) error {
// prepareDocsWithLocks ensures the given documents can be inserted into the index. It
// must be called with the state and writer locks.
-func (s *segment) prepareDocsWithLocks(
+func (s *memSegment) prepareDocsWithLocks(
b index.Batch,
batchErr *index.BatchPartialError,
) error {
@@ -294,7 +293,7 @@ func (s *segment) prepareDocsWithLocks(
// insertDocWithLocks inserts a document into the index. It must be called with the
// state and writer locks.
-func (s *segment) insertDocWithLocks(d doc.Document) error {
+func (s *memSegment) insertDocWithLocks(d doc.Document) error {
nextID := s.writer.nextID
s.storeDocWithStateLock(nextID, d)
s.writer.nextID++
@@ -303,7 +302,7 @@ func (s *segment) insertDocWithLocks(d doc.Document) error {
// indexDocWithStateLock indexes the fields of a document in the segment's terms
// dictionary. It must be called with the segment's state lock.
-func (s *segment) indexDocWithStateLock(id postings.ID, d doc.Document) error {
+func (s *memSegment) indexDocWithStateLock(id postings.ID, d doc.Document) error {
for _, f := range d.Fields {
if err := s.termsDict.Insert(f, id); err != nil {
return err
@@ -317,8 +316,8 @@ func (s *segment) indexDocWithStateLock(id postings.ID, d doc.Document) error {
// storeDocWithStateLock stores a documents into the segment's mapping of postings
// IDs to documents. It must be called with the segment's state lock.
-func (s *segment) storeDocWithStateLock(id postings.ID, d doc.Document) {
- idx := int(id) - s.offset
+func (s *memSegment) storeDocWithStateLock(id postings.ID, d doc.Document) {
+ idx := int(id)
// Can return early if we have sufficient capacity.
{
@@ -355,21 +354,21 @@ func (s *segment) storeDocWithStateLock(id postings.ID, d doc.Document) {
}
}
-func (s *segment) Reader() (index.Reader, error) {
+func (s *memSegment) Reader() (segment.Reader, error) {
s.state.RLock()
defer s.state.RUnlock()
if s.state.closed {
- return nil, sgmt.ErrClosed
+ return nil, segment.ErrClosed
}
limits := readerDocRange{
- startInclusive: postings.ID(s.offset),
+ startInclusive: postings.ID(0),
endExclusive: s.readerID.Load(),
}
return newReader(s, limits, s.plPool), nil
}
-func (s *segment) AllDocs() (index.IDDocIterator, error) {
+func (s *memSegment) AllDocs() (index.IDDocIterator, error) {
r, err := s.Reader()
if err != nil {
return nil, err
@@ -377,34 +376,34 @@ func (s *segment) AllDocs() (index.IDDocIterator, error) {
return r.AllDocs()
}
-func (s *segment) matchTerm(field, term []byte) (postings.List, error) {
+func (s *memSegment) matchTerm(field, term []byte) (postings.List, error) {
s.state.RLock()
defer s.state.RUnlock()
if s.state.closed {
- return nil, sgmt.ErrClosed
+ return nil, segment.ErrClosed
}
return s.termsDict.MatchTerm(field, term), nil
}
-func (s *segment) matchRegexp(field []byte, compiled *re.Regexp) (postings.List, error) {
+func (s *memSegment) matchRegexp(field []byte, compiled *re.Regexp) (postings.List, error) {
s.state.RLock()
defer s.state.RUnlock()
if s.state.closed {
- return nil, sgmt.ErrClosed
+ return nil, segment.ErrClosed
}
return s.termsDict.MatchRegexp(field, compiled), nil
}
-func (s *segment) getDoc(id postings.ID) (doc.Document, error) {
+func (s *memSegment) getDoc(id postings.ID) (doc.Document, error) {
s.state.RLock()
defer s.state.RUnlock()
if s.state.closed {
- return doc.Document{}, sgmt.ErrClosed
+ return doc.Document{}, segment.ErrClosed
}
- idx := int(id) - s.offset
+ idx := int(id)
s.docs.RLock()
if idx >= len(s.docs.data) {
@@ -417,18 +416,18 @@ func (s *segment) getDoc(id postings.ID) (doc.Document, error) {
return d, nil
}
-func (s *segment) Close() error {
+func (s *memSegment) Close() error {
s.state.Lock()
defer s.state.Unlock()
if s.state.closed {
- return sgmt.ErrClosed
+ return segment.ErrClosed
}
s.state.closed = true
return nil
}
-func (s *segment) IsSealed() bool {
+func (s *memSegment) IsSealed() bool {
s.state.Lock()
defer s.state.Unlock()
if s.state.closed {
@@ -437,11 +436,11 @@ func (s *segment) IsSealed() bool {
return s.state.sealed
}
-func (s *segment) Seal() error {
+func (s *memSegment) Seal() error {
s.state.Lock()
defer s.state.Unlock()
if s.state.closed {
- return sgmt.ErrClosed
+ return segment.ErrClosed
}
if s.state.sealed {
@@ -452,7 +451,7 @@ func (s *segment) Seal() error {
return nil
}
-func (s *segment) Fields() (sgmt.FieldsIterator, error) {
+func (s *memSegment) Fields() (segment.FieldsIterator, error) {
s.state.RLock()
defer s.state.RUnlock()
if err := s.checkIsSealedWithRLock(); err != nil {
@@ -461,7 +460,16 @@ func (s *segment) Fields() (sgmt.FieldsIterator, error) {
return s.termsDict.Fields(), nil
}
-func (s *segment) Terms(name []byte) (sgmt.TermsIterator, error) {
+func (s *memSegment) FieldsPostingsList() (segment.FieldsPostingsListIterator, error) {
+ s.state.RLock()
+ defer s.state.RUnlock()
+ if err := s.checkIsSealedWithRLock(); err != nil {
+ return nil, err
+ }
+ return s.termsDict.FieldsPostingsList(), nil
+}
+
+func (s *memSegment) Terms(name []byte) (segment.TermsIterator, error) {
s.state.RLock()
defer s.state.RUnlock()
if err := s.checkIsSealedWithRLock(); err != nil {
@@ -470,17 +478,21 @@ func (s *segment) Terms(name []byte) (sgmt.TermsIterator, error) {
return s.termsDict.Terms(name), nil
}
-func (s *segment) FieldsIterable() sgmt.FieldsIterable {
+func (s *memSegment) FieldsIterable() segment.FieldsIterable {
+ return s
+}
+
+func (s *memSegment) FieldsPostingsListIterable() segment.FieldsPostingsListIterable {
return s
}
-func (s *segment) TermsIterable() sgmt.TermsIterable {
+func (s *memSegment) TermsIterable() segment.TermsIterable {
return s
}
-func (s *segment) checkIsSealedWithRLock() error {
+func (s *memSegment) checkIsSealedWithRLock() error {
if s.state.closed {
- return sgmt.ErrClosed
+ return segment.ErrClosed
}
if !s.state.sealed {
return errSegmentIsUnsealed
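// Call-site sketch for the signature changes above (not part of the patch):
// mutable mem segments now always number postings IDs from zero, so both the
// construction offset and the Reset argument are gone. The "before" lines are
// shown as comments and no longer compile:
package main

import "github.com/m3db/m3/src/m3ninx/index/segment/mem"

func main() {
	// before: s, err := mem.NewSegment(postings.ID(0), opts)
	s, err := mem.NewSegment(mem.NewOptions())
	if err != nil {
		panic(err)
	}
	// before: s.Reset(postings.ID(0))
	s.Reset()
}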
diff --git a/src/m3ninx/index/segment/mem/segment_bench_test.go b/src/m3ninx/index/segment/mem/segment_bench_test.go
index 98811f18a7..ee344f17a9 100644
--- a/src/m3ninx/index/segment/mem/segment_bench_test.go
+++ b/src/m3ninx/index/segment/mem/segment_bench_test.go
@@ -70,7 +70,7 @@ func benchmarkInsertSegment(docs []doc.Document, b *testing.B) {
for n := 0; n < b.N; n++ {
b.StopTimer()
- s, err := NewSegment(0, NewOptions())
+ s, err := NewSegment(NewOptions())
if err != nil {
b.Fatalf("unable to construct new segment: %v", err)
}
@@ -85,11 +85,11 @@ func benchmarkInsertSegment(docs []doc.Document, b *testing.B) {
func benchmarkMatchTermSegment(docs []doc.Document, b *testing.B) {
b.ReportAllocs()
- sgmt, err := NewSegment(0, NewOptions())
+ sgmt, err := NewSegment(NewOptions())
if err != nil {
b.Fatalf("unable to construct new segment: %v", err)
}
- s := sgmt.(*segment)
+ s := sgmt.(*memSegment)
for _, d := range docs {
s.Insert(d)
}
@@ -107,11 +107,11 @@ func benchmarkMatchTermSegment(docs []doc.Document, b *testing.B) {
func benchmarkMatchRegexSegment(docs []doc.Document, b *testing.B) {
b.ReportAllocs()
- sgmt, err := NewSegment(0, NewOptions())
+ sgmt, err := NewSegment(NewOptions())
if err != nil {
b.Fatalf("unable to construct new segment: %v", err)
}
- s := sgmt.(*segment)
+ s := sgmt.(*memSegment)
for _, d := range docs {
s.Insert(d)
}
diff --git a/src/m3ninx/index/segment/mem/segment_test.go b/src/m3ninx/index/segment/mem/segment_test.go
index 5612d55446..60137213ca 100644
--- a/src/m3ninx/index/segment/mem/segment_test.go
+++ b/src/m3ninx/index/segment/mem/segment_test.go
@@ -107,7 +107,7 @@ func TestSegmentInsert(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
require.Equal(t, int64(0), segment.Size())
@@ -169,7 +169,7 @@ func TestSegmentInsertDuplicateID(t *testing.T) {
}
)
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
require.Equal(t, int64(0), segment.Size())
@@ -245,7 +245,7 @@ func TestSegmentInsertBatch(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
require.Equal(t, int64(0), segment.Size())
@@ -306,7 +306,7 @@ func TestSegmentInsertBatchError(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.Equal(t, int64(0), segment.Size())
require.NoError(t, err)
@@ -394,7 +394,7 @@ func TestSegmentInsertBatchPartialError(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
require.Equal(t, int64(0), segment.Size())
@@ -460,7 +460,7 @@ func TestSegmentInsertBatchPartialErrorInvalidDoc(t *testing.T) {
},
index.AllowPartialUpdates(),
)
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
err = segment.InsertBatch(b1)
@@ -514,7 +514,7 @@ func TestSegmentContainsID(t *testing.T) {
},
index.AllowPartialUpdates(),
)
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
ok, err := segment.ContainsID([]byte("abc"))
require.NoError(t, err)
@@ -573,7 +573,7 @@ func TestSegmentContainsField(t *testing.T) {
},
}
b1 := index.NewBatch(docs, index.AllowPartialUpdates())
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
err = segment.InsertBatch(b1)
@@ -642,7 +642,7 @@ func TestSegmentInsertBatchPartialErrorAlreadyIndexing(t *testing.T) {
},
index.AllowPartialUpdates())
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
err = segment.InsertBatch(b1)
@@ -697,7 +697,7 @@ func TestSegmentReaderMatchExact(t *testing.T) {
},
}
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
for _, doc := range docs {
@@ -736,7 +736,7 @@ func TestSegmentReaderMatchExact(t *testing.T) {
}
func TestSegmentSealLifecycle(t *testing.T) {
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
err = segment.Seal()
@@ -747,7 +747,7 @@ func TestSegmentSealLifecycle(t *testing.T) {
}
func TestSegmentSealCloseLifecycle(t *testing.T) {
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
require.NoError(t, segment.Close())
@@ -756,7 +756,7 @@ func TestSegmentSealCloseLifecycle(t *testing.T) {
}
func TestSegmentIsSealed(t *testing.T) {
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
require.False(t, segment.IsSealed())
@@ -770,7 +770,7 @@ func TestSegmentIsSealed(t *testing.T) {
}
func TestSegmentFields(t *testing.T) {
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
knownsFields := map[string]struct{}{}
@@ -796,7 +796,7 @@ func TestSegmentFields(t *testing.T) {
}
func TestSegmentTerms(t *testing.T) {
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
knownsFields := map[string]map[string]struct{}{}
@@ -829,7 +829,7 @@ func TestSegmentTerms(t *testing.T) {
func TestSegmentReaderMatchRegex(t *testing.T) {
docs := testDocuments
- segment, err := NewSegment(0, testOptions)
+ segment, err := NewSegment(testOptions)
require.NoError(t, err)
for _, doc := range docs {
diff --git a/src/m3ninx/index/segment/mem/terms_dict.go b/src/m3ninx/index/segment/mem/terms_dict.go
index 1743a8a0a7..fc2f7087c1 100644
--- a/src/m3ninx/index/segment/mem/terms_dict.go
+++ b/src/m3ninx/index/segment/mem/terms_dict.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -27,12 +27,15 @@ import (
"github.com/m3db/m3/src/m3ninx/doc"
sgmt "github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/postings"
+ "github.com/m3db/m3/src/m3ninx/postings/roaring"
)
// termsDict is an in-memory terms dictionary. It maps fields to postings lists.
type termsDict struct {
opts Options
+ currFieldsPostingsLists []postings.List
+
fields struct {
sync.RWMutex
*fieldsMap
@@ -84,6 +87,29 @@ func (d *termsDict) Fields() sgmt.FieldsIterator {
return newBytesSliceIter(fields, d.opts)
}
+func (d *termsDict) FieldsPostingsList() sgmt.FieldsPostingsListIterator {
+ d.fields.RLock()
+ defer d.fields.RUnlock()
+ // NB(bodu): Allocating a fresh postings list per field is acceptable since the terms dict/mem segment is only used in tests.
+ fields := make([]uniqueField, 0, d.fields.Len())
+ for _, entry := range d.fields.Iter() {
+ d.currFieldsPostingsLists = d.currFieldsPostingsLists[:0]
+ field := entry.Key()
+ pl := roaring.NewPostingsList()
+ if postingsMap, ok := d.fields.Get(field); ok {
+ for _, entry := range postingsMap.Iter() {
+ d.currFieldsPostingsLists = append(d.currFieldsPostingsLists, entry.value)
+ }
+ }
+ pl.UnionMany(d.currFieldsPostingsLists)
+ fields = append(fields, uniqueField{
+ field: field,
+ postingsList: pl,
+ })
+ }
+ return newUniqueFieldsIter(fields, d.opts)
+}
+
func (d *termsDict) Terms(field []byte) sgmt.TermsIterator {
d.fields.RLock()
defer d.fields.RUnlock()
diff --git a/src/m3ninx/index/segment/mem/types.go b/src/m3ninx/index/segment/mem/types.go
index 663175f35b..13bbea45fc 100644
--- a/src/m3ninx/index/segment/mem/types.go
+++ b/src/m3ninx/index/segment/mem/types.go
@@ -52,6 +52,9 @@ type termsDictionary interface {
// Fields returns the known fields.
Fields() sgmt.FieldsIterator
+ // FieldsPostingsList returns the known fields along with each field's postings list.
+ FieldsPostingsList() sgmt.FieldsPostingsListIterator
+
// Terms returns the known terms values for the given field.
Terms(field []byte) sgmt.TermsIterator
@@ -66,12 +69,10 @@ type termsDictionary interface {
// mode so we can't mock this interface if it's private. Once mockgen supports mocking
// private interfaces which contain embedded interfaces we can make this interface private.
type ReadableSegment interface {
- // matchTerm returns the postings list of documents which match the given term exactly.
+ Fields() (sgmt.FieldsIterator, error)
+ ContainsField(field []byte) (bool, error)
+ Terms(field []byte) (sgmt.TermsIterator, error)
matchTerm(field, term []byte) (postings.List, error)
-
- // matchRegexp returns the postings list of documents which match the given regular expression.
matchRegexp(field []byte, compiled *re.Regexp) (postings.List, error)
-
- // getDoc returns the document associated with the given ID.
getDoc(id postings.ID) (doc.Document, error)
}
diff --git a/src/m3ninx/index/segment/mem/unique_fields_iterator.go b/src/m3ninx/index/segment/mem/unique_fields_iterator.go
new file mode 100644
index 0000000000..c26cac69de
--- /dev/null
+++ b/src/m3ninx/index/segment/mem/unique_fields_iterator.go
@@ -0,0 +1,91 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package mem
+
+import (
+ "bytes"
+ "sort"
+
+ sgmt "github.com/m3db/m3/src/m3ninx/index/segment"
+ "github.com/m3db/m3/src/m3ninx/postings"
+)
+
+type uniqueField struct {
+ field []byte
+ postingsList postings.List
+}
+
+type uniqueFieldsIter struct {
+ err error
+ done bool
+
+ currentIdx int
+ current uniqueField
+ backingSlice []uniqueField
+ opts Options
+}
+
+var _ sgmt.FieldsPostingsListIterator = &uniqueFieldsIter{}
+
+func newUniqueFieldsIter(slice []uniqueField, opts Options) *uniqueFieldsIter {
+ sortSliceOfUniqueFields(slice)
+ return &uniqueFieldsIter{
+ currentIdx: -1,
+ backingSlice: slice,
+ opts: opts,
+ }
+}
+
+func (b *uniqueFieldsIter) Next() bool {
+ if b.done || b.err != nil {
+ return false
+ }
+ b.currentIdx++
+ if b.currentIdx >= len(b.backingSlice) {
+ b.done = true
+ return false
+ }
+ b.current = b.backingSlice[b.currentIdx]
+ return true
+}
+
+func (b *uniqueFieldsIter) Current() ([]byte, postings.List) {
+ return b.current.field, b.current.postingsList
+}
+
+func (b *uniqueFieldsIter) Err() error {
+	return b.err
+}
+
+func (b *uniqueFieldsIter) Len() int {
+ return len(b.backingSlice)
+}
+
+func (b *uniqueFieldsIter) Close() error {
+ b.current = uniqueField{}
+ return nil
+}
+
+func sortSliceOfUniqueFields(b []uniqueField) {
+ sort.Slice(b, func(i, j int) bool {
+ return bytes.Compare(b[i].field, b[j].field) < 0
+ })
+}
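// A small usage sketch for the new iterator (hypothetical helper living in
// package mem, since uniqueField is unexported; not part of the patch): input
// order is irrelevant because newUniqueFieldsIter sorts the slice by field.
package mem

import "fmt"

func exampleUniqueFieldsIter(opts Options) {
	iter := newUniqueFieldsIter([]uniqueField{
		{field: []byte("fruit")},
		{field: []byte("color")},
	}, opts)
	for iter.Next() {
		field, _ := iter.Current() // postings list elided in this sketch
		fmt.Printf("%s\n", field)  // prints "color" then "fruit"
	}
	_ = iter.Close()
}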
diff --git a/src/m3ninx/index/segment/segment_mock.go b/src/m3ninx/index/segment/segment_mock.go
index 3325e03523..4721f195c2 100644
--- a/src/m3ninx/index/segment/segment_mock.go
+++ b/src/m3ninx/index/segment/segment_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/m3ninx/index/segment/types.go
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -130,10 +130,10 @@ func (mr *MockSegmentMockRecorder) ContainsField(field interface{}) *gomock.Call
}
// Reader mocks base method
-func (m *MockSegment) Reader() (index.Reader, error) {
+func (m *MockSegment) Reader() (Reader, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reader")
- ret0, _ := ret[0].(index.Reader)
+ ret0, _ := ret[0].(Reader)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -158,6 +158,193 @@ func (mr *MockSegmentMockRecorder) Close() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockSegment)(nil).Close))
}
+// MockReader is a mock of Reader interface
+type MockReader struct {
+ ctrl *gomock.Controller
+ recorder *MockReaderMockRecorder
+}
+
+// MockReaderMockRecorder is the mock recorder for MockReader
+type MockReaderMockRecorder struct {
+ mock *MockReader
+}
+
+// NewMockReader creates a new mock instance
+func NewMockReader(ctrl *gomock.Controller) *MockReader {
+ mock := &MockReader{ctrl: ctrl}
+ mock.recorder = &MockReaderMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockReader) EXPECT() *MockReaderMockRecorder {
+ return m.recorder
+}
+
+// Doc mocks base method
+func (m *MockReader) Doc(id postings.ID) (doc.Document, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Doc", id)
+ ret0, _ := ret[0].(doc.Document)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Doc indicates an expected call of Doc
+func (mr *MockReaderMockRecorder) Doc(id interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Doc", reflect.TypeOf((*MockReader)(nil).Doc), id)
+}
+
+// MatchField mocks base method
+func (m *MockReader) MatchField(field []byte) (postings.List, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MatchField", field)
+ ret0, _ := ret[0].(postings.List)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MatchField indicates an expected call of MatchField
+func (mr *MockReaderMockRecorder) MatchField(field interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MatchField", reflect.TypeOf((*MockReader)(nil).MatchField), field)
+}
+
+// MatchTerm mocks base method
+func (m *MockReader) MatchTerm(field, term []byte) (postings.List, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MatchTerm", field, term)
+ ret0, _ := ret[0].(postings.List)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MatchTerm indicates an expected call of MatchTerm
+func (mr *MockReaderMockRecorder) MatchTerm(field, term interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MatchTerm", reflect.TypeOf((*MockReader)(nil).MatchTerm), field, term)
+}
+
+// MatchRegexp mocks base method
+func (m *MockReader) MatchRegexp(field []byte, c index.CompiledRegex) (postings.List, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MatchRegexp", field, c)
+ ret0, _ := ret[0].(postings.List)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MatchRegexp indicates an expected call of MatchRegexp
+func (mr *MockReaderMockRecorder) MatchRegexp(field, c interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MatchRegexp", reflect.TypeOf((*MockReader)(nil).MatchRegexp), field, c)
+}
+
+// MatchAll mocks base method
+func (m *MockReader) MatchAll() (postings.MutableList, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MatchAll")
+ ret0, _ := ret[0].(postings.MutableList)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// MatchAll indicates an expected call of MatchAll
+func (mr *MockReaderMockRecorder) MatchAll() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MatchAll", reflect.TypeOf((*MockReader)(nil).MatchAll))
+}
+
+// Docs mocks base method
+func (m *MockReader) Docs(pl postings.List) (doc.Iterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Docs", pl)
+ ret0, _ := ret[0].(doc.Iterator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Docs indicates an expected call of Docs
+func (mr *MockReaderMockRecorder) Docs(pl interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Docs", reflect.TypeOf((*MockReader)(nil).Docs), pl)
+}
+
+// AllDocs mocks base method
+func (m *MockReader) AllDocs() (index.IDDocIterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AllDocs")
+ ret0, _ := ret[0].(index.IDDocIterator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AllDocs indicates an expected call of AllDocs
+func (mr *MockReaderMockRecorder) AllDocs() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllDocs", reflect.TypeOf((*MockReader)(nil).AllDocs))
+}
+
+// Close mocks base method
+func (m *MockReader) Close() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Close")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Close indicates an expected call of Close
+func (mr *MockReaderMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockReader)(nil).Close))
+}
+
+// Fields mocks base method
+func (m *MockReader) Fields() (FieldsIterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Fields")
+ ret0, _ := ret[0].(FieldsIterator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Fields indicates an expected call of Fields
+func (mr *MockReaderMockRecorder) Fields() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockReader)(nil).Fields))
+}
+
+// Terms mocks base method
+func (m *MockReader) Terms(field []byte) (TermsIterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Terms", field)
+ ret0, _ := ret[0].(TermsIterator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Terms indicates an expected call of Terms
+func (mr *MockReaderMockRecorder) Terms(field interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terms", reflect.TypeOf((*MockReader)(nil).Terms), field)
+}
+
+// ContainsField mocks base method
+func (m *MockReader) ContainsField(field []byte) (bool, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ContainsField", field)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// ContainsField indicates an expected call of ContainsField
+func (mr *MockReaderMockRecorder) ContainsField(field interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainsField", reflect.TypeOf((*MockReader)(nil).ContainsField), field)
+}
+
// MockFieldsIterable is a mock of FieldsIterable interface
type MockFieldsIterable struct {
ctrl *gomock.Controller
@@ -196,6 +383,44 @@ func (mr *MockFieldsIterableMockRecorder) Fields() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockFieldsIterable)(nil).Fields))
}
+// MockFieldsPostingsListIterable is a mock of FieldsPostingsListIterable interface
+type MockFieldsPostingsListIterable struct {
+ ctrl *gomock.Controller
+ recorder *MockFieldsPostingsListIterableMockRecorder
+}
+
+// MockFieldsPostingsListIterableMockRecorder is the mock recorder for MockFieldsPostingsListIterable
+type MockFieldsPostingsListIterableMockRecorder struct {
+ mock *MockFieldsPostingsListIterable
+}
+
+// NewMockFieldsPostingsListIterable creates a new mock instance
+func NewMockFieldsPostingsListIterable(ctrl *gomock.Controller) *MockFieldsPostingsListIterable {
+ mock := &MockFieldsPostingsListIterable{ctrl: ctrl}
+ mock.recorder = &MockFieldsPostingsListIterableMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockFieldsPostingsListIterable) EXPECT() *MockFieldsPostingsListIterableMockRecorder {
+ return m.recorder
+}
+
+// FieldsPostingsList mocks base method
+func (m *MockFieldsPostingsListIterable) FieldsPostingsList() (FieldsPostingsListIterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "FieldsPostingsList")
+ ret0, _ := ret[0].(FieldsPostingsListIterator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// FieldsPostingsList indicates an expected call of FieldsPostingsList
+func (mr *MockFieldsPostingsListIterableMockRecorder) FieldsPostingsList() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FieldsPostingsList", reflect.TypeOf((*MockFieldsPostingsListIterable)(nil).FieldsPostingsList))
+}
+
// MockTermsIterable is a mock of TermsIterable interface
type MockTermsIterable struct {
ctrl *gomock.Controller
@@ -313,6 +538,86 @@ func (mr *MockOrderedBytesIteratorMockRecorder) Close() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockOrderedBytesIterator)(nil).Close))
}
+// MockFieldsPostingsListIterator is a mock of FieldsPostingsListIterator interface
+type MockFieldsPostingsListIterator struct {
+ ctrl *gomock.Controller
+ recorder *MockFieldsPostingsListIteratorMockRecorder
+}
+
+// MockFieldsPostingsListIteratorMockRecorder is the mock recorder for MockFieldsPostingsListIterator
+type MockFieldsPostingsListIteratorMockRecorder struct {
+ mock *MockFieldsPostingsListIterator
+}
+
+// NewMockFieldsPostingsListIterator creates a new mock instance
+func NewMockFieldsPostingsListIterator(ctrl *gomock.Controller) *MockFieldsPostingsListIterator {
+ mock := &MockFieldsPostingsListIterator{ctrl: ctrl}
+ mock.recorder = &MockFieldsPostingsListIteratorMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockFieldsPostingsListIterator) EXPECT() *MockFieldsPostingsListIteratorMockRecorder {
+ return m.recorder
+}
+
+// Next mocks base method
+func (m *MockFieldsPostingsListIterator) Next() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Next")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// Next indicates an expected call of Next
+func (mr *MockFieldsPostingsListIteratorMockRecorder) Next() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockFieldsPostingsListIterator)(nil).Next))
+}
+
+// Err mocks base method
+func (m *MockFieldsPostingsListIterator) Err() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Err")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Err indicates an expected call of Err
+func (mr *MockFieldsPostingsListIteratorMockRecorder) Err() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockFieldsPostingsListIterator)(nil).Err))
+}
+
+// Close mocks base method
+func (m *MockFieldsPostingsListIterator) Close() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Close")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Close indicates an expected call of Close
+func (mr *MockFieldsPostingsListIteratorMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockFieldsPostingsListIterator)(nil).Close))
+}
+
+// Current mocks base method
+func (m *MockFieldsPostingsListIterator) Current() ([]byte, postings.List) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Current")
+ ret0, _ := ret[0].([]byte)
+ ret1, _ := ret[1].(postings.List)
+ return ret0, ret1
+}
+
+// Current indicates an expected call of Current
+func (mr *MockFieldsPostingsListIteratorMockRecorder) Current() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Current", reflect.TypeOf((*MockFieldsPostingsListIterator)(nil).Current))
+}
+
// MockFieldsIterator is a mock of FieldsIterator interface
type MockFieldsIterator struct {
ctrl *gomock.Controller
@@ -350,20 +655,6 @@ func (mr *MockFieldsIteratorMockRecorder) Next() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockFieldsIterator)(nil).Next))
}
-// Current mocks base method
-func (m *MockFieldsIterator) Current() []byte {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Current")
- ret0, _ := ret[0].([]byte)
- return ret0
-}
-
-// Current indicates an expected call of Current
-func (mr *MockFieldsIteratorMockRecorder) Current() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Current", reflect.TypeOf((*MockFieldsIterator)(nil).Current))
-}
-
// Err mocks base method
func (m *MockFieldsIterator) Err() error {
m.ctrl.T.Helper()
@@ -392,6 +683,20 @@ func (mr *MockFieldsIteratorMockRecorder) Close() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockFieldsIterator)(nil).Close))
}
+// Current mocks base method
+func (m *MockFieldsIterator) Current() []byte {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Current")
+ ret0, _ := ret[0].([]byte)
+ return ret0
+}
+
+// Current indicates an expected call of Current
+func (mr *MockFieldsIteratorMockRecorder) Current() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Current", reflect.TypeOf((*MockFieldsIterator)(nil).Current))
+}
+
// MockTermsIterator is a mock of TermsIterator interface
type MockTermsIterator struct {
ctrl *gomock.Controller
@@ -429,6 +734,34 @@ func (mr *MockTermsIteratorMockRecorder) Next() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockTermsIterator)(nil).Next))
}
+// Err mocks base method
+func (m *MockTermsIterator) Err() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Err")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Err indicates an expected call of Err
+func (mr *MockTermsIteratorMockRecorder) Err() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockTermsIterator)(nil).Err))
+}
+
+// Close mocks base method
+func (m *MockTermsIterator) Close() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Close")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Close indicates an expected call of Close
+func (mr *MockTermsIteratorMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockTermsIterator)(nil).Close))
+}
+
// Current mocks base method
func (m *MockTermsIterator) Current() ([]byte, postings.List) {
m.ctrl.T.Helper()
@@ -444,8 +777,45 @@ func (mr *MockTermsIteratorMockRecorder) Current() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Current", reflect.TypeOf((*MockTermsIterator)(nil).Current))
}
+// MockIterator is a mock of Iterator interface
+type MockIterator struct {
+ ctrl *gomock.Controller
+ recorder *MockIteratorMockRecorder
+}
+
+// MockIteratorMockRecorder is the mock recorder for MockIterator
+type MockIteratorMockRecorder struct {
+ mock *MockIterator
+}
+
+// NewMockIterator creates a new mock instance
+func NewMockIterator(ctrl *gomock.Controller) *MockIterator {
+ mock := &MockIterator{ctrl: ctrl}
+ mock.recorder = &MockIteratorMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockIterator) EXPECT() *MockIteratorMockRecorder {
+ return m.recorder
+}
+
+// Next mocks base method
+func (m *MockIterator) Next() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Next")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// Next indicates an expected call of Next
+func (mr *MockIteratorMockRecorder) Next() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockIterator)(nil).Next))
+}
+
// Err mocks base method
-func (m *MockTermsIterator) Err() error {
+func (m *MockIterator) Err() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Err")
ret0, _ := ret[0].(error)
@@ -453,13 +823,13 @@ func (m *MockTermsIterator) Err() error {
}
// Err indicates an expected call of Err
-func (mr *MockTermsIteratorMockRecorder) Err() *gomock.Call {
+func (mr *MockIteratorMockRecorder) Err() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockTermsIterator)(nil).Err))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockIterator)(nil).Err))
}
// Close mocks base method
-func (m *MockTermsIterator) Close() error {
+func (m *MockIterator) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
@@ -467,9 +837,9 @@ func (m *MockTermsIterator) Close() error {
}
// Close indicates an expected call of Close
-func (mr *MockTermsIteratorMockRecorder) Close() *gomock.Call {
+func (mr *MockIteratorMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockTermsIterator)(nil).Close))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIterator)(nil).Close))
}
// MockMutableSegment is a mock of MutableSegment interface
@@ -568,10 +938,10 @@ func (mr *MockMutableSegmentMockRecorder) ContainsField(field interface{}) *gomo
}
// Reader mocks base method
-func (m *MockMutableSegment) Reader() (index.Reader, error) {
+func (m *MockMutableSegment) Reader() (Reader, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reader")
- ret0, _ := ret[0].(index.Reader)
+ ret0, _ := ret[0].(Reader)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -596,19 +966,19 @@ func (mr *MockMutableSegmentMockRecorder) Close() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockMutableSegment)(nil).Close))
}
-// Fields mocks base method
-func (m *MockMutableSegment) Fields() (FieldsIterator, error) {
+// FieldsPostingsList mocks base method
+func (m *MockMutableSegment) FieldsPostingsList() (FieldsPostingsListIterator, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Fields")
- ret0, _ := ret[0].(FieldsIterator)
+ ret := m.ctrl.Call(m, "FieldsPostingsList")
+ ret0, _ := ret[0].(FieldsPostingsListIterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// Fields indicates an expected call of Fields
-func (mr *MockMutableSegmentMockRecorder) Fields() *gomock.Call {
+// FieldsPostingsList indicates an expected call of FieldsPostingsList
+func (mr *MockMutableSegmentMockRecorder) FieldsPostingsList() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockMutableSegment)(nil).Fields))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FieldsPostingsList", reflect.TypeOf((*MockMutableSegment)(nil).FieldsPostingsList))
}
// Terms mocks base method
@@ -627,15 +997,15 @@ func (mr *MockMutableSegmentMockRecorder) Terms(field interface{}) *gomock.Call
}
// Reset mocks base method
-func (m *MockMutableSegment) Reset(offset postings.ID) {
+func (m *MockMutableSegment) Reset() {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "Reset", offset)
+ m.ctrl.Call(m, "Reset")
}
// Reset indicates an expected call of Reset
-func (mr *MockMutableSegmentMockRecorder) Reset(offset interface{}) *gomock.Call {
+func (mr *MockMutableSegmentMockRecorder) Reset() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockMutableSegment)(nil).Reset), offset)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockMutableSegment)(nil).Reset))
}
// Docs mocks base method
@@ -696,18 +1066,45 @@ func (mr *MockMutableSegmentMockRecorder) InsertBatch(b interface{}) *gomock.Cal
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertBatch", reflect.TypeOf((*MockMutableSegment)(nil).InsertBatch), b)
}
-// Offset mocks base method
-func (m *MockMutableSegment) Offset() postings.ID {
+// SetIndexConcurrency mocks base method
+func (m *MockMutableSegment) SetIndexConcurrency(value int) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "SetIndexConcurrency", value)
+}
+
+// SetIndexConcurrency indicates an expected call of SetIndexConcurrency
+func (mr *MockMutableSegmentMockRecorder) SetIndexConcurrency(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIndexConcurrency", reflect.TypeOf((*MockMutableSegment)(nil).SetIndexConcurrency), value)
+}
+
+// IndexConcurrency mocks base method
+func (m *MockMutableSegment) IndexConcurrency() int {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Offset")
- ret0, _ := ret[0].(postings.ID)
+ ret := m.ctrl.Call(m, "IndexConcurrency")
+ ret0, _ := ret[0].(int)
return ret0
}
-// Offset indicates an expected call of Offset
-func (mr *MockMutableSegmentMockRecorder) Offset() *gomock.Call {
+// IndexConcurrency indicates an expected call of IndexConcurrency
+func (mr *MockMutableSegmentMockRecorder) IndexConcurrency() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IndexConcurrency", reflect.TypeOf((*MockMutableSegment)(nil).IndexConcurrency))
+}
+
+// Fields mocks base method
+func (m *MockMutableSegment) Fields() (FieldsIterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Fields")
+ ret0, _ := ret[0].(FieldsIterator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Fields indicates an expected call of Fields
+func (mr *MockMutableSegmentMockRecorder) Fields() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Offset", reflect.TypeOf((*MockMutableSegment)(nil).Offset))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockMutableSegment)(nil).Fields))
}
// Seal mocks base method
@@ -834,10 +1231,10 @@ func (mr *MockImmutableSegmentMockRecorder) ContainsField(field interface{}) *go
}
// Reader mocks base method
-func (m *MockImmutableSegment) Reader() (index.Reader, error) {
+func (m *MockImmutableSegment) Reader() (Reader, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reader")
- ret0, _ := ret[0].(index.Reader)
+ ret0, _ := ret[0].(Reader)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -899,19 +1296,19 @@ func (m *MockBuilder) EXPECT() *MockBuilderMockRecorder {
return m.recorder
}
-// Fields mocks base method
-func (m *MockBuilder) Fields() (FieldsIterator, error) {
+// FieldsPostingsList mocks base method
+func (m *MockBuilder) FieldsPostingsList() (FieldsPostingsListIterator, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Fields")
- ret0, _ := ret[0].(FieldsIterator)
+ ret := m.ctrl.Call(m, "FieldsPostingsList")
+ ret0, _ := ret[0].(FieldsPostingsListIterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// Fields indicates an expected call of Fields
-func (mr *MockBuilderMockRecorder) Fields() *gomock.Call {
+// FieldsPostingsList indicates an expected call of FieldsPostingsList
+func (mr *MockBuilderMockRecorder) FieldsPostingsList() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockBuilder)(nil).Fields))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FieldsPostingsList", reflect.TypeOf((*MockBuilder)(nil).FieldsPostingsList))
}
// Terms mocks base method
@@ -930,15 +1327,15 @@ func (mr *MockBuilderMockRecorder) Terms(field interface{}) *gomock.Call {
}
// Reset mocks base method
-func (m *MockBuilder) Reset(offset postings.ID) {
+func (m *MockBuilder) Reset() {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "Reset", offset)
+ m.ctrl.Call(m, "Reset")
}
// Reset indicates an expected call of Reset
-func (mr *MockBuilderMockRecorder) Reset(offset interface{}) *gomock.Call {
+func (mr *MockBuilderMockRecorder) Reset() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockBuilder)(nil).Reset), offset)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockBuilder)(nil).Reset))
}
// Docs mocks base method
@@ -993,19 +1390,19 @@ func (m *MockDocumentsBuilder) EXPECT() *MockDocumentsBuilderMockRecorder {
return m.recorder
}
-// Fields mocks base method
-func (m *MockDocumentsBuilder) Fields() (FieldsIterator, error) {
+// FieldsPostingsList mocks base method
+func (m *MockDocumentsBuilder) FieldsPostingsList() (FieldsPostingsListIterator, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Fields")
- ret0, _ := ret[0].(FieldsIterator)
+ ret := m.ctrl.Call(m, "FieldsPostingsList")
+ ret0, _ := ret[0].(FieldsPostingsListIterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// Fields indicates an expected call of Fields
-func (mr *MockDocumentsBuilderMockRecorder) Fields() *gomock.Call {
+// FieldsPostingsList indicates an expected call of FieldsPostingsList
+func (mr *MockDocumentsBuilderMockRecorder) FieldsPostingsList() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockDocumentsBuilder)(nil).Fields))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FieldsPostingsList", reflect.TypeOf((*MockDocumentsBuilder)(nil).FieldsPostingsList))
}
// Terms mocks base method
@@ -1024,15 +1421,15 @@ func (mr *MockDocumentsBuilderMockRecorder) Terms(field interface{}) *gomock.Cal
}
// Reset mocks base method
-func (m *MockDocumentsBuilder) Reset(offset postings.ID) {
+func (m *MockDocumentsBuilder) Reset() {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "Reset", offset)
+ m.ctrl.Call(m, "Reset")
}
// Reset indicates an expected call of Reset
-func (mr *MockDocumentsBuilderMockRecorder) Reset(offset interface{}) *gomock.Call {
+func (mr *MockDocumentsBuilderMockRecorder) Reset() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockDocumentsBuilder)(nil).Reset), offset)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockDocumentsBuilder)(nil).Reset))
}
// Docs mocks base method
@@ -1093,6 +1490,195 @@ func (mr *MockDocumentsBuilderMockRecorder) InsertBatch(b interface{}) *gomock.C
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertBatch", reflect.TypeOf((*MockDocumentsBuilder)(nil).InsertBatch), b)
}
+// SetIndexConcurrency mocks base method
+func (m *MockDocumentsBuilder) SetIndexConcurrency(value int) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "SetIndexConcurrency", value)
+}
+
+// SetIndexConcurrency indicates an expected call of SetIndexConcurrency
+func (mr *MockDocumentsBuilderMockRecorder) SetIndexConcurrency(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIndexConcurrency", reflect.TypeOf((*MockDocumentsBuilder)(nil).SetIndexConcurrency), value)
+}
+
+// IndexConcurrency mocks base method
+func (m *MockDocumentsBuilder) IndexConcurrency() int {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IndexConcurrency")
+ ret0, _ := ret[0].(int)
+ return ret0
+}
+
+// IndexConcurrency indicates an expected call of IndexConcurrency
+func (mr *MockDocumentsBuilderMockRecorder) IndexConcurrency() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IndexConcurrency", reflect.TypeOf((*MockDocumentsBuilder)(nil).IndexConcurrency))
+}
+
+// MockCloseableDocumentsBuilder is a mock of CloseableDocumentsBuilder interface
+type MockCloseableDocumentsBuilder struct {
+ ctrl *gomock.Controller
+ recorder *MockCloseableDocumentsBuilderMockRecorder
+}
+
+// MockCloseableDocumentsBuilderMockRecorder is the mock recorder for MockCloseableDocumentsBuilder
+type MockCloseableDocumentsBuilderMockRecorder struct {
+ mock *MockCloseableDocumentsBuilder
+}
+
+// NewMockCloseableDocumentsBuilder creates a new mock instance
+func NewMockCloseableDocumentsBuilder(ctrl *gomock.Controller) *MockCloseableDocumentsBuilder {
+ mock := &MockCloseableDocumentsBuilder{ctrl: ctrl}
+ mock.recorder = &MockCloseableDocumentsBuilderMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockCloseableDocumentsBuilder) EXPECT() *MockCloseableDocumentsBuilderMockRecorder {
+ return m.recorder
+}
+
+// FieldsPostingsList mocks base method
+func (m *MockCloseableDocumentsBuilder) FieldsPostingsList() (FieldsPostingsListIterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "FieldsPostingsList")
+ ret0, _ := ret[0].(FieldsPostingsListIterator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// FieldsPostingsList indicates an expected call of FieldsPostingsList
+func (mr *MockCloseableDocumentsBuilderMockRecorder) FieldsPostingsList() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FieldsPostingsList", reflect.TypeOf((*MockCloseableDocumentsBuilder)(nil).FieldsPostingsList))
+}
+
+// Terms mocks base method
+func (m *MockCloseableDocumentsBuilder) Terms(field []byte) (TermsIterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Terms", field)
+ ret0, _ := ret[0].(TermsIterator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Terms indicates an expected call of Terms
+func (mr *MockCloseableDocumentsBuilderMockRecorder) Terms(field interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terms", reflect.TypeOf((*MockCloseableDocumentsBuilder)(nil).Terms), field)
+}
+
+// Reset mocks base method
+func (m *MockCloseableDocumentsBuilder) Reset() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Reset")
+}
+
+// Reset indicates an expected call of Reset
+func (mr *MockCloseableDocumentsBuilderMockRecorder) Reset() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockCloseableDocumentsBuilder)(nil).Reset))
+}
+
+// Docs mocks base method
+func (m *MockCloseableDocumentsBuilder) Docs() []doc.Document {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Docs")
+ ret0, _ := ret[0].([]doc.Document)
+ return ret0
+}
+
+// Docs indicates an expected call of Docs
+func (mr *MockCloseableDocumentsBuilderMockRecorder) Docs() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Docs", reflect.TypeOf((*MockCloseableDocumentsBuilder)(nil).Docs))
+}
+
+// AllDocs mocks base method
+func (m *MockCloseableDocumentsBuilder) AllDocs() (index.IDDocIterator, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AllDocs")
+ ret0, _ := ret[0].(index.IDDocIterator)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AllDocs indicates an expected call of AllDocs
+func (mr *MockCloseableDocumentsBuilderMockRecorder) AllDocs() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllDocs", reflect.TypeOf((*MockCloseableDocumentsBuilder)(nil).AllDocs))
+}
+
+// Insert mocks base method
+func (m *MockCloseableDocumentsBuilder) Insert(d doc.Document) ([]byte, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Insert", d)
+ ret0, _ := ret[0].([]byte)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Insert indicates an expected call of Insert
+func (mr *MockCloseableDocumentsBuilderMockRecorder) Insert(d interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockCloseableDocumentsBuilder)(nil).Insert), d)
+}
+
+// InsertBatch mocks base method
+func (m *MockCloseableDocumentsBuilder) InsertBatch(b index.Batch) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InsertBatch", b)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// InsertBatch indicates an expected call of InsertBatch
+func (mr *MockCloseableDocumentsBuilderMockRecorder) InsertBatch(b interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertBatch", reflect.TypeOf((*MockCloseableDocumentsBuilder)(nil).InsertBatch), b)
+}
+
+// SetIndexConcurrency mocks base method
+func (m *MockCloseableDocumentsBuilder) SetIndexConcurrency(value int) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "SetIndexConcurrency", value)
+}
+
+// SetIndexConcurrency indicates an expected call of SetIndexConcurrency
+func (mr *MockCloseableDocumentsBuilderMockRecorder) SetIndexConcurrency(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIndexConcurrency", reflect.TypeOf((*MockCloseableDocumentsBuilder)(nil).SetIndexConcurrency), value)
+}
+
+// IndexConcurrency mocks base method
+func (m *MockCloseableDocumentsBuilder) IndexConcurrency() int {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IndexConcurrency")
+ ret0, _ := ret[0].(int)
+ return ret0
+}
+
+// IndexConcurrency indicates an expected call of IndexConcurrency
+func (mr *MockCloseableDocumentsBuilderMockRecorder) IndexConcurrency() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IndexConcurrency", reflect.TypeOf((*MockCloseableDocumentsBuilder)(nil).IndexConcurrency))
+}
+
+// Close mocks base method
+func (m *MockCloseableDocumentsBuilder) Close() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Close")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Close indicates an expected call of Close
+func (mr *MockCloseableDocumentsBuilderMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockCloseableDocumentsBuilder)(nil).Close))
+}
+
// MockSegmentsBuilder is a mock of SegmentsBuilder interface
type MockSegmentsBuilder struct {
ctrl *gomock.Controller
@@ -1116,19 +1702,19 @@ func (m *MockSegmentsBuilder) EXPECT() *MockSegmentsBuilderMockRecorder {
return m.recorder
}
-// Fields mocks base method
-func (m *MockSegmentsBuilder) Fields() (FieldsIterator, error) {
+// FieldsPostingsList mocks base method
+func (m *MockSegmentsBuilder) FieldsPostingsList() (FieldsPostingsListIterator, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Fields")
- ret0, _ := ret[0].(FieldsIterator)
+ ret := m.ctrl.Call(m, "FieldsPostingsList")
+ ret0, _ := ret[0].(FieldsPostingsListIterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-// Fields indicates an expected call of Fields
-func (mr *MockSegmentsBuilderMockRecorder) Fields() *gomock.Call {
+// FieldsPostingsList indicates an expected call of FieldsPostingsList
+func (mr *MockSegmentsBuilderMockRecorder) FieldsPostingsList() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fields", reflect.TypeOf((*MockSegmentsBuilder)(nil).Fields))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FieldsPostingsList", reflect.TypeOf((*MockSegmentsBuilder)(nil).FieldsPostingsList))
}
// Terms mocks base method
@@ -1147,15 +1733,15 @@ func (mr *MockSegmentsBuilderMockRecorder) Terms(field interface{}) *gomock.Call
}
// Reset mocks base method
-func (m *MockSegmentsBuilder) Reset(offset postings.ID) {
+func (m *MockSegmentsBuilder) Reset() {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "Reset", offset)
+ m.ctrl.Call(m, "Reset")
}
// Reset indicates an expected call of Reset
-func (mr *MockSegmentsBuilderMockRecorder) Reset(offset interface{}) *gomock.Call {
+func (mr *MockSegmentsBuilderMockRecorder) Reset() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockSegmentsBuilder)(nil).Reset), offset)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockSegmentsBuilder)(nil).Reset))
}
// Docs mocks base method
diff --git a/src/m3ninx/index/segment/types.go b/src/m3ninx/index/segment/types.go
index c80bc3a8c1..566a25bd00 100644
--- a/src/m3ninx/index/segment/types.go
+++ b/src/m3ninx/index/segment/types.go
@@ -57,12 +57,23 @@ type Segment interface {
ContainsField(field []byte) (bool, error)
// Reader returns a point-in-time accessor to search the segment.
- Reader() (index.Reader, error)
+ Reader() (Reader, error)
// Close closes the segment and releases any internal resources.
Close() error
}
+// Reader extends the index reader interface to allow reading
+// segment fields and terms.
+type Reader interface {
+ index.Reader
+ FieldsIterable
+ TermsIterable
+
+ // ContainsField returns a bool indicating if the Segment contains the provided field.
+ ContainsField(field []byte) (bool, error)
+}
+
// FieldsIterable can iterate over segment fields, it is not by default
// concurrency safe.
type FieldsIterable interface {
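
For reference, a minimal consumer sketch of the widened `Reader` return type; the helper name and error handling are illustrative and not part of this change:

```go
package example

import (
	"fmt"

	"github.com/m3db/m3/src/m3ninx/index/segment"
)

// dumpFields sketches how a caller can now iterate fields from the same
// point-in-time accessor it searches with, since Segment.Reader() returns
// segment.Reader rather than index.Reader.
func dumpFields(seg segment.Segment) error {
	reader, err := seg.Reader()
	if err != nil {
		return err
	}
	defer reader.Close()

	fields, err := reader.Fields()
	if err != nil {
		return err
	}
	defer fields.Close()

	for fields.Next() {
		// Current() is only valid until the subsequent call to Next().
		fmt.Printf("field: %s\n", fields.Current())
	}
	return fields.Err()
}
```
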
@@ -72,6 +83,15 @@ type FieldsIterable interface {
Fields() (FieldsIterator, error)
}
+// FieldsPostingsListIterable can iterate over segment fields and their
+// postings lists; it is not concurrency safe by default.
+type FieldsPostingsListIterable interface {
+ // FieldsPostingsList returns an iterator over the list of known fields
+ // and their postings lists, in order by name; it is not valid to read
+ // from after mutating the builder by inserting more documents.
+ FieldsPostingsList() (FieldsPostingsListIterator, error)
+}
+
// TermsIterable can iterate over segment terms, it is not by default
// concurrency safe.
type TermsIterable interface {
@@ -97,30 +117,37 @@ type OrderedBytesIterator interface {
Close() error
}
+// FieldsPostingsListIterator iterates over all known fields and their postings lists.
+type FieldsPostingsListIterator interface {
+ Iterator
+
+ // Current returns the current field and associated postings list.
+ // NB: the field returned is only valid until the subsequent call to Next().
+ Current() ([]byte, postings.List)
+}
+
// FieldsIterator iterates over all known fields.
type FieldsIterator interface {
- // Next returns a bool indicating if there are any more elements.
- Next() bool
+ Iterator
- // Current returns the current element.
- // NB: the element returned is only valid until the subsequent call to Next().
+ // Current returns the current field.
+ // NB: the field returned is only valid until the subsequent call to Next().
Current() []byte
-
- // Err returns any errors encountered during iteration.
- Err() error
-
- // Close releases any resources held by the iterator.
- Close() error
}
// TermsIterator iterates over all known terms for the provided field.
type TermsIterator interface {
- // Next returns a bool indicating if there are any more elements.
- Next() bool
+ Iterator
// Current returns the current element.
// NB: the element returned is only valid until the subsequent call to Next().
Current() (term []byte, postings postings.List)
+}
+
+// Iterator holds common iterator methods.
+type Iterator interface {
+ // Next returns a bool indicating if there are any more elements.
+ Next() bool
// Err returns any errors encountered during iteration.
Err() error
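
For reference, a minimal sketch of the new iteration contract: `FieldsPostingsListIterator` shares `Next`/`Err`/`Close` through the common `Iterator` interface and pairs each field with its postings list. The helper name is illustrative:

```go
package example

import (
	"fmt"

	"github.com/m3db/m3/src/m3ninx/index/segment"
)

// walkFieldsPostings sketches draining a FieldsPostingsListIterator using
// the shared Next/Err/Close methods factored into the Iterator interface.
func walkFieldsPostings(b segment.Builder) error {
	iter, err := b.FieldsPostingsList()
	if err != nil {
		return err
	}
	defer iter.Close()

	for iter.Next() {
		// The field bytes are only valid until the next call to Next().
		field, pl := iter.Current()
		fmt.Printf("field %s -> postings %v\n", field, pl)
	}
	return iter.Err()
}
```
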
@@ -134,8 +161,10 @@ type MutableSegment interface {
Segment
DocumentsBuilder
- // Offset returns the postings offset.
- Offset() postings.ID
+ // Fields returns an iterator over the list of known fields, in order
+ // by name; it is not valid to read from after mutating the segment
+ // by inserting more documents.
+ Fields() (FieldsIterator, error)
// Seal marks the Mutable Segment immutable.
Seal() error
@@ -153,11 +182,11 @@ type ImmutableSegment interface {
// Builder is a builder that can be used to construct segments.
type Builder interface {
- FieldsIterable
+ FieldsPostingsListIterable
TermsIterable
// Reset resets the builder for reuse.
- Reset(offset postings.ID)
+ Reset()
// Docs returns the current docs slice, this is not safe to modify
// and is invalidated on a call to reset.
@@ -167,10 +196,23 @@ type Builder interface {
AllDocs() (index.IDDocIterator, error)
}
-// DocumentsBuilder is a builder is written documents to.
+// DocumentsBuilder is a builder that has documents written to it.
type DocumentsBuilder interface {
Builder
index.Writer
+
+ // SetIndexConcurrency sets the concurrency used for building the segment.
+ SetIndexConcurrency(value int)
+
+ // IndexConcurrency returns the concurrency used for building the segment.
+ IndexConcurrency() int
+}
+
+// CloseableDocumentsBuilder is a builder that has documents written to it and has freeable resources.
+type CloseableDocumentsBuilder interface {
+ DocumentsBuilder
+
+ Close() error
}
// SegmentsBuilder is a builder that is built from segments.
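
For reference, a minimal sketch of driving the new `CloseableDocumentsBuilder` contract; the helper and the concurrency value are illustrative:

```go
package example

import (
	"github.com/m3db/m3/src/m3ninx/doc"
	"github.com/m3db/m3/src/m3ninx/index/segment"
)

// buildDocs sketches the new builder lifecycle: Reset no longer takes a
// postings.ID offset, concurrency is tunable via SetIndexConcurrency, and
// builders with freeable resources must now be closed.
func buildDocs(b segment.CloseableDocumentsBuilder, docs []doc.Document) error {
	defer b.Close()

	b.Reset()
	b.SetIndexConcurrency(4) // illustrative value
	for _, d := range docs {
		if _, err := b.Insert(d); err != nil {
			return err
		}
	}
	return nil
}
```
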
diff --git a/src/m3ninx/index/types.go b/src/m3ninx/index/types.go
index 1a2fbf6beb..99a141fba6 100644
--- a/src/m3ninx/index/types.go
+++ b/src/m3ninx/index/types.go
@@ -28,7 +28,8 @@ import (
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/postings"
xerrors "github.com/m3db/m3/src/x/errors"
- vregex "github.com/m3db/vellum/regexp"
+
+ vregex "github.com/m3dbx/vellum/regexp"
)
// ErrDocNotFound is the error returned when there is no document for a given postings ID.
diff --git a/src/m3ninx/persist/persist_mock.go b/src/m3ninx/persist/persist_mock.go
index bc45964f4b..7776be1956 100644
--- a/src/m3ninx/persist/persist_mock.go
+++ b/src/m3ninx/persist/persist_mock.go
@@ -351,6 +351,20 @@ func (mr *MockIndexFileSetReaderMockRecorder) ReadSegmentFileSet() *gomock.Call
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadSegmentFileSet", reflect.TypeOf((*MockIndexFileSetReader)(nil).ReadSegmentFileSet))
}
+// IndexVolumeType mocks base method
+func (m *MockIndexFileSetReader) IndexVolumeType() IndexVolumeType {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IndexVolumeType")
+ ret0, _ := ret[0].(IndexVolumeType)
+ return ret0
+}
+
+// IndexVolumeType indicates an expected call of IndexVolumeType
+func (mr *MockIndexFileSetReaderMockRecorder) IndexVolumeType() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IndexVolumeType", reflect.TypeOf((*MockIndexFileSetReader)(nil).IndexVolumeType))
+}
+
// MockIndexSegmentFileSet is a mock of IndexSegmentFileSet interface
type MockIndexSegmentFileSet struct {
ctrl *gomock.Controller
diff --git a/src/m3ninx/persist/types.go b/src/m3ninx/persist/types.go
index 9aa57307bb..ec7eef2192 100644
--- a/src/m3ninx/persist/types.go
+++ b/src/m3ninx/persist/types.go
@@ -73,6 +73,8 @@ type IndexFileSetReader interface {
// The IndexSegmentFileSet will only be valid before it's closed,
// after that calls to Read or Bytes on it will have unexpected results.
ReadSegmentFileSet() (IndexSegmentFileSet, error)
+
+ // IndexVolumeType returns the volume type of the index file set.
+ IndexVolumeType() IndexVolumeType
}
// IndexSegmentFileSet is an index segment file set.
@@ -96,6 +98,15 @@ type IndexSegmentFile interface {
Mmap() (mmap.Descriptor, error)
}
+// IndexVolumeType is the type of an index volume.
+type IndexVolumeType string
+
+const (
+ // DefaultIndexVolumeType is the default IndexVolumeType, used when no
+ // volume type is otherwise specified.
+ DefaultIndexVolumeType IndexVolumeType = "default"
+)
+
// IndexSegmentType is the type of an index file set.
type IndexSegmentType string
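
For reference, a minimal sketch of consuming the new accessor; the empty-string fallback is an illustrative assumption, since readers may already default internally:

```go
package example

import "github.com/m3db/m3/src/m3ninx/persist"

// volumeTypeOf sketches reading the volume type from a file set reader,
// falling back to DefaultIndexVolumeType when none was recorded.
func volumeTypeOf(r persist.IndexFileSetReader) persist.IndexVolumeType {
	if vt := r.IndexVolumeType(); vt != "" {
		return vt
	}
	return persist.DefaultIndexVolumeType
}
```
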
diff --git a/src/m3ninx/persist/writer.go b/src/m3ninx/persist/writer.go
index 87672005d1..23bab30e0a 100644
--- a/src/m3ninx/persist/writer.go
+++ b/src/m3ninx/persist/writer.go
@@ -21,24 +21,34 @@
package persist
import (
+ "errors"
"fmt"
"io"
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
+ "github.com/m3db/m3/src/m3ninx/x"
+)
+
+var (
+ errDocsDataFileNotWritten = errors.New("docs data file must be written before index data")
)
// NewMutableSegmentFileSetWriter returns a new IndexSegmentFileSetWriter for writing
// out the provided Mutable Segment.
-func NewMutableSegmentFileSetWriter() (MutableSegmentFileSetWriter, error) {
- w, err := fst.NewWriter(fst.WriterOptions{})
+func NewMutableSegmentFileSetWriter(
+ fstOpts fst.WriterOptions,
+) (MutableSegmentFileSetWriter, error) {
+ w, err := fst.NewWriter(fstOpts)
if err != nil {
return nil, err
}
return newMutableSegmentFileSetWriter(w)
}
-func newMutableSegmentFileSetWriter(fsWriter fst.Writer) (MutableSegmentFileSetWriter, error) {
+func newMutableSegmentFileSetWriter(
+ fsWriter fst.Writer,
+) (MutableSegmentFileSetWriter, error) {
return &writer{
fsWriter: fsWriter,
}, nil
@@ -95,3 +105,106 @@ func (w *writer) WriteFile(fileType IndexSegmentFileType, iow io.Writer) error {
}
return fmt.Errorf("unknown fileType: %s provided", fileType)
}
+
+// NewFSTSegmentDataFileSetWriter creates a new file set writer for
+// fst segment data.
+func NewFSTSegmentDataFileSetWriter(
+ data fst.SegmentData,
+) (IndexSegmentFileSetWriter, error) {
+ if err := data.Validate(); err != nil {
+ return nil, err
+ }
+
+ docsWriter, err := fst.NewDocumentsWriter()
+ if err != nil {
+ return nil, err
+ }
+
+ return &fstSegmentDataWriter{
+ data: data,
+ docsWriter: docsWriter,
+ }, nil
+}
+
+type fstSegmentDataWriter struct {
+ data fst.SegmentData
+ docsWriter *fst.DocumentsWriter
+ docsDataFileWritten bool
+}
+
+func (w *fstSegmentDataWriter) SegmentType() IndexSegmentType {
+ return FSTIndexSegmentType
+}
+
+func (w *fstSegmentDataWriter) MajorVersion() int {
+ return w.data.Version.Major
+}
+
+func (w *fstSegmentDataWriter) MinorVersion() int {
+ return w.data.Version.Minor
+}
+
+func (w *fstSegmentDataWriter) SegmentMetadata() []byte {
+ return w.data.Metadata
+}
+
+func (w *fstSegmentDataWriter) Files() []IndexSegmentFileType {
+ return []IndexSegmentFileType{
+ DocumentDataIndexSegmentFileType,
+ DocumentIndexIndexSegmentFileType,
+ PostingsIndexSegmentFileType,
+ FSTTermsIndexSegmentFileType,
+ FSTFieldsIndexSegmentFileType,
+ }
+}
+
+func (w *fstSegmentDataWriter) WriteFile(fileType IndexSegmentFileType, iow io.Writer) error {
+ switch fileType {
+ case DocumentDataIndexSegmentFileType:
+ if err := w.writeDocsData(iow); err != nil {
+ return err
+ }
+ w.docsDataFileWritten = true
+ return nil
+ case DocumentIndexIndexSegmentFileType:
+ if !w.docsDataFileWritten {
+ return errDocsDataFileNotWritten
+ }
+ return w.writeDocsIndex(iow)
+ case PostingsIndexSegmentFileType:
+ _, err := iow.Write(w.data.PostingsData.Bytes)
+ return err
+ case FSTFieldsIndexSegmentFileType:
+ _, err := iow.Write(w.data.FSTFieldsData.Bytes)
+ return err
+ case FSTTermsIndexSegmentFileType:
+ _, err := iow.Write(w.data.FSTTermsData.Bytes)
+ return err
+ }
+ return fmt.Errorf("unknown fileType: %s provided", fileType)
+}
+
+func (w *fstSegmentDataWriter) writeDocsData(iow io.Writer) error {
+ if r := w.data.DocsReader; r != nil {
+ iter := r.Iter()
+ closer := x.NewSafeCloser(iter)
+ defer closer.Close()
+ w.docsWriter.Reset(fst.DocumentsWriterOptions{
+ Iter: iter,
+ SizeHint: r.Len(),
+ })
+ return w.docsWriter.WriteDocumentsData(iow)
+ }
+
+ _, err := iow.Write(w.data.DocsData.Bytes)
+ return err
+}
+
+func (w *fstSegmentDataWriter) writeDocsIndex(iow io.Writer) error {
+ if r := w.data.DocsReader; r != nil {
+ return w.docsWriter.WriteDocumentsIndex(iow)
+ }
+
+ // NB: assuming DocsIdxData holds the docs index bytes here; writing
+ // DocsData would emit the docs data payload twice.
+ _, err := iow.Write(w.data.DocsIdxData.Bytes)
+ return err
+}
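
For reference, a minimal sketch of why `Files()` ordering matters to the new writer: the docs data file must be written before the docs index file, and iterating `Files()` in order satisfies that. The on-disk naming and the string conversion of `IndexSegmentFileType` are illustrative assumptions (its use with `%s` above suggests a string type):

```go
package example

import (
	"os"
	"path"

	"github.com/m3db/m3/src/m3ninx/persist"
)

// writeFileSet sketches persisting every file of a segment file set in the
// order returned by Files(), which writes docs data before the docs index
// so WriteFile never returns errDocsDataFileNotWritten.
func writeFileSet(w persist.IndexSegmentFileSetWriter, dir string) error {
	for _, fileType := range w.Files() {
		f, err := os.Create(path.Join(dir, string(fileType)))
		if err != nil {
			return err
		}
		if err := w.WriteFile(fileType, f); err != nil {
			f.Close()
			return err
		}
		if err := f.Close(); err != nil {
			return err
		}
	}
	return nil
}
```
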
diff --git a/src/m3ninx/postings/atomic.go b/src/m3ninx/postings/atomic.go
index 94f41faa4d..7559ac0ff8 100644
--- a/src/m3ninx/postings/atomic.go
+++ b/src/m3ninx/postings/atomic.go
@@ -21,7 +21,7 @@
package postings
import (
- "github.com/uber-go/atomic"
+ "go.uber.org/atomic"
)
// AtomicID is an atomic ID.
diff --git a/src/m3ninx/postings/pilosa/codec.go b/src/m3ninx/postings/pilosa/codec.go
index 89fde43b4e..8c8998f7b1 100644
--- a/src/m3ninx/postings/pilosa/codec.go
+++ b/src/m3ninx/postings/pilosa/codec.go
@@ -25,7 +25,7 @@ import (
"github.com/m3db/m3/src/m3ninx/postings"
idxroaring "github.com/m3db/m3/src/m3ninx/postings/roaring"
- "github.com/m3db/pilosa/roaring"
+ "github.com/m3dbx/pilosa/roaring"
)
// Encoder helps serialize a Pilosa RoaringBitmap
diff --git a/src/m3ninx/postings/pilosa/iterator.go b/src/m3ninx/postings/pilosa/iterator.go
index 6ed651d073..eb5e06fba5 100644
--- a/src/m3ninx/postings/pilosa/iterator.go
+++ b/src/m3ninx/postings/pilosa/iterator.go
@@ -22,7 +22,7 @@ package pilosa
import (
"github.com/m3db/m3/src/m3ninx/postings"
- "github.com/m3db/pilosa/roaring"
+ "github.com/m3dbx/pilosa/roaring"
)
// NB: need to do this to find a path into our postings list which doesn't require every
diff --git a/src/m3ninx/postings/pilosa/iterator_test.go b/src/m3ninx/postings/pilosa/iterator_test.go
index d39445a768..d41d045e31 100644
--- a/src/m3ninx/postings/pilosa/iterator_test.go
+++ b/src/m3ninx/postings/pilosa/iterator_test.go
@@ -24,7 +24,7 @@ import (
"testing"
"github.com/m3db/m3/src/m3ninx/postings"
- "github.com/m3db/pilosa/roaring"
+ "github.com/m3dbx/pilosa/roaring"
"github.com/stretchr/testify/require"
)
diff --git a/src/m3ninx/postings/roaring/roaring.go b/src/m3ninx/postings/roaring/roaring.go
index e8b2c4e671..e1f8a3d820 100644
--- a/src/m3ninx/postings/roaring/roaring.go
+++ b/src/m3ninx/postings/roaring/roaring.go
@@ -26,7 +26,7 @@ import (
"github.com/m3db/m3/src/m3ninx/postings"
"github.com/m3db/m3/src/m3ninx/x"
- "github.com/m3db/pilosa/roaring"
+ "github.com/m3dbx/pilosa/roaring"
)
var (
diff --git a/src/m3ninx/postings/roaring/roaring_bench_unions_test.go b/src/m3ninx/postings/roaring/roaring_bench_unions_test.go
index 0c86eb5c2f..60bd29afd8 100644
--- a/src/m3ninx/postings/roaring/roaring_bench_unions_test.go
+++ b/src/m3ninx/postings/roaring/roaring_bench_unions_test.go
@@ -24,7 +24,7 @@ import (
"math/rand"
"testing"
- "github.com/m3db/pilosa/roaring"
+ "github.com/m3dbx/pilosa/roaring"
)
const (
diff --git a/src/m3ninx/search/proptest/segment_gen.go b/src/m3ninx/search/proptest/segment_gen.go
index 842c3d7059..dbc6ad9e43 100644
--- a/src/m3ninx/search/proptest/segment_gen.go
+++ b/src/m3ninx/search/proptest/segment_gen.go
@@ -29,7 +29,6 @@ import (
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
"github.com/m3db/m3/src/m3ninx/index/segment/mem"
- "github.com/m3db/m3/src/m3ninx/postings"
"github.com/leanovate/gopter"
"github.com/leanovate/gopter/gen"
@@ -56,7 +55,7 @@ func collectDocs(iter doc.Iterator) ([]doc.Document, error) {
func newTestMemSegment(t *testing.T, docs []doc.Document) segment.MutableSegment {
opts := mem.NewOptions()
- s, err := mem.NewSegment(postings.ID(0), opts)
+ s, err := mem.NewSegment(opts)
require.NoError(t, err)
for _, d := range docs {
_, err := s.Insert(d)
@@ -68,8 +67,7 @@ func newTestMemSegment(t *testing.T, docs []doc.Document) segment.MutableSegment
func (i propTestInput) generate(t *testing.T, docs []doc.Document) []segment.Segment {
var result []segment.Segment
for j := 0; j < len(i.segments); j++ {
- initialOffset := postings.ID(i.segments[j].initialDocIDOffset)
- s, err := mem.NewSegment(initialOffset, memOptions)
+ s, err := mem.NewSegment(memOptions)
require.NoError(t, err)
for k := 0; k < len(i.docIds[j]); k++ {
idx := i.docIds[j][k]
@@ -145,8 +143,7 @@ func genPropTestInput(numDocs int) gopter.Gen {
func genSegment() gopter.Gen {
return gopter.CombineGens(
- gen.Bool(), // simple segment
- gen.IntRange(1, 5), // initial doc id offset
+ gen.Bool(), // simple segment
).Map(func(val interface{}) generatedSegment {
var inputs []interface{}
if x, ok := val.(*gopter.GenResult); ok {
@@ -159,15 +156,13 @@ func genSegment() gopter.Gen {
inputs = val.([]interface{})
}
return generatedSegment{
- simpleSegment: inputs[0].(bool),
- initialDocIDOffset: inputs[1].(int),
+ simpleSegment: inputs[0].(bool),
}
})
}
type generatedSegment struct {
- simpleSegment bool
- initialDocIDOffset int
+ simpleSegment bool
}
type randomDocIds []int
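
For reference, a minimal sketch mirroring the simplified constructor used above; with the postings.ID offset parameter removed, callers only pass options:

```go
package example

import (
	"github.com/m3db/m3/src/m3ninx/doc"
	"github.com/m3db/m3/src/m3ninx/index/segment"
	"github.com/m3db/m3/src/m3ninx/index/segment/mem"
)

// newSegment sketches building a mem segment with the new signature.
func newSegment(docs []doc.Document) (segment.MutableSegment, error) {
	s, err := mem.NewSegment(mem.NewOptions())
	if err != nil {
		return nil, err
	}
	for _, d := range docs {
		if _, err := s.Insert(d); err != nil {
			return nil, err
		}
	}
	return s, nil
}
```
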
diff --git a/src/m3nsch/README.md b/src/m3nsch/README.md
index 7056169667..744aa76f52 100644
--- a/src/m3nsch/README.md
+++ b/src/m3nsch/README.md
@@ -1,4 +1,4 @@
-m3nsch [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+m3nsch
======
m3nsch (pronounced `mensch`) is a load testing tool for M3DB. It has two components:
- `m3nsch_server`: long lived process which does the load generation
@@ -105,14 +105,3 @@ $ ./m3nsch_client --endpoints $ENDPOINTS stop
# probably want to teardown the running server processes on the various hosts
```
-
-
-
-This project is released under the [Apache License, Version 2.0](LICENSE).
-
-[doc-img]: https://godoc.org/github.com/m3db/m3nsch?status.svg
-[doc]: https://godoc.org/github.com/m3db/m3nsch
-[ci-img]: https://travis-ci.org/m3db/m3nsch.svg?branch=master
-[ci]: https://travis-ci.org/m3db/m3nsch
-[cov-img]: https://coveralls.io/repos/m3db/m3nsch/badge.svg?branch=master&service=github
-[cov]: https://coveralls.io/github/m3db/m3nsch?branch=master
diff --git a/src/m3nsch/agent/agent.go b/src/m3nsch/agent/agent.go
index 7f96784840..99216b6eb8 100644
--- a/src/m3nsch/agent/agent.go
+++ b/src/m3nsch/agent/agent.go
@@ -255,7 +255,7 @@ func (ms *m3nschAgent) MaxQPS() int64 {
// nolint: unparam
func (ms *m3nschAgent) newMethodMetrics(method string) instrument.MethodMetrics {
subScope := ms.opts.InstrumentOptions().MetricsScope().SubScope("agent")
- return instrument.NewMethodMetrics(subScope, method, ms.opts.InstrumentOptions().MetricsSamplingRate())
+ return instrument.NewMethodMetrics(subScope, method, ms.opts.InstrumentOptions().TimerOptions())
}
func (ms *m3nschAgent) tickPeriodWithLock() time.Duration {
diff --git a/src/m3nsch/generated/proto/m3nsch/m3nsch_pb_mock.go b/src/m3nsch/generated/proto/m3nsch/m3nsch_pb_mock.go
index 013bede9e6..292a4beffb 100644
--- a/src/m3nsch/generated/proto/m3nsch/m3nsch_pb_mock.go
+++ b/src/m3nsch/generated/proto/m3nsch/m3nsch_pb_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/m3nsch/generated/proto/m3nsch/pb.go
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/metrics/README.md b/src/metrics/README.md
index c1bed3e918..0bd4f8b35f 100644
--- a/src/metrics/README.md
+++ b/src/metrics/README.md
@@ -1,7 +1,3 @@
## WARNING: This is Alpha software and not intended for use until a stable release.
Metrics data structures and serialization/deserialization protocols.
-
-
-
-This project is released under the [Apache License, Version 2.0](LICENSE).
diff --git a/src/metrics/aggregation/type_string.go b/src/metrics/aggregation/type_string.go
index 6a2e6a8710..d62ed64c18 100644
--- a/src/metrics/aggregation/type_string.go
+++ b/src/metrics/aggregation/type_string.go
@@ -26,6 +26,8 @@ import "fmt"
const _Type_name = "UnknownTypeLastMinMaxMeanMedianCountSumSumSqStdevP10P20P30P40P50P60P70P80P90P95P99P999P9999"
+var _Type_name_bytes = []byte("UnknownTypeLastMinMaxMeanMedianCountSumSumSqStdevP10P20P30P40P50P60P70P80P90P95P99P999P9999")
+
var _Type_index = [...]uint8{0, 11, 15, 18, 21, 25, 31, 36, 39, 44, 49, 52, 55, 58, 61, 64, 67, 70, 73, 76, 79, 82, 86, 91}
func (i Type) String() string {
@@ -34,3 +36,10 @@ func (i Type) String() string {
}
return _Type_name[_Type_index[i]:_Type_index[i+1]]
}
+
+func (i Type) Bytes() []byte {
+ if i < 0 || i >= Type(len(_Type_index)-1) {
+ return []byte(fmt.Sprintf("Type(%d)", i))
+ }
+ return _Type_name_bytes[_Type_index[i]:_Type_index[i+1]]
+}
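
For reference, a minimal sketch of what the `Bytes()` variant buys: hot paths can append the type name into an existing buffer without the per-call allocation of a `[]byte(string)` conversion. Note the returned slice aliases the shared `_Type_name_bytes` backing array and must not be mutated:

```go
package example

import "github.com/m3db/m3/src/metrics/aggregation"

// appendTypeName appends the aggregation type's name to buf without
// allocating; the bytes returned by Bytes() are shared and read-only.
func appendTypeName(buf []byte, t aggregation.Type) []byte {
	return append(buf, t.Bytes()...)
}
```
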
diff --git a/src/metrics/encoding/protobuf/reset.go b/src/metrics/encoding/protobuf/reset.go
index f546895465..6a97fdff5e 100644
--- a/src/metrics/encoding/protobuf/reset.go
+++ b/src/metrics/encoding/protobuf/reset.go
@@ -32,13 +32,21 @@ func resetAggregatedMetricProto(pb *metricpb.AggregatedMetric) {
pb.EncodeNanos = 0
}
-// resetMetricWithMetadatasProto resets the metric with metadatas proto, and
-// in particular message fields that are slices because the `Unmarshal` generated
+// ReuseMetricWithMetadatasProto allows for zero-alloc reuse of a
+// *metricpb.MetricWithMetadatas by deep-resetting its internal slices, so
+// that gogoprotobuf's unmarshal function reuses the slices and byte
+// buffers already allocated on the message itself.
+//
+// The message must use nullable: false annotations so that its slices do
+// not use pointer types; this is required for zero-alloc resets.
+//
+// This method resets the metric with metadatas proto, and in particular
+// message fields that are slices because the `Unmarshal` generated
// from gogoprotobuf simply append a new entry at the end of the slice, and
// as such, the fields with slice types need to be reset to be zero-length.
// NB: reset only needs to be done to the top-level slice fields as the individual
// items in the slice are created afresh during unmarshaling.
-func resetMetricWithMetadatasProto(pb *metricpb.MetricWithMetadatas) {
+func ReuseMetricWithMetadatasProto(pb *metricpb.MetricWithMetadatas) {
if pb == nil {
return
}
@@ -48,6 +56,8 @@ func resetMetricWithMetadatasProto(pb *metricpb.MetricWithMetadatas) {
resetGaugeWithMetadatasProto(pb.GaugeWithMetadatas)
resetForwardedMetricWithMetadataProto(pb.ForwardedMetricWithMetadata)
resetTimedMetricWithMetadataProto(pb.TimedMetricWithMetadata)
+ resetTimedMetricWithMetadatasProto(pb.TimedMetricWithMetadatas)
+ resetTimedMetricWithStoragePolicyProto(pb.TimedMetricWithStoragePolicy)
}
func resetCounterWithMetadatasProto(pb *metricpb.CounterWithMetadatas) {
@@ -90,6 +100,14 @@ func resetTimedMetricWithMetadataProto(pb *metricpb.TimedMetricWithMetadata) {
resetTimedMetadata(&pb.Metadata)
}
+func resetTimedMetricWithMetadatasProto(pb *metricpb.TimedMetricWithMetadatas) {
+ if pb == nil {
+ return
+ }
+ resetTimedMetric(&pb.Metric)
+ resetMetadatas(&pb.Metadatas)
+}
+
func resetTimedMetricWithStoragePolicyProto(pb *metricpb.TimedMetricWithStoragePolicy) {
if pb == nil {
return
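
For reference, a minimal sketch of the reuse pattern the newly exported `ReuseMetricWithMetadatasProto` enables, mirroring how the unaggregated iterator below uses it: one message value is reset and re-unmarshaled per payload instead of being reallocated:

```go
package example

import (
	"github.com/m3db/m3/src/metrics/encoding/protobuf"
	"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
)

// decodeAll decodes each payload into the same proto message, deep-resetting
// it between payloads so gogoprotobuf reuses the allocated slices and buffers.
func decodeAll(payloads [][]byte) error {
	var pb metricpb.MetricWithMetadatas
	for _, payload := range payloads {
		protobuf.ReuseMetricWithMetadatasProto(&pb)
		if err := pb.Unmarshal(payload); err != nil {
			return err
		}
		// ... dispatch on pb.Type here ...
	}
	return nil
}
```
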
diff --git a/src/metrics/encoding/protobuf/reset_test.go b/src/metrics/encoding/protobuf/reset_test.go
index 9a97d6de25..a3a8b3eb94 100644
--- a/src/metrics/encoding/protobuf/reset_test.go
+++ b/src/metrics/encoding/protobuf/reset_test.go
@@ -114,8 +114,8 @@ var (
}
)
-func TestResetMetricWithMetadatasProtoNilProto(t *testing.T) {
- require.NotPanics(t, func() { resetMetricWithMetadatasProto(nil) })
+func TestReuseMetricWithMetadatasProtoNilProto(t *testing.T) {
+ require.NotPanics(t, func() { ReuseMetricWithMetadatasProto(nil) })
}
func TestResetAggregatedMetricProto(t *testing.T) {
@@ -145,7 +145,7 @@ func TestResetAggregatedMetricProto(t *testing.T) {
require.True(t, cap(input.Metric.TimedMetric.Id) > 0)
}
-func TestResetMetricWithMetadatasProtoOnlyCounter(t *testing.T) {
+func TestReuseMetricWithMetadatasProtoOnlyCounter(t *testing.T) {
input := &metricpb.MetricWithMetadatas{
Type: metricpb.MetricWithMetadatas_COUNTER_WITH_METADATAS,
CounterWithMetadatas: &metricpb.CounterWithMetadatas{
@@ -160,13 +160,13 @@ func TestResetMetricWithMetadatasProtoOnlyCounter(t *testing.T) {
Metadatas: testMetadatasAfterResetProto,
},
}
- resetMetricWithMetadatasProto(input)
+ ReuseMetricWithMetadatasProto(input)
require.Equal(t, expected, input)
require.True(t, cap(input.CounterWithMetadatas.Counter.Id) > 0)
require.True(t, cap(input.CounterWithMetadatas.Metadatas.Metadatas) > 0)
}
-func TestResetMetricWithMetadatasProtoOnlyBatchTimer(t *testing.T) {
+func TestReuseMetricWithMetadatasProtoOnlyBatchTimer(t *testing.T) {
input := &metricpb.MetricWithMetadatas{
Type: metricpb.MetricWithMetadatas_BATCH_TIMER_WITH_METADATAS,
BatchTimerWithMetadatas: &metricpb.BatchTimerWithMetadatas{
@@ -181,13 +181,13 @@ func TestResetMetricWithMetadatasProtoOnlyBatchTimer(t *testing.T) {
Metadatas: testMetadatasAfterResetProto,
},
}
- resetMetricWithMetadatasProto(input)
+ ReuseMetricWithMetadatasProto(input)
require.Equal(t, expected, input)
require.True(t, cap(input.BatchTimerWithMetadatas.BatchTimer.Id) > 0)
require.True(t, cap(input.BatchTimerWithMetadatas.Metadatas.Metadatas) > 0)
}
-func TestResetMetricWithMetadatasProtoOnlyGauge(t *testing.T) {
+func TestReuseMetricWithMetadatasProtoOnlyGauge(t *testing.T) {
input := &metricpb.MetricWithMetadatas{
Type: metricpb.MetricWithMetadatas_GAUGE_WITH_METADATAS,
GaugeWithMetadatas: &metricpb.GaugeWithMetadatas{
@@ -202,13 +202,13 @@ func TestResetMetricWithMetadatasProtoOnlyGauge(t *testing.T) {
Metadatas: testMetadatasAfterResetProto,
},
}
- resetMetricWithMetadatasProto(input)
+ ReuseMetricWithMetadatasProto(input)
require.Equal(t, expected, input)
require.True(t, cap(input.GaugeWithMetadatas.Gauge.Id) > 0)
require.True(t, cap(input.GaugeWithMetadatas.Metadatas.Metadatas) > 0)
}
-func TestResetMetricWithMetadatasProtoOnlyForwardedMetric(t *testing.T) {
+func TestReuseMetricWithMetadatasProtoOnlyForwardedMetric(t *testing.T) {
input := &metricpb.MetricWithMetadatas{
Type: metricpb.MetricWithMetadatas_FORWARDED_METRIC_WITH_METADATA,
ForwardedMetricWithMetadata: &metricpb.ForwardedMetricWithMetadata{
@@ -223,14 +223,14 @@ func TestResetMetricWithMetadatasProtoOnlyForwardedMetric(t *testing.T) {
Metadata: testForwardMetadataAfterResetProto,
},
}
- resetMetricWithMetadatasProto(input)
+ ReuseMetricWithMetadatasProto(input)
require.Equal(t, expected, input)
require.True(t, cap(input.ForwardedMetricWithMetadata.Metric.Id) > 0)
require.True(t, cap(input.ForwardedMetricWithMetadata.Metric.Values) > 0)
require.True(t, cap(input.ForwardedMetricWithMetadata.Metadata.Pipeline.Ops) > 0)
}
-func TestResetMetricWithMetadatasProtoAll(t *testing.T) {
+func TestReuseMetricWithMetadatasProtoAll(t *testing.T) {
input := &metricpb.MetricWithMetadatas{
Type: metricpb.MetricWithMetadatas_GAUGE_WITH_METADATAS,
CounterWithMetadatas: &metricpb.CounterWithMetadatas{
@@ -269,7 +269,7 @@ func TestResetMetricWithMetadatasProtoAll(t *testing.T) {
Metadata: testForwardMetadataAfterResetProto,
},
}
- resetMetricWithMetadatasProto(input)
+ ReuseMetricWithMetadatasProto(input)
require.Equal(t, expected, input)
require.True(t, cap(input.CounterWithMetadatas.Counter.Id) > 0)
require.True(t, cap(input.CounterWithMetadatas.Metadatas.Metadatas) > 0)
diff --git a/src/metrics/encoding/protobuf/unaggregated_encoder.go b/src/metrics/encoding/protobuf/unaggregated_encoder.go
index d2c4d89bad..cee6855a4b 100644
--- a/src/metrics/encoding/protobuf/unaggregated_encoder.go
+++ b/src/metrics/encoding/protobuf/unaggregated_encoder.go
@@ -68,6 +68,8 @@ type unaggregatedEncoder struct {
gm metricpb.GaugeWithMetadatas
fm metricpb.ForwardedMetricWithMetadata
tm metricpb.TimedMetricWithMetadata
+ tms metricpb.TimedMetricWithMetadatas
+ pm metricpb.TimedMetricWithStoragePolicy
buf []byte
used int
@@ -127,6 +129,10 @@ func (enc *unaggregatedEncoder) EncodeMessage(msg encoding.UnaggregatedMessageUn
return enc.encodeForwardedMetricWithMetadata(msg.ForwardedMetricWithMetadata)
case encoding.TimedMetricWithMetadataType:
return enc.encodeTimedMetricWithMetadata(msg.TimedMetricWithMetadata)
+ case encoding.TimedMetricWithMetadatasType:
+ return enc.encodeTimedMetricWithMetadatas(msg.TimedMetricWithMetadatas)
+ case encoding.PassthroughMetricWithMetadataType:
+ return enc.encodePassthroughMetricWithMetadata(msg.PassthroughMetricWithMetadata)
default:
return fmt.Errorf("unknown message type: %v", msg.Type)
}
@@ -187,6 +193,28 @@ func (enc *unaggregatedEncoder) encodeTimedMetricWithMetadata(tm aggregated.Time
return enc.encodeMetricWithMetadatas(mm)
}
+func (enc *unaggregatedEncoder) encodeTimedMetricWithMetadatas(tms aggregated.TimedMetricWithMetadatas) error {
+ if err := tms.ToProto(&enc.tms); err != nil {
+ return fmt.Errorf("timed metric with metadatas proto conversion failed: %v", err)
+ }
+ mm := metricpb.MetricWithMetadatas{
+ Type: metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_METADATAS,
+ TimedMetricWithMetadatas: &enc.tms,
+ }
+ return enc.encodeMetricWithMetadatas(mm)
+}
+
+func (enc *unaggregatedEncoder) encodePassthroughMetricWithMetadata(pm aggregated.PassthroughMetricWithMetadata) error {
+ if err := pm.ToProto(&enc.pm); err != nil {
+ return fmt.Errorf("passthrough metric with metadata proto conversion failed: %v", err)
+ }
+ mm := metricpb.MetricWithMetadatas{
+ Type: metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_STORAGE_POLICY,
+ TimedMetricWithStoragePolicy: &enc.pm,
+ }
+ return enc.encodeMetricWithMetadatas(mm)
+}
+
func (enc *unaggregatedEncoder) encodeMetricWithMetadatas(pb metricpb.MetricWithMetadatas) error {
msgSize := pb.Size()
if msgSize > enc.maxMessageSize {
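
For reference, a minimal sketch of encoding a passthrough metric via the new path, using only names that appear in the tests below; on the wire it is encoded as TIMED_METRIC_WITH_STORAGE_POLICY:

```go
package example

import (
	"time"

	"github.com/m3db/m3/src/metrics/encoding"
	"github.com/m3db/m3/src/metrics/encoding/protobuf"
	"github.com/m3db/m3/src/metrics/metric/aggregated"
	"github.com/m3db/m3/src/metrics/policy"
	xtime "github.com/m3db/m3/src/x/time"
)

// encodePassthrough encodes a metric that carries a bare storage policy
// instead of staged metadatas.
func encodePassthrough(m aggregated.Metric) error {
	enc := protobuf.NewUnaggregatedEncoder(protobuf.NewUnaggregatedOptions())
	return enc.EncodeMessage(encoding.UnaggregatedMessageUnion{
		Type: encoding.PassthroughMetricWithMetadataType,
		PassthroughMetricWithMetadata: aggregated.PassthroughMetricWithMetadata{
			Metric:        m,
			StoragePolicy: policy.NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour),
		},
	})
}
```
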
diff --git a/src/metrics/encoding/protobuf/unaggregated_encoder_test.go b/src/metrics/encoding/protobuf/unaggregated_encoder_test.go
index eaf67fe829..57364bd604 100644
--- a/src/metrics/encoding/protobuf/unaggregated_encoder_test.go
+++ b/src/metrics/encoding/protobuf/unaggregated_encoder_test.go
@@ -96,6 +96,18 @@ var (
TimeNanos: 82590,
Value: 0,
}
+ testPassthroughMetric1 = aggregated.Metric{
+ Type: metric.CounterType,
+ ID: []byte("testPassthroughMetric1"),
+ TimeNanos: 11111,
+ Value: 1,
+ }
+ testPassthroughMetric2 = aggregated.Metric{
+ Type: metric.GaugeType,
+ ID: []byte("testPassthroughMetric2"),
+ TimeNanos: 22222,
+ Value: 2,
+ }
testStagedMetadatas1 = metadata.StagedMetadatas{
{
CutoverNanos: 1234,
@@ -239,6 +251,8 @@ var (
AggregationID: aggregation.MustCompressTypes(aggregation.Sum),
StoragePolicy: policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour),
}
+ testPassthroughMetadata1 = policy.NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour)
+ testPassthroughMetadata2 = policy.NewStoragePolicy(10*time.Second, xtime.Second, 6*time.Hour)
testCounter1Proto = metricpb.Counter{
Id: []byte("testCounter1"),
Value: 123,
@@ -287,6 +301,18 @@ var (
TimeNanos: 82590,
Value: 0,
}
+ testPassthroughMetric1Proto = metricpb.TimedMetric{
+ Type: metricpb.MetricType_COUNTER,
+ Id: []byte("testPassthroughMetric1"),
+ TimeNanos: 11111,
+ Value: 1,
+ }
+ testPassthroughMetric2Proto = metricpb.TimedMetric{
+ Type: metricpb.MetricType_GAUGE,
+ Id: []byte("testPassthroughMetric2"),
+ TimeNanos: 22222,
+ Value: 2,
+ }
testStagedMetadatas1Proto = metricpb.StagedMetadatas{
Metadatas: []metricpb.StagedMetadata{
{
@@ -516,6 +542,24 @@ var (
},
},
}
+ testPassthroughMetadata1Proto = policypb.StoragePolicy{
+ Resolution: &policypb.Resolution{
+ WindowSize: time.Minute.Nanoseconds(),
+ Precision: time.Minute.Nanoseconds(),
+ },
+ Retention: &policypb.Retention{
+ Period: (12 * time.Hour).Nanoseconds(),
+ },
+ }
+ testPassthroughMetadata2Proto = policypb.StoragePolicy{
+ Resolution: &policypb.Resolution{
+ WindowSize: 10 * time.Second.Nanoseconds(),
+ Precision: time.Second.Nanoseconds(),
+ },
+ Retention: &policypb.Retention{
+ Period: (6 * time.Hour).Nanoseconds(),
+ },
+ }
testCmpOpts = []cmp.Option{
cmpopts.EquateEmpty(),
cmp.AllowUnexported(policy.StoragePolicy{}),
@@ -822,6 +866,66 @@ func TestUnaggregatedEncoderEncodeTimedMetricWithMetadata(t *testing.T) {
}
}
+func TestUnaggregatedEncoderEncodePassthroughMetricWithMetadata(t *testing.T) {
+ inputs := []aggregated.PassthroughMetricWithMetadata{
+ {
+ Metric: testPassthroughMetric1,
+ StoragePolicy: testPassthroughMetadata1,
+ },
+ {
+ Metric: testPassthroughMetric1,
+ StoragePolicy: testPassthroughMetadata2,
+ },
+ {
+ Metric: testPassthroughMetric2,
+ StoragePolicy: testPassthroughMetadata1,
+ },
+ {
+ Metric: testPassthroughMetric2,
+ StoragePolicy: testPassthroughMetadata2,
+ },
+ }
+ expected := []metricpb.TimedMetricWithStoragePolicy{
+ {
+ TimedMetric: testPassthroughMetric1Proto,
+ StoragePolicy: testPassthroughMetadata1Proto,
+ },
+ {
+ TimedMetric: testPassthroughMetric1Proto,
+ StoragePolicy: testPassthroughMetadata2Proto,
+ },
+ {
+ TimedMetric: testPassthroughMetric2Proto,
+ StoragePolicy: testPassthroughMetadata1Proto,
+ },
+ {
+ TimedMetric: testPassthroughMetric2Proto,
+ StoragePolicy: testPassthroughMetadata2Proto,
+ },
+ }
+
+ var (
+ sizeRes int
+ pbRes metricpb.MetricWithMetadatas
+ )
+ enc := NewUnaggregatedEncoder(NewUnaggregatedOptions())
+ enc.(*unaggregatedEncoder).encodeMessageSizeFn = func(size int) { sizeRes = size }
+ enc.(*unaggregatedEncoder).encodeMessageFn = func(pb metricpb.MetricWithMetadatas) error { pbRes = pb; return nil }
+ for i, input := range inputs {
+ require.NoError(t, enc.EncodeMessage(encoding.UnaggregatedMessageUnion{
+ Type: encoding.PassthroughMetricWithMetadataType,
+ PassthroughMetricWithMetadata: input,
+ }))
+ expectedProto := metricpb.MetricWithMetadatas{
+ Type: metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_STORAGE_POLICY,
+ TimedMetricWithStoragePolicy: &expected[i],
+ }
+ expectedMsgSize := expectedProto.Size()
+ require.Equal(t, expectedMsgSize, sizeRes)
+ require.Equal(t, expectedProto, pbRes)
+ }
+}
+
func TestUnaggregatedEncoderStress(t *testing.T) {
inputs := []interface{}{
unaggregated.CounterWithMetadatas{
@@ -844,6 +948,10 @@ func TestUnaggregatedEncoderStress(t *testing.T) {
Metric: testTimedMetric1,
TimedMetadata: testTimedMetadata1,
},
+ aggregated.PassthroughMetricWithMetadata{
+ Metric: testPassthroughMetric1,
+ StoragePolicy: testPassthroughMetadata1,
+ },
unaggregated.CounterWithMetadatas{
Counter: testCounter2,
StagedMetadatas: testStagedMetadatas1,
@@ -896,6 +1004,10 @@ func TestUnaggregatedEncoderStress(t *testing.T) {
Metric: testTimedMetric2,
TimedMetadata: testTimedMetadata2,
},
+ aggregated.PassthroughMetricWithMetadata{
+ Metric: testPassthroughMetric2,
+ StoragePolicy: testPassthroughMetadata2,
+ },
}
expected := []interface{}{
@@ -919,6 +1031,10 @@ func TestUnaggregatedEncoderStress(t *testing.T) {
Metric: testTimedMetric1Proto,
Metadata: testTimedMetadata1Proto,
},
+ metricpb.TimedMetricWithStoragePolicy{
+ TimedMetric: testPassthroughMetric1Proto,
+ StoragePolicy: testPassthroughMetadata1Proto,
+ },
metricpb.CounterWithMetadatas{
Counter: testCounter2Proto,
Metadatas: testStagedMetadatas1Proto,
@@ -971,6 +1087,10 @@ func TestUnaggregatedEncoderStress(t *testing.T) {
Metric: testTimedMetric2Proto,
Metadata: testTimedMetadata2Proto,
},
+ metricpb.TimedMetricWithStoragePolicy{
+ TimedMetric: testPassthroughMetric2Proto,
+ StoragePolicy: testPassthroughMetadata2Proto,
+ },
}
var (
@@ -1039,6 +1159,16 @@ func TestUnaggregatedEncoderStress(t *testing.T) {
Type: metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_METADATA,
TimedMetricWithMetadata: &res,
}
+ case aggregated.PassthroughMetricWithMetadata:
+ msg = encoding.UnaggregatedMessageUnion{
+ Type: encoding.PassthroughMetricWithMetadataType,
+ PassthroughMetricWithMetadata: input,
+ }
+ res := expected[i].(metricpb.TimedMetricWithStoragePolicy)
+ expectedProto = metricpb.MetricWithMetadatas{
+ Type: metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_STORAGE_POLICY,
+ TimedMetricWithStoragePolicy: &res,
+ }
default:
require.Fail(t, "unrecognized type %T", input)
}
diff --git a/src/metrics/encoding/protobuf/unaggregated_iterator.go b/src/metrics/encoding/protobuf/unaggregated_iterator.go
index d04bd618b4..81d9038f76 100644
--- a/src/metrics/encoding/protobuf/unaggregated_iterator.go
+++ b/src/metrics/encoding/protobuf/unaggregated_iterator.go
@@ -128,7 +128,7 @@ func (it *unaggregatedIterator) decodeMessage(size int) error {
it.err = err
return err
}
- resetMetricWithMetadatasProto(&it.pb)
+ ReuseMetricWithMetadatasProto(&it.pb)
if err := it.pb.Unmarshal(it.buf[:size]); err != nil {
it.err = err
return err
@@ -149,6 +149,12 @@ func (it *unaggregatedIterator) decodeMessage(size int) error {
case metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_METADATA:
it.msg.Type = encoding.TimedMetricWithMetadataType
it.err = it.msg.TimedMetricWithMetadata.FromProto(it.pb.TimedMetricWithMetadata)
+ case metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_METADATAS:
+ it.msg.Type = encoding.TimedMetricWithMetadatasType
+ it.err = it.msg.TimedMetricWithMetadatas.FromProto(it.pb.TimedMetricWithMetadatas)
+ case metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_STORAGE_POLICY:
+ it.msg.Type = encoding.PassthroughMetricWithMetadataType
+ it.err = it.msg.PassthroughMetricWithMetadata.FromProto(it.pb.TimedMetricWithStoragePolicy)
default:
it.err = fmt.Errorf("unrecognized message type: %v", it.pb.Type)
}
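
For reference, a minimal consumer sketch of the two new message types surfaced by the iterator, following the decode loop in the tests below; the iterator reports io.EOF via Err() at end of stream:

```go
package example

import (
	"bytes"
	"io"

	"github.com/m3db/m3/src/metrics/encoding"
	"github.com/m3db/m3/src/metrics/encoding/protobuf"
)

// consume drains a stream and switches on the two message types added here.
func consume(data []byte) error {
	it := protobuf.NewUnaggregatedIterator(bytes.NewReader(data), protobuf.NewUnaggregatedOptions())
	defer it.Close()
	for it.Next() {
		msg := it.Current()
		switch msg.Type {
		case encoding.TimedMetricWithMetadatasType:
			_ = msg.TimedMetricWithMetadatas // timed metric + staged metadatas
		case encoding.PassthroughMetricWithMetadataType:
			_ = msg.PassthroughMetricWithMetadata // metric + storage policy
		}
	}
	if err := it.Err(); err != nil && err != io.EOF {
		return err
	}
	return nil
}
```
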
diff --git a/src/metrics/encoding/protobuf/unaggregated_iterator_test.go b/src/metrics/encoding/protobuf/unaggregated_iterator_test.go
index ed9e517089..523e923b19 100644
--- a/src/metrics/encoding/protobuf/unaggregated_iterator_test.go
+++ b/src/metrics/encoding/protobuf/unaggregated_iterator_test.go
@@ -217,6 +217,51 @@ func TestUnaggregatedIteratorDecodeForwardedMetricWithMetadata(t *testing.T) {
require.Equal(t, io.EOF, it.Err())
require.Equal(t, len(inputs), i)
}
+func TestUnaggregatedIteratorDecodePassthroughMetricWithMetadata(t *testing.T) {
+ inputs := []aggregated.PassthroughMetricWithMetadata{
+ {
+ Metric: testPassthroughMetric1,
+ StoragePolicy: testPassthroughMetadata1,
+ },
+ {
+ Metric: testPassthroughMetric2,
+ StoragePolicy: testPassthroughMetadata1,
+ },
+ {
+ Metric: testPassthroughMetric1,
+ StoragePolicy: testPassthroughMetadata2,
+ },
+ {
+ Metric: testPassthroughMetric2,
+ StoragePolicy: testPassthroughMetadata2,
+ },
+ }
+
+ enc := NewUnaggregatedEncoder(NewUnaggregatedOptions())
+ for _, input := range inputs {
+ require.NoError(t, enc.EncodeMessage(encoding.UnaggregatedMessageUnion{
+ Type: encoding.PassthroughMetricWithMetadataType,
+ PassthroughMetricWithMetadata: input,
+ }))
+ }
+ dataBuf := enc.Relinquish()
+ defer dataBuf.Close()
+
+ var (
+ i int
+ stream = bytes.NewReader(dataBuf.Bytes())
+ )
+ it := NewUnaggregatedIterator(stream, NewUnaggregatedOptions())
+ defer it.Close()
+ for it.Next() {
+ res := it.Current()
+ require.Equal(t, encoding.PassthroughMetricWithMetadataType, res.Type)
+ require.Equal(t, inputs[i], res.PassthroughMetricWithMetadata)
+ i++
+ }
+ require.Equal(t, io.EOF, it.Err())
+ require.Equal(t, len(inputs), i)
+}
func TestUnaggregatedIteratorDecodeTimedMetricWithMetadata(t *testing.T) {
inputs := []aggregated.TimedMetricWithMetadata{
@@ -286,6 +331,10 @@ func TestUnaggregatedIteratorDecodeStress(t *testing.T) {
Metric: testTimedMetric1,
TimedMetadata: testTimedMetadata1,
},
+ aggregated.PassthroughMetricWithMetadata{
+ Metric: testPassthroughMetric1,
+ StoragePolicy: testPassthroughMetadata1,
+ },
unaggregated.CounterWithMetadatas{
Counter: testCounter2,
StagedMetadatas: testStagedMetadatas1,
@@ -338,6 +387,10 @@ func TestUnaggregatedIteratorDecodeStress(t *testing.T) {
Metric: testTimedMetric2,
TimedMetadata: testTimedMetadata2,
},
+ aggregated.PassthroughMetricWithMetadata{
+ Metric: testPassthroughMetric2,
+ StoragePolicy: testPassthroughMetadata2,
+ },
}
numIter := 1000
@@ -371,6 +424,11 @@ func TestUnaggregatedIteratorDecodeStress(t *testing.T) {
Type: encoding.TimedMetricWithMetadataType,
TimedMetricWithMetadata: input,
}
+ case aggregated.PassthroughMetricWithMetadata:
+ msg = encoding.UnaggregatedMessageUnion{
+ Type: encoding.PassthroughMetricWithMetadataType,
+ PassthroughMetricWithMetadata: input,
+ }
default:
require.Fail(t, "unrecognized type %T", input)
}
@@ -405,6 +463,9 @@ func TestUnaggregatedIteratorDecodeStress(t *testing.T) {
case aggregated.TimedMetricWithMetadata:
require.Equal(t, encoding.TimedMetricWithMetadataType, res.Type)
require.True(t, cmp.Equal(expectedRes, res.TimedMetricWithMetadata, testCmpOpts...))
+ case aggregated.PassthroughMetricWithMetadata:
+ require.Equal(t, encoding.PassthroughMetricWithMetadataType, res.Type)
+ require.True(t, cmp.Equal(expectedRes, res.PassthroughMetricWithMetadata, testCmpOpts...))
default:
require.Fail(t, "unknown input type: %T", inputs[j])
}
diff --git a/src/metrics/encoding/types.go b/src/metrics/encoding/types.go
index 266ac05a8e..2f361502e3 100644
--- a/src/metrics/encoding/types.go
+++ b/src/metrics/encoding/types.go
@@ -38,6 +38,8 @@ const (
GaugeWithMetadatasType
ForwardedMetricWithMetadataType
TimedMetricWithMetadataType
+ TimedMetricWithMetadatasType
+ PassthroughMetricWithMetadataType
)
// UnaggregatedMessageUnion is a union of different types of unaggregated messages.
@@ -45,12 +47,14 @@ const (
// by the `Type` field of the union, which in turn determines which one
// of the field in the union contains the corresponding message data.
type UnaggregatedMessageUnion struct {
- Type UnaggregatedMessageType
- CounterWithMetadatas unaggregated.CounterWithMetadatas
- BatchTimerWithMetadatas unaggregated.BatchTimerWithMetadatas
- GaugeWithMetadatas unaggregated.GaugeWithMetadatas
- ForwardedMetricWithMetadata aggregated.ForwardedMetricWithMetadata
- TimedMetricWithMetadata aggregated.TimedMetricWithMetadata
+ Type UnaggregatedMessageType
+ CounterWithMetadatas unaggregated.CounterWithMetadatas
+ BatchTimerWithMetadatas unaggregated.BatchTimerWithMetadatas
+ GaugeWithMetadatas unaggregated.GaugeWithMetadatas
+ ForwardedMetricWithMetadata aggregated.ForwardedMetricWithMetadata
+ TimedMetricWithMetadata aggregated.TimedMetricWithMetadata
+ TimedMetricWithMetadatas aggregated.TimedMetricWithMetadatas
+ PassthroughMetricWithMetadata aggregated.PassthroughMetricWithMetadata
}
// ByteReadScanner is capable of reading and scanning bytes.
diff --git a/src/metrics/generated/proto/metricpb/composite.pb.go b/src/metrics/generated/proto/metricpb/composite.pb.go
index 8bdb013320..ad2c896bd2 100644
--- a/src/metrics/generated/proto/metricpb/composite.pb.go
+++ b/src/metrics/generated/proto/metricpb/composite.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/m3db/m3/src/metrics/generated/proto/metricpb/composite.proto
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -35,6 +35,7 @@
GaugeWithMetadatas
ForwardedMetricWithMetadata
TimedMetricWithMetadata
+ TimedMetricWithMetadatas
TimedMetricWithStoragePolicy
AggregatedMetric
MetricWithMetadatas
@@ -49,6 +50,7 @@
Gauge
TimedMetric
ForwardedMetric
+ Tag
*/
package metricpb
@@ -74,12 +76,14 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type MetricWithMetadatas_Type int32
const (
- MetricWithMetadatas_UNKNOWN MetricWithMetadatas_Type = 0
- MetricWithMetadatas_COUNTER_WITH_METADATAS MetricWithMetadatas_Type = 1
- MetricWithMetadatas_BATCH_TIMER_WITH_METADATAS MetricWithMetadatas_Type = 2
- MetricWithMetadatas_GAUGE_WITH_METADATAS MetricWithMetadatas_Type = 3
- MetricWithMetadatas_FORWARDED_METRIC_WITH_METADATA MetricWithMetadatas_Type = 4
- MetricWithMetadatas_TIMED_METRIC_WITH_METADATA MetricWithMetadatas_Type = 5
+ MetricWithMetadatas_UNKNOWN MetricWithMetadatas_Type = 0
+ MetricWithMetadatas_COUNTER_WITH_METADATAS MetricWithMetadatas_Type = 1
+ MetricWithMetadatas_BATCH_TIMER_WITH_METADATAS MetricWithMetadatas_Type = 2
+ MetricWithMetadatas_GAUGE_WITH_METADATAS MetricWithMetadatas_Type = 3
+ MetricWithMetadatas_FORWARDED_METRIC_WITH_METADATA MetricWithMetadatas_Type = 4
+ MetricWithMetadatas_TIMED_METRIC_WITH_METADATA MetricWithMetadatas_Type = 5
+ MetricWithMetadatas_TIMED_METRIC_WITH_METADATAS MetricWithMetadatas_Type = 6
+ MetricWithMetadatas_TIMED_METRIC_WITH_STORAGE_POLICY MetricWithMetadatas_Type = 7
)
var MetricWithMetadatas_Type_name = map[int32]string{
@@ -89,21 +93,25 @@ var MetricWithMetadatas_Type_name = map[int32]string{
3: "GAUGE_WITH_METADATAS",
4: "FORWARDED_METRIC_WITH_METADATA",
5: "TIMED_METRIC_WITH_METADATA",
+ 6: "TIMED_METRIC_WITH_METADATAS",
+ 7: "TIMED_METRIC_WITH_STORAGE_POLICY",
}
var MetricWithMetadatas_Type_value = map[string]int32{
- "UNKNOWN": 0,
- "COUNTER_WITH_METADATAS": 1,
- "BATCH_TIMER_WITH_METADATAS": 2,
- "GAUGE_WITH_METADATAS": 3,
- "FORWARDED_METRIC_WITH_METADATA": 4,
- "TIMED_METRIC_WITH_METADATA": 5,
+ "UNKNOWN": 0,
+ "COUNTER_WITH_METADATAS": 1,
+ "BATCH_TIMER_WITH_METADATAS": 2,
+ "GAUGE_WITH_METADATAS": 3,
+ "FORWARDED_METRIC_WITH_METADATA": 4,
+ "TIMED_METRIC_WITH_METADATA": 5,
+ "TIMED_METRIC_WITH_METADATAS": 6,
+ "TIMED_METRIC_WITH_STORAGE_POLICY": 7,
}
func (x MetricWithMetadatas_Type) String() string {
return proto.EnumName(MetricWithMetadatas_Type_name, int32(x))
}
func (MetricWithMetadatas_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptorComposite, []int{7, 0}
+ return fileDescriptorComposite, []int{8, 0}
}
type CounterWithMetadatas struct {
@@ -228,6 +236,32 @@ func (m *TimedMetricWithMetadata) GetMetadata() TimedMetadata {
return TimedMetadata{}
}
+type TimedMetricWithMetadatas struct {
+ Metric TimedMetric `protobuf:"bytes,1,opt,name=metric" json:"metric"`
+ Metadatas StagedMetadatas `protobuf:"bytes,2,opt,name=metadatas" json:"metadatas"`
+}
+
+func (m *TimedMetricWithMetadatas) Reset() { *m = TimedMetricWithMetadatas{} }
+func (m *TimedMetricWithMetadatas) String() string { return proto.CompactTextString(m) }
+func (*TimedMetricWithMetadatas) ProtoMessage() {}
+func (*TimedMetricWithMetadatas) Descriptor() ([]byte, []int) {
+ return fileDescriptorComposite, []int{5}
+}
+
+func (m *TimedMetricWithMetadatas) GetMetric() TimedMetric {
+ if m != nil {
+ return m.Metric
+ }
+ return TimedMetric{}
+}
+
+func (m *TimedMetricWithMetadatas) GetMetadatas() StagedMetadatas {
+ if m != nil {
+ return m.Metadatas
+ }
+ return StagedMetadatas{}
+}
+
type TimedMetricWithStoragePolicy struct {
TimedMetric TimedMetric `protobuf:"bytes,1,opt,name=timed_metric,json=timedMetric" json:"timed_metric"`
StoragePolicy policypb.StoragePolicy `protobuf:"bytes,2,opt,name=storage_policy,json=storagePolicy" json:"storage_policy"`
@@ -237,7 +271,7 @@ func (m *TimedMetricWithStoragePolicy) Reset() { *m = TimedMetricWithSto
func (m *TimedMetricWithStoragePolicy) String() string { return proto.CompactTextString(m) }
func (*TimedMetricWithStoragePolicy) ProtoMessage() {}
func (*TimedMetricWithStoragePolicy) Descriptor() ([]byte, []int) {
- return fileDescriptorComposite, []int{5}
+ return fileDescriptorComposite, []int{6}
}
func (m *TimedMetricWithStoragePolicy) GetTimedMetric() TimedMetric {
@@ -262,7 +296,7 @@ type AggregatedMetric struct {
func (m *AggregatedMetric) Reset() { *m = AggregatedMetric{} }
func (m *AggregatedMetric) String() string { return proto.CompactTextString(m) }
func (*AggregatedMetric) ProtoMessage() {}
-func (*AggregatedMetric) Descriptor() ([]byte, []int) { return fileDescriptorComposite, []int{6} }
+func (*AggregatedMetric) Descriptor() ([]byte, []int) { return fileDescriptorComposite, []int{7} }
func (m *AggregatedMetric) GetMetric() TimedMetricWithStoragePolicy {
if m != nil {
@@ -285,18 +319,20 @@ func (m *AggregatedMetric) GetEncodeNanos() int64 {
// significant performance hit when such message type is used for encoding
// and decoding high volume traffic.
type MetricWithMetadatas struct {
- Type MetricWithMetadatas_Type `protobuf:"varint,1,opt,name=type,proto3,enum=metricpb.MetricWithMetadatas_Type" json:"type,omitempty"`
- CounterWithMetadatas *CounterWithMetadatas `protobuf:"bytes,2,opt,name=counter_with_metadatas,json=counterWithMetadatas" json:"counter_with_metadatas,omitempty"`
- BatchTimerWithMetadatas *BatchTimerWithMetadatas `protobuf:"bytes,3,opt,name=batch_timer_with_metadatas,json=batchTimerWithMetadatas" json:"batch_timer_with_metadatas,omitempty"`
- GaugeWithMetadatas *GaugeWithMetadatas `protobuf:"bytes,4,opt,name=gauge_with_metadatas,json=gaugeWithMetadatas" json:"gauge_with_metadatas,omitempty"`
- ForwardedMetricWithMetadata *ForwardedMetricWithMetadata `protobuf:"bytes,5,opt,name=forwarded_metric_with_metadata,json=forwardedMetricWithMetadata" json:"forwarded_metric_with_metadata,omitempty"`
- TimedMetricWithMetadata *TimedMetricWithMetadata `protobuf:"bytes,6,opt,name=timed_metric_with_metadata,json=timedMetricWithMetadata" json:"timed_metric_with_metadata,omitempty"`
+ Type MetricWithMetadatas_Type `protobuf:"varint,1,opt,name=type,proto3,enum=metricpb.MetricWithMetadatas_Type" json:"type,omitempty"`
+ CounterWithMetadatas *CounterWithMetadatas `protobuf:"bytes,2,opt,name=counter_with_metadatas,json=counterWithMetadatas" json:"counter_with_metadatas,omitempty"`
+ BatchTimerWithMetadatas *BatchTimerWithMetadatas `protobuf:"bytes,3,opt,name=batch_timer_with_metadatas,json=batchTimerWithMetadatas" json:"batch_timer_with_metadatas,omitempty"`
+ GaugeWithMetadatas *GaugeWithMetadatas `protobuf:"bytes,4,opt,name=gauge_with_metadatas,json=gaugeWithMetadatas" json:"gauge_with_metadatas,omitempty"`
+ ForwardedMetricWithMetadata *ForwardedMetricWithMetadata `protobuf:"bytes,5,opt,name=forwarded_metric_with_metadata,json=forwardedMetricWithMetadata" json:"forwarded_metric_with_metadata,omitempty"`
+ TimedMetricWithMetadata *TimedMetricWithMetadata `protobuf:"bytes,6,opt,name=timed_metric_with_metadata,json=timedMetricWithMetadata" json:"timed_metric_with_metadata,omitempty"`
+ TimedMetricWithMetadatas *TimedMetricWithMetadatas `protobuf:"bytes,7,opt,name=timed_metric_with_metadatas,json=timedMetricWithMetadatas" json:"timed_metric_with_metadatas,omitempty"`
+ TimedMetricWithStoragePolicy *TimedMetricWithStoragePolicy `protobuf:"bytes,8,opt,name=timed_metric_with_storage_policy,json=timedMetricWithStoragePolicy" json:"timed_metric_with_storage_policy,omitempty"`
}
func (m *MetricWithMetadatas) Reset() { *m = MetricWithMetadatas{} }
func (m *MetricWithMetadatas) String() string { return proto.CompactTextString(m) }
func (*MetricWithMetadatas) ProtoMessage() {}
-func (*MetricWithMetadatas) Descriptor() ([]byte, []int) { return fileDescriptorComposite, []int{7} }
+func (*MetricWithMetadatas) Descriptor() ([]byte, []int) { return fileDescriptorComposite, []int{8} }
func (m *MetricWithMetadatas) GetType() MetricWithMetadatas_Type {
if m != nil {
@@ -340,12 +376,27 @@ func (m *MetricWithMetadatas) GetTimedMetricWithMetadata() *TimedMetricWithMetad
return nil
}
+func (m *MetricWithMetadatas) GetTimedMetricWithMetadatas() *TimedMetricWithMetadatas {
+ if m != nil {
+ return m.TimedMetricWithMetadatas
+ }
+ return nil
+}
+
+func (m *MetricWithMetadatas) GetTimedMetricWithStoragePolicy() *TimedMetricWithStoragePolicy {
+ if m != nil {
+ return m.TimedMetricWithStoragePolicy
+ }
+ return nil
+}
+
func init() {
proto.RegisterType((*CounterWithMetadatas)(nil), "metricpb.CounterWithMetadatas")
proto.RegisterType((*BatchTimerWithMetadatas)(nil), "metricpb.BatchTimerWithMetadatas")
proto.RegisterType((*GaugeWithMetadatas)(nil), "metricpb.GaugeWithMetadatas")
proto.RegisterType((*ForwardedMetricWithMetadata)(nil), "metricpb.ForwardedMetricWithMetadata")
proto.RegisterType((*TimedMetricWithMetadata)(nil), "metricpb.TimedMetricWithMetadata")
+ proto.RegisterType((*TimedMetricWithMetadatas)(nil), "metricpb.TimedMetricWithMetadatas")
proto.RegisterType((*TimedMetricWithStoragePolicy)(nil), "metricpb.TimedMetricWithStoragePolicy")
proto.RegisterType((*AggregatedMetric)(nil), "metricpb.AggregatedMetric")
proto.RegisterType((*MetricWithMetadatas)(nil), "metricpb.MetricWithMetadatas")
@@ -521,6 +572,40 @@ func (m *TimedMetricWithMetadata) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *TimedMetricWithMetadatas) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TimedMetricWithMetadatas) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintComposite(dAtA, i, uint64(m.Metric.Size()))
+ n11, err := m.Metric.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintComposite(dAtA, i, uint64(m.Metadatas.Size()))
+ n12, err := m.Metadatas.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ return i, nil
+}
+
func (m *TimedMetricWithStoragePolicy) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -539,19 +624,19 @@ func (m *TimedMetricWithStoragePolicy) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintComposite(dAtA, i, uint64(m.TimedMetric.Size()))
- n11, err := m.TimedMetric.MarshalTo(dAtA[i:])
+ n13, err := m.TimedMetric.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n11
+ i += n13
dAtA[i] = 0x12
i++
i = encodeVarintComposite(dAtA, i, uint64(m.StoragePolicy.Size()))
- n12, err := m.StoragePolicy.MarshalTo(dAtA[i:])
+ n14, err := m.StoragePolicy.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n12
+ i += n14
return i, nil
}
@@ -573,11 +658,11 @@ func (m *AggregatedMetric) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintComposite(dAtA, i, uint64(m.Metric.Size()))
- n13, err := m.Metric.MarshalTo(dAtA[i:])
+ n15, err := m.Metric.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n13
+ i += n15
if m.EncodeNanos != 0 {
dAtA[i] = 0x10
i++
@@ -610,51 +695,71 @@ func (m *MetricWithMetadatas) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintComposite(dAtA, i, uint64(m.CounterWithMetadatas.Size()))
- n14, err := m.CounterWithMetadatas.MarshalTo(dAtA[i:])
+ n16, err := m.CounterWithMetadatas.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n14
+ i += n16
}
if m.BatchTimerWithMetadatas != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintComposite(dAtA, i, uint64(m.BatchTimerWithMetadatas.Size()))
- n15, err := m.BatchTimerWithMetadatas.MarshalTo(dAtA[i:])
+ n17, err := m.BatchTimerWithMetadatas.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n15
+ i += n17
}
if m.GaugeWithMetadatas != nil {
dAtA[i] = 0x22
i++
i = encodeVarintComposite(dAtA, i, uint64(m.GaugeWithMetadatas.Size()))
- n16, err := m.GaugeWithMetadatas.MarshalTo(dAtA[i:])
+ n18, err := m.GaugeWithMetadatas.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n16
+ i += n18
}
if m.ForwardedMetricWithMetadata != nil {
dAtA[i] = 0x2a
i++
i = encodeVarintComposite(dAtA, i, uint64(m.ForwardedMetricWithMetadata.Size()))
- n17, err := m.ForwardedMetricWithMetadata.MarshalTo(dAtA[i:])
+ n19, err := m.ForwardedMetricWithMetadata.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n17
+ i += n19
}
if m.TimedMetricWithMetadata != nil {
dAtA[i] = 0x32
i++
i = encodeVarintComposite(dAtA, i, uint64(m.TimedMetricWithMetadata.Size()))
- n18, err := m.TimedMetricWithMetadata.MarshalTo(dAtA[i:])
+ n20, err := m.TimedMetricWithMetadata.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n18
+ i += n20
+ }
+ if m.TimedMetricWithMetadatas != nil {
+ dAtA[i] = 0x3a
+ i++
+ i = encodeVarintComposite(dAtA, i, uint64(m.TimedMetricWithMetadatas.Size()))
+ n21, err := m.TimedMetricWithMetadatas.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n21
+ }
+ if m.TimedMetricWithStoragePolicy != nil {
+ dAtA[i] = 0x42
+ i++
+ i = encodeVarintComposite(dAtA, i, uint64(m.TimedMetricWithStoragePolicy.Size()))
+ n22, err := m.TimedMetricWithStoragePolicy.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n22
}
return i, nil
}
@@ -718,6 +823,16 @@ func (m *TimedMetricWithMetadata) Size() (n int) {
return n
}
+func (m *TimedMetricWithMetadatas) Size() (n int) {
+ var l int
+ _ = l
+ l = m.Metric.Size()
+ n += 1 + l + sovComposite(uint64(l))
+ l = m.Metadatas.Size()
+ n += 1 + l + sovComposite(uint64(l))
+ return n
+}
+
func (m *TimedMetricWithStoragePolicy) Size() (n int) {
var l int
_ = l
@@ -765,6 +880,14 @@ func (m *MetricWithMetadatas) Size() (n int) {
l = m.TimedMetricWithMetadata.Size()
n += 1 + l + sovComposite(uint64(l))
}
+ if m.TimedMetricWithMetadatas != nil {
+ l = m.TimedMetricWithMetadatas.Size()
+ n += 1 + l + sovComposite(uint64(l))
+ }
+ if m.TimedMetricWithStoragePolicy != nil {
+ l = m.TimedMetricWithStoragePolicy.Size()
+ n += 1 + l + sovComposite(uint64(l))
+ }
return n
}
@@ -1331,6 +1454,116 @@ func (m *TimedMetricWithMetadata) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *TimedMetricWithMetadatas) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowComposite
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TimedMetricWithMetadatas: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TimedMetricWithMetadatas: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowComposite
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthComposite
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metadatas", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowComposite
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthComposite
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Metadatas.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipComposite(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthComposite
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *TimedMetricWithStoragePolicy) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -1753,6 +1986,72 @@ func (m *MetricWithMetadatas) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimedMetricWithMetadatas", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowComposite
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthComposite
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TimedMetricWithMetadatas == nil {
+ m.TimedMetricWithMetadatas = &TimedMetricWithMetadatas{}
+ }
+ if err := m.TimedMetricWithMetadatas.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimedMetricWithStoragePolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowComposite
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthComposite
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TimedMetricWithStoragePolicy == nil {
+ m.TimedMetricWithStoragePolicy = &TimedMetricWithStoragePolicy{}
+ }
+ if err := m.TimedMetricWithStoragePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipComposite(dAtA[iNdEx:])
@@ -1884,51 +2183,56 @@ func init() {
}
var fileDescriptorComposite = []byte{
- // 732 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0xcd, 0x6e, 0xd3, 0x5a,
- 0x10, 0xc7, 0xeb, 0x36, 0x69, 0x7b, 0x27, 0xbd, 0xbd, 0xb9, 0xe7, 0xe6, 0x36, 0x21, 0xad, 0x0c,
- 0xb5, 0x04, 0x42, 0x42, 0xc4, 0xa2, 0x91, 0xa8, 0x50, 0x05, 0x92, 0xf3, 0xd1, 0x34, 0x42, 0x4d,
- 0x91, 0xeb, 0x2a, 0x12, 0x8b, 0x5a, 0xfe, 0xaa, 0x63, 0x44, 0x62, 0xcb, 0x3e, 0x51, 0x55, 0xb1,
- 0x61, 0x09, 0x3b, 0x24, 0xc4, 0x1b, 0xb0, 0xe5, 0x3d, 0xba, 0x83, 0x27, 0x40, 0xa8, 0xbc, 0x08,
- 0xb2, 0x7d, 0x1c, 0xdb, 0xc7, 0x0e, 0x42, 0xed, 0xce, 0x9e, 0x99, 0xff, 0x6f, 0xfe, 0x39, 0x9e,
- 0x39, 0x81, 0x9e, 0x69, 0xe1, 0xd1, 0x54, 0x6d, 0x68, 0xf6, 0x98, 0x1f, 0x37, 0x75, 0x95, 0x1f,
- 0x37, 0x79, 0xcf, 0xd5, 0xf8, 0xb1, 0x81, 0x5d, 0x4b, 0xf3, 0x78, 0xd3, 0x98, 0x18, 0xae, 0x82,
- 0x0d, 0x9d, 0x77, 0x5c, 0x1b, 0xdb, 0x24, 0xee, 0xa8, 0xbc, 0x66, 0x8f, 0x1d, 0xdb, 0xb3, 0xb0,
- 0xd1, 0x08, 0x12, 0x68, 0x35, 0xca, 0xd4, 0x1f, 0x26, 0x90, 0xa6, 0x6d, 0xda, 0xa1, 0x52, 0x9d,
- 0x9e, 0x05, 0x6f, 0x21, 0xc6, 0x7f, 0x0a, 0x85, 0xf5, 0xce, 0x75, 0x1d, 0x84, 0x0f, 0x84, 0xb2,
- 0x7f, 0x03, 0x8a, 0xa2, 0x2b, 0x58, 0xb9, 0xa6, 0x1b, 0xc7, 0x7e, 0x6d, 0x69, 0x17, 0x8e, 0x4a,
- 0x1e, 0x42, 0x0a, 0xf7, 0x8e, 0x81, 0x4a, 0xdb, 0x9e, 0x4e, 0xb0, 0xe1, 0x0e, 0x2d, 0x3c, 0x3a,
- 0x24, 0x3d, 0x3c, 0xf4, 0x08, 0x56, 0xb4, 0x30, 0x5e, 0x63, 0xee, 0x30, 0xf7, 0x4b, 0x3b, 0xff,
- 0x36, 0x22, 0x27, 0x0d, 0x22, 0x68, 0x15, 0x2e, 0xbf, 0xdf, 0x5e, 0x10, 0xa3, 0x3a, 0xf4, 0x14,
- 0xfe, 0x8a, 0x3c, 0x7a, 0xb5, 0xc5, 0x40, 0x74, 0x2b, 0x16, 0x1d, 0x63, 0xc5, 0x34, 0xf4, 0x59,
- 0x03, 0x22, 0x8e, 0x15, 0xdc, 0x27, 0x06, 0xaa, 0x2d, 0x05, 0x6b, 0x23, 0xc9, 0x1a, 0xd3, 0x6e,
- 0xf6, 0xa0, 0xa4, 0xfa, 0x29, 0x19, 0xfb, 0x39, 0xe2, 0xa8, 0x12, 0xc3, 0x63, 0x1d, 0xe1, 0x82,
- 0x3a, 0x8b, 0xdc, 0xd4, 0xd7, 0x5b, 0x06, 0x50, 0x4f, 0x99, 0x9a, 0x46, 0xda, 0xd2, 0x03, 0x28,
- 0x9a, 0x7e, 0x94, 0x98, 0xf9, 0x27, 0x26, 0x06, 0xc5, 0x84, 0x13, 0xd6, 0xdc, 0xd4, 0xc2, 0x47,
- 0x06, 0x36, 0xf7, 0x6d, 0xf7, 0x5c, 0x71, 0xf5, 0xa0, 0xce, 0xb5, 0xb4, 0xa4, 0x19, 0xb4, 0x0b,
- 0xcb, 0x21, 0x8c, 0x98, 0x49, 0xb0, 0x29, 0x19, 0x61, 0x93, 0x72, 0xb4, 0x07, 0xab, 0x51, 0x97,
- 0xac, 0x2d, 0x22, 0x8d, 0xba, 0x10, 0xe9, 0x4c, 0xc0, 0xbd, 0x67, 0xa0, 0xea, 0x9f, 0x70, 0x9e,
- 0xa3, 0x26, 0xe5, 0xe8, 0xff, 0x18, 0x9b, 0x90, 0x50, 0x6e, 0x9e, 0x64, 0xdc, 0x54, 0xb3, 0xb2,
- 0x7c, 0x2f, 0x9f, 0x19, 0xd8, 0xa2, 0xbc, 0x1c, 0x63, 0xdb, 0x55, 0x4c, 0xe3, 0x45, 0x30, 0xee,
- 0xe8, 0x19, 0xac, 0xf9, 0xb3, 0xa3, 0xcb, 0x7f, 0x6e, 0xab, 0x84, 0xe3, 0x10, 0xea, 0xc0, 0xba,
- 0x17, 0x02, 0xe5, 0x70, 0x81, 0x66, 0x0e, 0xa3, 0xc5, 0x6a, 0xa4, 0x1a, 0x12, 0xc6, 0xdf, 0x5e,
- 0x32, 0xc8, 0xbd, 0x81, 0xb2, 0x60, 0x9a, 0xae, 0x61, 0xfa, 0x8b, 0x39, 0x23, 0xa7, 0x8f, 0xea,
- 0x5e, 0xae, 0xa7, 0xcc, 0x2f, 0xa2, 0xce, 0x6e, 0x1b, 0xd6, 0x8c, 0x89, 0x66, 0xeb, 0x86, 0x3c,
- 0x51, 0x26, 0x76, 0x38, 0x64, 0x4b, 0x62, 0x29, 0x8c, 0x0d, 0xfc, 0x10, 0xf7, 0xb5, 0x08, 0xff,
- 0x65, 0x3f, 0x95, 0x87, 0x1e, 0x43, 0x01, 0x5f, 0x38, 0xe1, 0x20, 0xaf, 0xef, 0x70, 0x71, 0xfb,
- 0x9c, 0xe2, 0x86, 0x74, 0xe1, 0x18, 0x62, 0x50, 0x8f, 0x24, 0xd8, 0x20, 0xab, 0x2f, 0x9f, 0x5b,
- 0x78, 0x24, 0xd3, 0x13, 0xce, 0x66, 0x6e, 0x8c, 0x14, 0x4a, 0xac, 0x68, 0x79, 0x17, 0xcf, 0x29,
- 0xd4, 0x13, 0xab, 0x4e, 0x93, 0x97, 0x02, 0xf2, 0x76, 0xde, 0xe6, 0xa7, 0xe1, 0x55, 0x75, 0xce,
- 0x55, 0x32, 0x80, 0x4a, 0xb0, 0x93, 0x34, 0xb9, 0x10, 0x90, 0xb7, 0xa8, 0x35, 0x4e, 0x43, 0x91,
- 0x99, 0xbd, 0x07, 0x5e, 0x01, 0x7b, 0x16, 0xed, 0x18, 0x19, 0xae, 0x34, 0xba, 0x56, 0x0c, 0xc8,
- 0x77, 0xe7, 0xee, 0x64, 0x92, 0x27, 0x6e, 0x9e, 0xfd, 0x66, 0xcf, 0x4f, 0xa1, 0x9e, 0x1c, 0x62,
- 0xaa, 0xcf, 0x32, 0x7d, 0x36, 0x73, 0x96, 0x53, 0xac, 0xe2, 0xfc, 0x04, 0xf7, 0x85, 0x81, 0x82,
- 0xff, 0x81, 0x51, 0x09, 0x56, 0x4e, 0x06, 0xcf, 0x07, 0x47, 0xc3, 0x41, 0x79, 0x01, 0xd5, 0x61,
- 0xa3, 0x7d, 0x74, 0x32, 0x90, 0xba, 0xa2, 0x3c, 0xec, 0x4b, 0x07, 0xf2, 0x61, 0x57, 0x12, 0x3a,
- 0x82, 0x24, 0x1c, 0x97, 0x19, 0xc4, 0x42, 0xbd, 0x25, 0x48, 0xed, 0x03, 0x59, 0xea, 0x1f, 0x66,
- 0xf3, 0x8b, 0xa8, 0x06, 0x95, 0x9e, 0x70, 0xd2, 0xeb, 0xd2, 0x99, 0x25, 0xc4, 0x01, 0xbb, 0x7f,
- 0x24, 0x0e, 0x05, 0xb1, 0xd3, 0xed, 0xf8, 0x09, 0xb1, 0xdf, 0x4e, 0x17, 0x95, 0x0b, 0x3e, 0xdd,
- 0xe7, 0xce, 0xc9, 0x17, 0x5b, 0xfd, 0xcb, 0x2b, 0x96, 0xf9, 0x76, 0xc5, 0x32, 0x3f, 0xae, 0x58,
- 0xe6, 0xc3, 0x4f, 0x76, 0xe1, 0xe5, 0xee, 0x35, 0xff, 0x5d, 0xd5, 0xe5, 0xe0, 0xbd, 0xf9, 0x2b,
- 0x00, 0x00, 0xff, 0xff, 0x1a, 0x70, 0xed, 0x30, 0x67, 0x08, 0x00, 0x00,
+ // 808 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x96, 0xcd, 0x6e, 0xd3, 0x58,
+ 0x14, 0xc7, 0xeb, 0x36, 0x4d, 0x3a, 0x27, 0x9d, 0x4e, 0xe6, 0x4e, 0xa6, 0xc9, 0x24, 0x95, 0xdb,
+ 0x5a, 0x33, 0x23, 0x24, 0x44, 0x22, 0x1a, 0x89, 0x0a, 0x55, 0x20, 0x39, 0x1f, 0x4d, 0x23, 0x68,
+ 0x52, 0x39, 0xae, 0x22, 0x58, 0xd4, 0xb2, 0x1d, 0xd7, 0x31, 0x22, 0x71, 0x64, 0xdf, 0xa8, 0xaa,
+ 0xd8, 0xb0, 0x84, 0x0d, 0x42, 0x42, 0xbc, 0x01, 0x0f, 0xd3, 0x25, 0x4f, 0x80, 0x50, 0x59, 0xb1,
+ 0xe7, 0x01, 0x90, 0xed, 0xeb, 0xf8, 0x2b, 0x2e, 0x90, 0xec, 0xec, 0xf3, 0xf1, 0x3b, 0xff, 0x5c,
+ 0xdf, 0xf3, 0x57, 0xa0, 0xa9, 0x6a, 0x78, 0x30, 0x91, 0x4a, 0xb2, 0x3e, 0x2c, 0x0f, 0x2b, 0x7d,
+ 0xa9, 0x3c, 0xac, 0x94, 0x4d, 0x43, 0x2e, 0x0f, 0x15, 0x6c, 0x68, 0xb2, 0x59, 0x56, 0x95, 0x91,
+ 0x62, 0x88, 0x58, 0xe9, 0x97, 0xc7, 0x86, 0x8e, 0x75, 0x12, 0x1f, 0x4b, 0x65, 0x59, 0x1f, 0x8e,
+ 0x75, 0x53, 0xc3, 0x4a, 0xc9, 0x4e, 0xa0, 0x35, 0x37, 0x53, 0xb8, 0xe3, 0x43, 0xaa, 0xba, 0xaa,
+ 0x3b, 0x9d, 0xd2, 0xe4, 0xdc, 0x7e, 0x73, 0x30, 0xd6, 0x93, 0xd3, 0x58, 0xa8, 0xcf, 0xab, 0xc0,
+ 0x79, 0x20, 0x94, 0xc3, 0x05, 0x28, 0x62, 0x5f, 0xc4, 0xe2, 0x9c, 0x6a, 0xc6, 0xfa, 0x73, 0x4d,
+ 0xbe, 0x1c, 0x4b, 0xe4, 0xc1, 0xa1, 0x30, 0xaf, 0x28, 0xc8, 0xd6, 0xf4, 0xc9, 0x08, 0x2b, 0x46,
+ 0x4f, 0xc3, 0x83, 0x63, 0x32, 0xc3, 0x44, 0x77, 0x21, 0x25, 0x3b, 0xf1, 0x3c, 0xb5, 0x43, 0xdd,
+ 0x4a, 0xef, 0xfd, 0x59, 0x72, 0x95, 0x94, 0x48, 0x43, 0x35, 0x71, 0xf5, 0x69, 0x7b, 0x89, 0x73,
+ 0xeb, 0xd0, 0x03, 0xf8, 0xcd, 0xd5, 0x68, 0xe6, 0x97, 0xed, 0xa6, 0x7f, 0xbc, 0xa6, 0x2e, 0x16,
+ 0x55, 0xa5, 0x3f, 0x1d, 0x40, 0x9a, 0xbd, 0x0e, 0xe6, 0x3d, 0x05, 0xb9, 0xaa, 0x88, 0xe5, 0x01,
+ 0xaf, 0x0d, 0xc3, 0x6a, 0x0e, 0x20, 0x2d, 0x59, 0x29, 0x01, 0x5b, 0x39, 0xa2, 0x28, 0xeb, 0xc1,
+ 0xbd, 0x3e, 0xc2, 0x05, 0x69, 0x1a, 0x59, 0x54, 0xd7, 0x4b, 0x0a, 0x50, 0x53, 0x9c, 0xa8, 0x4a,
+ 0x50, 0xd2, 0x6d, 0x58, 0x55, 0xad, 0x28, 0x11, 0xf3, 0x87, 0x47, 0xb4, 0x8b, 0x09, 0xc7, 0xa9,
+ 0x59, 0x54, 0xc2, 0x3b, 0x0a, 0x8a, 0x87, 0xba, 0x71, 0x21, 0x1a, 0x7d, 0xbb, 0xce, 0xd0, 0x64,
+ 0xbf, 0x18, 0xb4, 0x0f, 0x49, 0x07, 0x46, 0xc4, 0xf8, 0xd8, 0xa1, 0x36, 0xc2, 0x26, 0xe5, 0xe8,
+ 0x00, 0xd6, 0xdc, 0x29, 0x51, 0x59, 0xa4, 0xd5, 0x9d, 0x42, 0x5a, 0xa7, 0x0d, 0xcc, 0x6b, 0x0a,
+ 0x72, 0xd6, 0x09, 0xcf, 0x52, 0x54, 0x09, 0x29, 0xfa, 0xdb, 0xc3, 0xfa, 0x5a, 0x42, 0x6a, 0xee,
+ 0x47, 0xd4, 0xe4, 0xa2, 0x6d, 0xb3, 0xb5, 0xbc, 0xa1, 0x20, 0x1f, 0xa3, 0xc5, 0x9c, 0x4f, 0xcc,
+ 0x82, 0x9f, 0xec, 0x03, 0x05, 0x5b, 0x21, 0x41, 0x5d, 0xac, 0x1b, 0xa2, 0xaa, 0x9c, 0xd8, 0xfb,
+ 0x87, 0x1e, 0xc2, 0xba, 0x75, 0x99, 0xfb, 0xc2, 0xcf, 0x4b, 0x4b, 0x63, 0x2f, 0x84, 0xea, 0xb0,
+ 0x61, 0x3a, 0x40, 0xc1, 0xd9, 0xe8, 0xe9, 0x91, 0xb9, 0x9b, 0x5e, 0x0a, 0x0c, 0x24, 0x8c, 0xdf,
+ 0x4d, 0x7f, 0x90, 0x79, 0x01, 0x19, 0x56, 0x55, 0x0d, 0x45, 0xb5, 0x9c, 0x62, 0x4a, 0x0e, 0x1e,
+ 0xd7, 0xff, 0x33, 0x35, 0x45, 0x7e, 0x51, 0xe8, 0xfc, 0x76, 0x61, 0x5d, 0x19, 0xc9, 0x7a, 0x5f,
+ 0x11, 0x46, 0xe2, 0x48, 0x77, 0x8e, 0x70, 0x85, 0x4b, 0x3b, 0xb1, 0xb6, 0x15, 0x62, 0xbe, 0xa6,
+ 0xe0, 0xaf, 0x59, 0xdf, 0xeb, 0x1e, 0x24, 0xf0, 0xe5, 0xd8, 0xd9, 0xac, 0x8d, 0x3d, 0xc6, 0x1b,
+ 0x3f, 0xa3, 0xb8, 0xc4, 0x5f, 0x8e, 0x15, 0xce, 0xae, 0x47, 0x3c, 0x6c, 0x12, 0x2f, 0x12, 0x2e,
+ 0x34, 0x3c, 0x10, 0xc2, 0xdf, 0x8f, 0x8e, 0x58, 0x58, 0x00, 0xc5, 0x65, 0xe5, 0x59, 0x4e, 0x78,
+ 0x06, 0x05, 0x9f, 0xf7, 0x84, 0xc9, 0x2b, 0x36, 0x79, 0x77, 0x96, 0x15, 0x05, 0xe1, 0x39, 0x29,
+ 0xc6, 0xdb, 0xda, 0x90, 0xb5, 0x4d, 0x22, 0x4c, 0x4e, 0xd8, 0xe4, 0xad, 0x90, 0xaf, 0x04, 0xa1,
+ 0x48, 0x8d, 0x1a, 0xd3, 0x33, 0xa0, 0xcf, 0xdd, 0xa5, 0x27, 0x97, 0x2b, 0x88, 0xce, 0xaf, 0xda,
+ 0xe4, 0xff, 0x62, 0x4d, 0xc2, 0xcf, 0xe3, 0x8a, 0xe7, 0x37, 0x18, 0xcf, 0x19, 0x14, 0xfc, 0x97,
+ 0x38, 0x34, 0x27, 0x19, 0x3e, 0x9b, 0x98, 0x0d, 0xe5, 0x72, 0x38, 0xc6, 0x46, 0x44, 0x28, 0xc6,
+ 0xf3, 0xcd, 0x7c, 0xca, 0x1e, 0xc0, 0xfc, 0x70, 0x80, 0xc9, 0xe5, 0x71, 0x9c, 0x39, 0x8c, 0x60,
+ 0x27, 0x3a, 0x22, 0xb4, 0x59, 0x6b, 0xbf, 0xb2, 0x07, 0xdc, 0x16, 0xbe, 0x21, 0xcb, 0x7c, 0xa3,
+ 0x20, 0x61, 0xdd, 0x59, 0x94, 0x86, 0xd4, 0x69, 0xfb, 0x51, 0xbb, 0xd3, 0x6b, 0x67, 0x96, 0x50,
+ 0x01, 0x36, 0x6b, 0x9d, 0xd3, 0x36, 0xdf, 0xe0, 0x84, 0x5e, 0x8b, 0x3f, 0x12, 0x8e, 0x1b, 0x3c,
+ 0x5b, 0x67, 0x79, 0xb6, 0x9b, 0xa1, 0x10, 0x0d, 0x85, 0x2a, 0xcb, 0xd7, 0x8e, 0x04, 0xbe, 0x75,
+ 0x1c, 0xcd, 0x2f, 0xa3, 0x3c, 0x64, 0x9b, 0xec, 0x69, 0xb3, 0x11, 0xce, 0xac, 0x20, 0x06, 0xe8,
+ 0xc3, 0x0e, 0xd7, 0x63, 0xb9, 0x7a, 0xa3, 0x6e, 0x25, 0xb8, 0x56, 0x2d, 0x58, 0x94, 0x49, 0x58,
+ 0x74, 0x8b, 0x1b, 0x93, 0x5f, 0x45, 0xdb, 0x50, 0x8c, 0xcf, 0x77, 0x33, 0x49, 0xf4, 0x2f, 0xec,
+ 0x44, 0x0b, 0xba, 0x7c, 0x87, 0x63, 0x9b, 0x0d, 0xe1, 0xa4, 0xf3, 0xb8, 0x55, 0x7b, 0x92, 0x49,
+ 0x55, 0x5b, 0x57, 0xd7, 0x34, 0xf5, 0xf1, 0x9a, 0xa6, 0x3e, 0x5f, 0xd3, 0xd4, 0xdb, 0x2f, 0xf4,
+ 0xd2, 0xd3, 0xfd, 0x39, 0xff, 0x08, 0x49, 0x49, 0xfb, 0xbd, 0xf2, 0x3d, 0x00, 0x00, 0xff, 0xff,
+ 0x34, 0x3f, 0x09, 0x4f, 0x12, 0x0a, 0x00, 0x00,
}
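
For orientation, a minimal sketch of how a consumer might dispatch on the two union members added above. Only the enum values and the Get* accessors come from the generated code in this diff; the function itself, the print statements, and the assumption that StagedMetadatas exposes a repeated Metadatas field are illustrative.

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
)

// decodeUnion handles only the two message types added in this change and
// assumes the matching union field is set when its type tag is present.
func decodeUnion(pb *metricpb.MetricWithMetadatas) error {
	switch pb.Type {
	case metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_METADATAS:
		m := pb.GetTimedMetricWithMetadatas()
		// Metadatas is assumed to be the repeated field on StagedMetadatas.
		fmt.Printf("timed metric with %d staged metadatas\n", len(m.Metadatas.Metadatas))
		return nil
	case metricpb.MetricWithMetadatas_TIMED_METRIC_WITH_STORAGE_POLICY:
		m := pb.GetTimedMetricWithStoragePolicy()
		fmt.Println("timed metric with storage policy:", m.StoragePolicy.String())
		return nil
	default:
		return fmt.Errorf("unhandled metric type: %v", pb.Type)
	}
}

func main() {
	_ = decodeUnion(&metricpb.MetricWithMetadatas{}) // zero value hits the default branch
}
```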
diff --git a/src/metrics/generated/proto/metricpb/composite.proto b/src/metrics/generated/proto/metricpb/composite.proto
index b7b7076c3d..61555eb196 100644
--- a/src/metrics/generated/proto/metricpb/composite.proto
+++ b/src/metrics/generated/proto/metricpb/composite.proto
@@ -54,6 +54,11 @@ message TimedMetricWithMetadata {
TimedMetadata metadata = 2 [(gogoproto.nullable) = false];
}
+message TimedMetricWithMetadatas {
+ TimedMetric metric = 1 [(gogoproto.nullable) = false];
+ StagedMetadatas metadatas = 2 [(gogoproto.nullable) = false];
+}
+
message TimedMetricWithStoragePolicy {
TimedMetric timed_metric = 1 [(gogoproto.nullable) = false];
policypb.StoragePolicy storage_policy = 2 [(gogoproto.nullable) = false];
@@ -78,6 +83,8 @@ message MetricWithMetadatas {
GAUGE_WITH_METADATAS = 3;
FORWARDED_METRIC_WITH_METADATA = 4;
TIMED_METRIC_WITH_METADATA = 5;
+ TIMED_METRIC_WITH_METADATAS = 6;
+ TIMED_METRIC_WITH_STORAGE_POLICY = 7;
}
Type type = 1;
CounterWithMetadatas counter_with_metadatas = 2;
@@ -85,4 +92,6 @@ message MetricWithMetadatas {
GaugeWithMetadatas gauge_with_metadatas = 4;
ForwardedMetricWithMetadata forwarded_metric_with_metadata = 5;
TimedMetricWithMetadata timed_metric_with_metadata = 6;
+ TimedMetricWithMetadatas timed_metric_with_metadatas = 7;
+ TimedMetricWithStoragePolicy timed_metric_with_storage_policy = 8;
}
diff --git a/src/metrics/generated/proto/metricpb/metric.pb.go b/src/metrics/generated/proto/metricpb/metric.pb.go
index 206a511c1f..9dfe73caed 100644
--- a/src/metrics/generated/proto/metricpb/metric.pb.go
+++ b/src/metrics/generated/proto/metricpb/metric.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/m3db/m3/src/metrics/generated/proto/metricpb/metric.proto
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -215,12 +215,37 @@ func (m *ForwardedMetric) GetValues() []float64 {
return nil
}
+type Tag struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *Tag) Reset() { *m = Tag{} }
+func (m *Tag) String() string { return proto.CompactTextString(m) }
+func (*Tag) ProtoMessage() {}
+func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptorMetric, []int{5} }
+
+func (m *Tag) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *Tag) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
func init() {
proto.RegisterType((*Counter)(nil), "metricpb.Counter")
proto.RegisterType((*BatchTimer)(nil), "metricpb.BatchTimer")
proto.RegisterType((*Gauge)(nil), "metricpb.Gauge")
proto.RegisterType((*TimedMetric)(nil), "metricpb.TimedMetric")
proto.RegisterType((*ForwardedMetric)(nil), "metricpb.ForwardedMetric")
+ proto.RegisterType((*Tag)(nil), "metricpb.Tag")
proto.RegisterEnum("metricpb.MetricType", MetricType_name, MetricType_value)
}
func (m *Counter) Marshal() (dAtA []byte, err error) {
@@ -400,6 +425,36 @@ func (m *ForwardedMetric) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *Tag) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Tag) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintMetric(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Value) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintMetric(dAtA, i, uint64(len(m.Value)))
+ i += copy(dAtA[i:], m.Value)
+ }
+ return i, nil
+}
+
func encodeVarintMetric(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -486,6 +541,20 @@ func (m *ForwardedMetric) Size() (n int) {
return n
}
+func (m *Tag) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovMetric(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovMetric(uint64(l))
+ }
+ return n
+}
+
func sovMetric(x uint64) (n int) {
for {
n++
@@ -1113,6 +1182,118 @@ func (m *ForwardedMetric) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *Tag) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetric
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Tag: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Tag: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetric
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthMetric
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMetric
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthMetric
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipMetric(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthMetric
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipMetric(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
@@ -1223,27 +1404,28 @@ func init() {
}
var fileDescriptorMetric = []byte{
- // 340 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x49, 0xcf, 0x2c, 0xc9,
- 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xcf, 0x35, 0x4e, 0x49, 0xd2, 0xcf, 0x35, 0xd6, 0x2f,
- 0x2e, 0x4a, 0xd6, 0xcf, 0x4d, 0x2d, 0x29, 0xca, 0x4c, 0x2e, 0xd6, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d,
- 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x87, 0x8a, 0x17, 0x24, 0x41, 0x19,
- 0x7a, 0x60, 0x51, 0x21, 0x0e, 0x98, 0xb0, 0x92, 0x3e, 0x17, 0xbb, 0x73, 0x7e, 0x69, 0x5e, 0x49,
- 0x6a, 0x91, 0x10, 0x1f, 0x17, 0x53, 0x66, 0x8a, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x53,
- 0x66, 0x8a, 0x90, 0x08, 0x17, 0x6b, 0x59, 0x62, 0x4e, 0x69, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06,
- 0x73, 0x10, 0x84, 0xa3, 0x64, 0xc2, 0xc5, 0xe5, 0x94, 0x58, 0x92, 0x9c, 0x11, 0x92, 0x99, 0x8b,
- 0x45, 0x8f, 0x18, 0x17, 0x1b, 0x58, 0x59, 0xb1, 0x04, 0x93, 0x02, 0xb3, 0x06, 0x63, 0x10, 0x94,
- 0xa7, 0xa4, 0xcb, 0xc5, 0xea, 0x9e, 0x58, 0x9a, 0x9e, 0x8a, 0xdf, 0x12, 0x46, 0x98, 0x25, 0x35,
- 0x5c, 0xdc, 0x20, 0xf3, 0x53, 0x7c, 0xc1, 0xce, 0x14, 0xd2, 0xe0, 0x62, 0x29, 0xa9, 0x2c, 0x48,
- 0x05, 0x6b, 0xe3, 0x33, 0x12, 0xd1, 0x83, 0xb9, 0x5e, 0x0f, 0x22, 0x1f, 0x52, 0x59, 0x90, 0x1a,
- 0x04, 0x56, 0x01, 0x35, 0x9e, 0x09, 0x6e, 0xbc, 0x2c, 0x17, 0x57, 0x49, 0x66, 0x6e, 0x6a, 0x7c,
- 0x5e, 0x62, 0x5e, 0x7e, 0xb1, 0x04, 0x33, 0xd8, 0x23, 0x9c, 0x20, 0x11, 0x3f, 0x90, 0x00, 0xc2,
- 0x76, 0x16, 0x64, 0xdb, 0x9b, 0x18, 0xb9, 0xf8, 0xdd, 0xf2, 0x8b, 0xca, 0x13, 0x8b, 0x52, 0x68,
- 0xef, 0x04, 0x44, 0x88, 0xb1, 0x20, 0x87, 0x98, 0x96, 0x0d, 0x17, 0x17, 0xc2, 0x68, 0x21, 0x6e,
- 0x2e, 0xf6, 0x50, 0x3f, 0x6f, 0x3f, 0xff, 0x70, 0x3f, 0x01, 0x06, 0x10, 0xc7, 0xd9, 0x3f, 0xd4,
- 0x2f, 0xc4, 0x35, 0x48, 0x80, 0x51, 0x88, 0x93, 0x8b, 0x35, 0xc4, 0xd3, 0xd7, 0x35, 0x48, 0x80,
- 0x09, 0xc4, 0x74, 0x77, 0x0c, 0x75, 0x77, 0x15, 0x60, 0x76, 0xf2, 0x3c, 0xf1, 0x48, 0x8e, 0xf1,
- 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0x88, 0x32, 0x27, 0x33,
- 0xe1, 0x24, 0xb1, 0x81, 0xf9, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x24, 0x3b, 0xb0, 0x62,
- 0x7a, 0x02, 0x00, 0x00,
+ // 363 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0xcd, 0x6a, 0xe2, 0x50,
+ 0x18, 0xf5, 0x26, 0x51, 0xc7, 0x4f, 0x71, 0xc2, 0x45, 0x86, 0x6c, 0x26, 0x88, 0xab, 0x30, 0x30,
+ 0xb9, 0x30, 0x0e, 0x74, 0xd3, 0x4d, 0xb5, 0x56, 0xa4, 0x18, 0x21, 0x44, 0x0a, 0xdd, 0x94, 0x9b,
+ 0xe4, 0x12, 0x03, 0xcd, 0x0f, 0xc9, 0x4d, 0x8b, 0xd0, 0x55, 0x9f, 0xa0, 0x8f, 0xd5, 0x65, 0x1f,
+ 0xa1, 0xd8, 0x17, 0x29, 0xb9, 0xc6, 0x5a, 0xa1, 0x74, 0x51, 0xe8, 0xee, 0x3b, 0x27, 0xdf, 0x77,
+ 0xce, 0xc9, 0xe1, 0xc2, 0x69, 0x10, 0xf2, 0x55, 0xe1, 0x9a, 0x5e, 0x12, 0x91, 0x68, 0xe8, 0xbb,
+ 0x24, 0x1a, 0x92, 0x3c, 0xf3, 0x48, 0xc4, 0x78, 0x16, 0x7a, 0x39, 0x09, 0x58, 0xcc, 0x32, 0xca,
+ 0x99, 0x4f, 0xd2, 0x2c, 0xe1, 0x49, 0xc5, 0xa7, 0x6e, 0x35, 0x98, 0x82, 0xc5, 0x3f, 0x76, 0xf4,
+ 0x80, 0x40, 0x73, 0x9c, 0x14, 0x31, 0x67, 0x19, 0xee, 0x82, 0x14, 0xfa, 0x1a, 0xea, 0x23, 0xa3,
+ 0x63, 0x4b, 0xa1, 0x8f, 0x7b, 0x50, 0xbf, 0xa1, 0xd7, 0x05, 0xd3, 0xa4, 0x3e, 0x32, 0x64, 0x7b,
+ 0x0b, 0x06, 0xff, 0x01, 0x46, 0x94, 0x7b, 0x2b, 0x27, 0x8c, 0x3e, 0xb8, 0xf9, 0x05, 0x0d, 0xb1,
+ 0x96, 0x6b, 0x52, 0x5f, 0x36, 0x90, 0x5d, 0xa1, 0xc1, 0x5f, 0xa8, 0x4f, 0x69, 0x11, 0xb0, 0xcf,
+ 0x4d, 0xd0, 0xce, 0xe4, 0x0e, 0xda, 0xa5, 0xbe, 0x3f, 0x17, 0x31, 0xb1, 0x01, 0x0a, 0x5f, 0xa7,
+ 0x4c, 0x9c, 0x75, 0xff, 0xf5, 0xcc, 0x5d, 0x7a, 0x73, 0xfb, 0xdd, 0x59, 0xa7, 0xcc, 0x16, 0x1b,
+ 0x95, 0xbc, 0xf4, 0x26, 0xff, 0x1b, 0x80, 0x87, 0x11, 0xbb, 0x8a, 0x69, 0x9c, 0xe4, 0x9a, 0x2c,
+ 0x7e, 0xa4, 0x55, 0x32, 0x56, 0x49, 0xec, 0xdd, 0x95, 0xf7, 0xee, 0xf7, 0x08, 0x7e, 0x9e, 0x25,
+ 0xd9, 0x2d, 0xcd, 0xfc, 0xef, 0x8f, 0xb0, 0x6f, 0x4c, 0x39, 0x68, 0x8c, 0x80, 0xec, 0xd0, 0x00,
+ 0x63, 0x50, 0x62, 0x1a, 0xb1, 0xaa, 0x31, 0x31, 0x1f, 0x76, 0xd6, 0xa9, 0x52, 0xff, 0x39, 0x06,
+ 0xd8, 0x67, 0xc1, 0x6d, 0x68, 0x2e, 0xad, 0x73, 0x6b, 0x71, 0x61, 0xa9, 0xb5, 0x12, 0x8c, 0x17,
+ 0x4b, 0xcb, 0x99, 0xd8, 0x2a, 0xc2, 0x2d, 0xa8, 0x3b, 0xb3, 0xf9, 0xc4, 0x56, 0xa5, 0x72, 0x9c,
+ 0x9e, 0x2c, 0xa7, 0x13, 0x55, 0x1e, 0xcd, 0x1e, 0x37, 0x3a, 0x7a, 0xda, 0xe8, 0xe8, 0x79, 0xa3,
+ 0xa3, 0x87, 0x17, 0xbd, 0x76, 0x79, 0xf4, 0xc5, 0x97, 0xe6, 0x36, 0x04, 0x1e, 0xbe, 0x06, 0x00,
+ 0x00, 0xff, 0xff, 0x50, 0xec, 0x69, 0x54, 0xab, 0x02, 0x00, 0x00,
}
diff --git a/src/metrics/generated/proto/metricpb/metric.proto b/src/metrics/generated/proto/metricpb/metric.proto
index 8fee1a83fa..f296204bb2 100644
--- a/src/metrics/generated/proto/metricpb/metric.proto
+++ b/src/metrics/generated/proto/metricpb/metric.proto
@@ -59,3 +59,8 @@ message ForwardedMetric {
int64 time_nanos = 3;
repeated double values = 4;
}
+
+message Tag {
+ bytes name = 1;
+ bytes value = 2;
+}
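
The new Tag message is small enough to exercise end to end. A round-trip sketch using only the generated Marshal/Unmarshal from metric.pb.go above; the sample name and value bytes are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
)

func main() {
	// Tags carry raw bytes rather than strings, matching the rest of the
	// metricpb ID handling.
	tag := &metricpb.Tag{Name: []byte("city"), Value: []byte("oakland")}

	buf, err := tag.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded metricpb.Tag
	if err := decoded.Unmarshal(buf); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s\n", decoded.Name, decoded.Value) // city=oakland
}
```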
diff --git a/src/metrics/generated/proto/rulepb/rule.pb.go b/src/metrics/generated/proto/rulepb/rule.pb.go
index 60e8f3480e..c79ff867c4 100644
--- a/src/metrics/generated/proto/rulepb/rule.pb.go
+++ b/src/metrics/generated/proto/rulepb/rule.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/m3db/m3/src/metrics/generated/proto/rulepb/rule.proto
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -29,6 +29,7 @@ import math "math"
import aggregationpb "github.com/m3db/m3/src/metrics/generated/proto/aggregationpb"
import pipelinepb "github.com/m3db/m3/src/metrics/generated/proto/pipelinepb"
import policypb "github.com/m3db/m3/src/metrics/generated/proto/policypb"
+import metricpb "github.com/m3db/m3/src/metrics/generated/proto/metricpb"
import io "io"
@@ -49,6 +50,7 @@ type MappingRuleSnapshot struct {
AggregationTypes []aggregationpb.AggregationType `protobuf:"varint,8,rep,packed,name=aggregation_types,json=aggregationTypes,enum=aggregationpb.AggregationType" json:"aggregation_types,omitempty"`
StoragePolicies []*policypb.StoragePolicy `protobuf:"bytes,9,rep,name=storage_policies,json=storagePolicies" json:"storage_policies,omitempty"`
DropPolicy policypb.DropPolicy `protobuf:"varint,10,opt,name=drop_policy,json=dropPolicy,proto3,enum=policypb.DropPolicy" json:"drop_policy,omitempty"`
+ Tags []*metricpb.Tag `protobuf:"bytes,11,rep,name=tags" json:"tags,omitempty"`
}
func (m *MappingRuleSnapshot) Reset() { *m = MappingRuleSnapshot{} }
@@ -126,6 +128,13 @@ func (m *MappingRuleSnapshot) GetDropPolicy() policypb.DropPolicy {
return policypb.DropPolicy_NONE
}
+func (m *MappingRuleSnapshot) GetTags() []*metricpb.Tag {
+ if m != nil {
+ return m.Tags
+ }
+ return nil
+}
+
type MappingRule struct {
Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
Snapshots []*MappingRuleSnapshot `protobuf:"bytes,2,rep,name=snapshots" json:"snapshots,omitempty"`
@@ -494,6 +503,18 @@ func (m *MappingRuleSnapshot) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintRule(dAtA, i, uint64(m.DropPolicy))
}
+ if len(m.Tags) > 0 {
+ for _, msg := range m.Tags {
+ dAtA[i] = 0x5a
+ i++
+ i = encodeVarintRule(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
return i, nil
}
@@ -880,6 +901,12 @@ func (m *MappingRuleSnapshot) Size() (n int) {
if m.DropPolicy != 0 {
n += 1 + sovRule(uint64(m.DropPolicy))
}
+ if len(m.Tags) > 0 {
+ for _, e := range m.Tags {
+ l = e.Size()
+ n += 1 + l + sovRule(uint64(l))
+ }
+ }
return n
}
@@ -1364,6 +1391,37 @@ func (m *MappingRuleSnapshot) Unmarshal(dAtA []byte) error {
break
}
}
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRule
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRule
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tags = append(m.Tags, &metricpb.Tag{})
+ if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipRule(dAtA[iNdEx:])
@@ -2501,49 +2559,51 @@ func init() {
}
var fileDescriptorRule = []byte{
- // 702 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x55, 0x41, 0x6b, 0xdb, 0x4a,
- 0x10, 0x7e, 0xb2, 0x1d, 0xdb, 0x1a, 0x3b, 0x8e, 0xdf, 0x26, 0x2f, 0x4f, 0xe4, 0x3d, 0x8c, 0x71,
- 0xa1, 0xf8, 0x50, 0xe4, 0x56, 0x21, 0x90, 0xde, 0x9a, 0x10, 0x68, 0xa1, 0x34, 0x84, 0x4d, 0x9a,
- 0x43, 0x28, 0x18, 0xd9, 0xda, 0x2a, 0x02, 0x49, 0xbb, 0xec, 0xae, 0x02, 0xfe, 0x03, 0x3d, 0xf7,
- 0x5f, 0xb5, 0xc7, 0xde, 0x7a, 0x2d, 0xe9, 0x7f, 0xe8, 0xb9, 0x68, 0x57, 0xb2, 0x25, 0xa2, 0x10,
- 0x5c, 0x28, 0x3d, 0x65, 0x76, 0x34, 0xfb, 0xed, 0xcc, 0xf7, 0x7d, 0x13, 0xc3, 0x0b, 0x3f, 0x90,
- 0xd7, 0xc9, 0xcc, 0x9e, 0xd3, 0x68, 0x12, 0xed, 0x7b, 0xb3, 0x49, 0xb4, 0x3f, 0x11, 0x7c, 0x3e,
- 0x89, 0x88, 0xe4, 0xc1, 0x5c, 0x4c, 0x7c, 0x12, 0x13, 0xee, 0x4a, 0xe2, 0x4d, 0x18, 0xa7, 0x92,
- 0x4e, 0x78, 0x12, 0x12, 0x36, 0x53, 0x7f, 0x6c, 0x95, 0x41, 0x4d, 0x9d, 0xda, 0x3b, 0x5d, 0x13,
- 0xc9, 0xf5, 0x7d, 0x4e, 0x7c, 0x57, 0x06, 0x34, 0x66, 0xb3, 0xe2, 0x49, 0xe3, 0xee, 0xbd, 0x5a,
- 0x13, 0x8f, 0x05, 0x8c, 0x84, 0x41, 0x9c, 0x76, 0x97, 0x87, 0x19, 0xd2, 0xc9, 0xba, 0x48, 0x34,
- 0x0c, 0xe6, 0x8b, 0x14, 0x47, 0x05, 0x1a, 0x65, 0xf4, 0xb5, 0x0e, 0xdb, 0x6f, 0x5c, 0xc6, 0x82,
- 0xd8, 0xc7, 0x49, 0x48, 0xce, 0x63, 0x97, 0x89, 0x6b, 0x2a, 0x11, 0x82, 0x46, 0xec, 0x46, 0xc4,
- 0x32, 0x86, 0xc6, 0xd8, 0xc4, 0x2a, 0x46, 0x03, 0x00, 0x49, 0xa3, 0x99, 0x90, 0x34, 0x26, 0x9e,
- 0x55, 0x1b, 0x1a, 0xe3, 0x36, 0x2e, 0x64, 0xd0, 0x23, 0xd8, 0x9c, 0x27, 0x92, 0xde, 0x10, 0x3e,
- 0x8d, 0xdd, 0x98, 0x0a, 0xab, 0x3e, 0x34, 0xc6, 0x75, 0xdc, 0xcd, 0x92, 0xa7, 0x69, 0x0e, 0xed,
- 0x42, 0xf3, 0x7d, 0x10, 0x4a, 0xc2, 0xad, 0x86, 0x82, 0xce, 0x4e, 0xe8, 0x09, 0xb4, 0x55, 0x63,
- 0x01, 0x11, 0xd6, 0xc6, 0xb0, 0x3e, 0xee, 0x38, 0x7d, 0x3b, 0x6f, 0xd9, 0x3e, 0x53, 0x01, 0x5e,
- 0x56, 0xa0, 0x67, 0xf0, 0x4f, 0xe8, 0x0a, 0x39, 0x4d, 0x98, 0x97, 0x8e, 0x38, 0x75, 0x65, 0xf6,
- 0x64, 0x53, 0x3d, 0x89, 0xd2, 0x8f, 0x6f, 0xf5, 0xb7, 0x23, 0xa9, 0x1f, 0x7e, 0x0c, 0x5b, 0xa5,
- 0x2b, 0xb3, 0x85, 0xd5, 0x52, 0x1d, 0x6c, 0x16, 0x8a, 0x8f, 0x17, 0xe8, 0x35, 0xfc, 0x5d, 0x90,
- 0x6d, 0x2a, 0x17, 0x8c, 0x08, 0xab, 0x3d, 0xac, 0x8f, 0x7b, 0xce, 0xc0, 0x2e, 0xc9, 0x6b, 0x1f,
- 0xad, 0x4e, 0x17, 0x0b, 0x46, 0x70, 0xdf, 0x2d, 0x27, 0x04, 0x3a, 0x86, 0xbe, 0x90, 0x94, 0xbb,
- 0x3e, 0x99, 0x2e, 0xa7, 0x33, 0xd5, 0x74, 0xff, 0xae, 0xa6, 0x3b, 0xd7, 0x15, 0xd9, 0x90, 0x5b,
- 0xa2, 0x70, 0x4c, 0x67, 0x3d, 0x80, 0x8e, 0xc7, 0x29, 0xd3, 0x00, 0x0b, 0x0b, 0x86, 0xc6, 0xb8,
- 0xe7, 0xec, 0xac, 0xae, 0x9f, 0x70, 0xca, 0xb2, 0xbb, 0xe0, 0x2d, 0xe3, 0xd1, 0x3b, 0xe8, 0x14,
- 0x84, 0x4d, 0x05, 0x4d, 0x92, 0xc0, 0xcb, 0x05, 0x4d, 0x63, 0xf4, 0x1c, 0x4c, 0x91, 0x09, 0x2e,
- 0xac, 0x9a, 0x6a, 0xeb, 0x3f, 0x5b, 0x1b, 0xdf, 0xae, 0x30, 0x05, 0x5e, 0x55, 0x8f, 0x3c, 0xe8,
- 0x62, 0x1a, 0x86, 0x09, 0xbb, 0x70, 0xb9, 0x4f, 0xaa, 0xfd, 0x82, 0xa0, 0x21, 0x5d, 0x5f, 0x23,
- 0x9b, 0x58, 0xc5, 0x25, 0x99, 0xeb, 0x0f, 0xc9, 0x3c, 0xfa, 0x60, 0x40, 0xaf, 0xf8, 0xcc, 0xa5,
- 0x83, 0x9e, 0x42, 0x3b, 0x5f, 0x04, 0xf5, 0x58, 0x27, 0xa5, 0x62, 0xb9, 0x24, 0xf6, 0x59, 0x16,
- 0xe2, 0x65, 0x55, 0xa5, 0x06, 0xb5, 0xf5, 0x34, 0x18, 0x7d, 0xaa, 0x01, 0xd2, 0x8d, 0xfc, 0xd9,
- 0x2d, 0xb1, 0xa1, 0x25, 0x15, 0x13, 0xf9, 0x92, 0xec, 0xe4, 0x7a, 0x15, 0x69, 0xc2, 0x79, 0xd1,
- 0xef, 0xdc, 0x93, 0x03, 0x80, 0xec, 0x95, 0xe9, 0x8d, 0xa3, 0x16, 0xa4, 0xe3, 0xec, 0x56, 0x75,
- 0x73, 0xe9, 0x60, 0x33, 0xab, 0xbc, 0x74, 0x46, 0x57, 0x00, 0x2b, 0x22, 0x2b, 0x5d, 0x79, 0x78,
- 0xd7, 0x95, 0x7b, 0x65, 0xdc, 0xfb, 0x4c, 0xf9, 0xa3, 0x06, 0x2d, 0xf5, 0x4d, 0x1b, 0xf2, 0x0e,
- 0xf2, 0xff, 0x60, 0xa6, 0x12, 0x09, 0xe6, 0xce, 0x89, 0x52, 0xc6, 0xc4, 0xab, 0x04, 0x1a, 0x43,
- 0x7f, 0xce, 0x49, 0x99, 0x26, 0xad, 0x4d, 0x2f, 0xcb, 0xe7, 0x14, 0xdd, 0xcb, 0x6a, 0xe3, 0x5e,
- 0x56, 0xcb, 0xae, 0xd8, 0x78, 0xd8, 0x15, 0xcd, 0x0a, 0x57, 0x1c, 0xc2, 0x66, 0xa4, 0xd7, 0x72,
- 0x9a, 0xf2, 0x21, 0xac, 0x96, 0x62, 0x67, 0xbb, 0x62, 0x67, 0x71, 0x37, 0x5a, 0x1d, 0xd2, 0xff,
- 0x21, 0x5d, 0xae, 0xa8, 0xcb, 0x2e, 0x6a, 0xb9, 0xd0, 0x5d, 0x5a, 0x71, 0x87, 0x2f, 0xe3, 0x4a,
- 0x2f, 0x98, 0x15, 0x5e, 0x38, 0x7e, 0xf9, 0xf9, 0x76, 0x60, 0x7c, 0xb9, 0x1d, 0x18, 0xdf, 0x6e,
- 0x07, 0xc6, 0xc7, 0xef, 0x83, 0xbf, 0xae, 0x0e, 0x7e, 0xe9, 0x17, 0x78, 0xd6, 0x54, 0xa7, 0xfd,
- 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x69, 0x70, 0x7f, 0x58, 0xc1, 0x07, 0x00, 0x00,
+ // 728 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x55, 0xdd, 0x6a, 0xdb, 0x48,
+ 0x14, 0x5e, 0xd9, 0x8e, 0x6d, 0x1d, 0xff, 0xc4, 0x3b, 0xc9, 0x66, 0x45, 0x76, 0x31, 0x5e, 0x2f,
+ 0x2c, 0xbe, 0x58, 0xe4, 0x5d, 0x85, 0x40, 0xf6, 0x6e, 0x13, 0x02, 0x2d, 0x94, 0x86, 0x30, 0x49,
+ 0x73, 0x11, 0x0a, 0x66, 0x64, 0x4d, 0x15, 0x81, 0x7e, 0x86, 0x99, 0x51, 0xc0, 0x2f, 0xd0, 0xeb,
+ 0xbe, 0x55, 0x7b, 0xd9, 0x47, 0x28, 0xe9, 0x3b, 0xf4, 0xa2, 0x57, 0x45, 0x33, 0x92, 0x2d, 0x13,
+ 0x85, 0xe0, 0x40, 0xe9, 0x95, 0xcf, 0x9c, 0x39, 0xf3, 0x9d, 0x9f, 0xef, 0x3b, 0x16, 0xfc, 0xef,
+ 0x07, 0xf2, 0x26, 0x75, 0xed, 0x79, 0x12, 0x4d, 0xa3, 0x03, 0xcf, 0x9d, 0x46, 0x07, 0x53, 0xc1,
+ 0xe7, 0xd3, 0x88, 0x4a, 0x1e, 0xcc, 0xc5, 0xd4, 0xa7, 0x31, 0xe5, 0x44, 0x52, 0x6f, 0xca, 0x78,
+ 0x22, 0x93, 0x29, 0x4f, 0x43, 0xca, 0x5c, 0xf5, 0x63, 0x2b, 0x0f, 0x6a, 0x6a, 0xd7, 0xfe, 0xd9,
+ 0x86, 0x48, 0xc4, 0xf7, 0x39, 0xf5, 0x89, 0x0c, 0x92, 0x98, 0xb9, 0xe5, 0x93, 0xc6, 0xdd, 0x7f,
+ 0xbe, 0x21, 0x1e, 0x0b, 0x18, 0x0d, 0x83, 0x38, 0xab, 0xae, 0x30, 0x73, 0xa4, 0xd3, 0x4d, 0x91,
+ 0x92, 0x30, 0x98, 0x2f, 0x32, 0x1c, 0x65, 0x3c, 0x11, 0x45, 0xfb, 0x99, 0x9b, 0x1b, 0x1a, 0x65,
+ 0xfc, 0xb5, 0x0e, 0x3b, 0x2f, 0x09, 0x63, 0x41, 0xec, 0xe3, 0x34, 0xa4, 0x17, 0x31, 0x61, 0xe2,
+ 0x26, 0x91, 0x08, 0x41, 0x23, 0x26, 0x11, 0xb5, 0x8c, 0x91, 0x31, 0x31, 0xb1, 0xb2, 0xd1, 0x10,
+ 0x40, 0x26, 0x91, 0x2b, 0x64, 0x12, 0x53, 0xcf, 0xaa, 0x8d, 0x8c, 0x49, 0x1b, 0x97, 0x3c, 0xe8,
+ 0x4f, 0xe8, 0xcd, 0x53, 0x99, 0xdc, 0x52, 0x3e, 0x8b, 0x49, 0x9c, 0x08, 0xab, 0x3e, 0x32, 0x26,
+ 0x75, 0xdc, 0xcd, 0x9d, 0x67, 0x99, 0x0f, 0xed, 0x41, 0xf3, 0x4d, 0x10, 0x4a, 0xca, 0xad, 0x86,
+ 0x82, 0xce, 0x4f, 0xe8, 0x6f, 0x68, 0xab, 0xf6, 0x02, 0x2a, 0xac, 0xad, 0x51, 0x7d, 0xd2, 0x71,
+ 0x06, 0x76, 0xd1, 0xb8, 0x7d, 0xae, 0x0c, 0xbc, 0x8c, 0x40, 0xff, 0xc2, 0x2f, 0x21, 0x11, 0x72,
+ 0x96, 0x32, 0x2f, 0x6b, 0x71, 0x46, 0x64, 0x9e, 0xb2, 0xa9, 0x52, 0xa2, 0xec, 0xf2, 0x95, 0xbe,
+ 0x3b, 0x96, 0x3a, 0xf1, 0x5f, 0xb0, 0xbd, 0xf6, 0xc4, 0x5d, 0x58, 0x2d, 0x55, 0x41, 0xaf, 0x14,
+ 0x7c, 0xb2, 0x40, 0x2f, 0xe0, 0xe7, 0x12, 0xf9, 0x33, 0xb9, 0x60, 0x54, 0x58, 0xed, 0x51, 0x7d,
+ 0xd2, 0x77, 0x86, 0xf6, 0x9a, 0x48, 0xec, 0xe3, 0xd5, 0xe9, 0x72, 0xc1, 0x28, 0x1e, 0x90, 0x75,
+ 0x87, 0x40, 0x27, 0x30, 0x10, 0x32, 0xe1, 0xc4, 0xa7, 0xb3, 0x65, 0x77, 0xa6, 0xea, 0xee, 0xd7,
+ 0x55, 0x77, 0x17, 0x3a, 0x22, 0x6f, 0x72, 0x5b, 0x94, 0x8e, 0x59, 0xaf, 0x87, 0xd0, 0xf1, 0x78,
+ 0xc2, 0x34, 0xc0, 0xc2, 0x82, 0x91, 0x31, 0xe9, 0x3b, 0xbb, 0xab, 0xe7, 0xa7, 0x3c, 0x61, 0xf9,
+ 0x5b, 0xf0, 0x96, 0x36, 0xfa, 0x03, 0x1a, 0x92, 0xf8, 0xc2, 0xea, 0xa8, 0x74, 0x3d, 0xbb, 0xe0,
+ 0xdf, 0xbe, 0x24, 0x3e, 0x56, 0x57, 0xe3, 0xd7, 0xd0, 0x29, 0x71, 0x9f, 0x71, 0x9e, 0xa6, 0x81,
+ 0x57, 0x70, 0x9e, 0xd9, 0xe8, 0x3f, 0x30, 0x45, 0xae, 0x09, 0x61, 0xd5, 0x14, 0xd4, 0x6f, 0xb6,
+ 0xde, 0x30, 0xbb, 0x42, 0x37, 0x78, 0x15, 0x3d, 0xf6, 0xa0, 0x8b, 0x93, 0x30, 0x4c, 0xd9, 0x25,
+ 0xe1, 0x3e, 0xad, 0x96, 0x14, 0xca, 0x8b, 0xcc, 0x90, 0x4d, 0x5d, 0xd5, 0x9a, 0x12, 0xea, 0x8f,
+ 0x29, 0x61, 0xfc, 0xd6, 0x80, 0x7e, 0x39, 0xcd, 0x95, 0x83, 0xfe, 0x81, 0x76, 0xb1, 0x71, 0x2a,
+ 0x59, 0x27, 0x9b, 0xd6, 0x72, 0x1b, 0xed, 0xf3, 0xdc, 0xc4, 0xcb, 0xa8, 0x4a, 0x9a, 0x6a, 0x9b,
+ 0xd1, 0x34, 0x7e, 0x5f, 0x03, 0xa4, 0x0b, 0xf9, 0xb1, 0x8b, 0x64, 0x43, 0x4b, 0xaa, 0x49, 0x14,
+ 0x7b, 0xb4, 0x5b, 0xf0, 0x55, 0x1e, 0x13, 0x2e, 0x82, 0xbe, 0xe7, 0x2a, 0x1d, 0x02, 0xe4, 0x59,
+ 0x66, 0xb7, 0x8e, 0xda, 0xa1, 0x8e, 0xb3, 0x57, 0x55, 0xcd, 0x95, 0x83, 0xcd, 0x3c, 0xf2, 0xca,
+ 0x19, 0x5f, 0x03, 0xac, 0x06, 0x59, 0xa9, 0xca, 0xa3, 0xfb, 0xaa, 0xdc, 0x5f, 0xc7, 0x7d, 0x48,
+ 0x94, 0x5f, 0x6a, 0xd0, 0x52, 0x77, 0x5a, 0x90, 0xf7, 0x90, 0x7f, 0x07, 0x33, 0xa3, 0x48, 0x30,
+ 0x32, 0xa7, 0x8a, 0x19, 0x13, 0xaf, 0x1c, 0x68, 0x02, 0x83, 0x39, 0xa7, 0xeb, 0x63, 0xd2, 0xdc,
+ 0xf4, 0x73, 0x7f, 0x31, 0xa2, 0x07, 0xa7, 0xda, 0x78, 0x70, 0xaa, 0xeb, 0xaa, 0xd8, 0x7a, 0x5c,
+ 0x15, 0xcd, 0x0a, 0x55, 0x1c, 0x41, 0x2f, 0xd2, 0x6b, 0x39, 0xcb, 0xe6, 0x21, 0xac, 0x96, 0x9a,
+ 0xce, 0x4e, 0xc5, 0xce, 0xe2, 0x6e, 0xb4, 0x3a, 0x64, 0x7f, 0x33, 0x5d, 0xae, 0x46, 0x97, 0x3f,
+ 0xd4, 0x74, 0xa1, 0xfb, 0x63, 0xc5, 0x1d, 0xbe, 0xb4, 0x2b, 0xb5, 0x60, 0x56, 0x68, 0xe1, 0xe4,
+ 0xd9, 0x87, 0xbb, 0xa1, 0xf1, 0xf1, 0x6e, 0x68, 0x7c, 0xba, 0x1b, 0x1a, 0xef, 0x3e, 0x0f, 0x7f,
+ 0xba, 0x3e, 0x7c, 0xd2, 0xa7, 0xde, 0x6d, 0xaa, 0xd3, 0xc1, 0xb7, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xd4, 0x82, 0xeb, 0xd4, 0x2a, 0x08, 0x00, 0x00,
}
diff --git a/src/metrics/generated/proto/rulepb/rule.proto b/src/metrics/generated/proto/rulepb/rule.proto
index 2a0f3682a2..2c4d02d7f5 100644
--- a/src/metrics/generated/proto/rulepb/rule.proto
+++ b/src/metrics/generated/proto/rulepb/rule.proto
@@ -27,6 +27,7 @@ package rulepb;
import "github.com/m3db/m3/src/metrics/generated/proto/aggregationpb/aggregation.proto";
import "github.com/m3db/m3/src/metrics/generated/proto/pipelinepb/pipeline.proto";
import "github.com/m3db/m3/src/metrics/generated/proto/policypb/policy.proto";
+import "github.com/m3db/m3/src/metrics/generated/proto/metricpb/metric.proto";
message MappingRuleSnapshot {
string name = 1;
@@ -40,6 +41,7 @@ message MappingRuleSnapshot {
repeated aggregationpb.AggregationType aggregation_types = 8;
repeated policypb.StoragePolicy storage_policies = 9;
policypb.DropPolicy drop_policy = 10;
+ repeated metricpb.Tag tags = 11;
}
message MappingRule {
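
As a sketch, a snapshot carrying the new field 11 might be built like this; the struct and accessor names come from the generated rule.pb.go above, while the rule name and tag bytes are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
	"github.com/m3db/m3/src/metrics/generated/proto/rulepb"
)

func main() {
	// Tags to associate with metrics matched by this mapping rule snapshot.
	snapshot := &rulepb.MappingRuleSnapshot{
		Name: "tag-by-team", // hypothetical rule name
		Tags: []*metricpb.Tag{
			{Name: []byte("team"), Value: []byte("metrics")},
		},
	}
	fmt.Printf("%s=%s\n", snapshot.GetTags()[0].Name, snapshot.GetTags()[0].Value)
}
```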
diff --git a/src/metrics/generated/proto/transformationpb/transformation.pb.go b/src/metrics/generated/proto/transformationpb/transformation.pb.go
index c4fd69f38c..808d10685a 100644
--- a/src/metrics/generated/proto/transformationpb/transformation.pb.go
+++ b/src/metrics/generated/proto/transformationpb/transformation.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/m3db/m3/src/metrics/generated/proto/transformationpb/transformation.proto
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -52,17 +52,23 @@ const (
TransformationType_UNKNOWN TransformationType = 0
TransformationType_ABSOLUTE TransformationType = 1
TransformationType_PERSECOND TransformationType = 2
+ TransformationType_INCREASE TransformationType = 3
+ TransformationType_ADD TransformationType = 4
)
var TransformationType_name = map[int32]string{
0: "UNKNOWN",
1: "ABSOLUTE",
2: "PERSECOND",
+ 3: "INCREASE",
+ 4: "ADD",
}
var TransformationType_value = map[string]int32{
"UNKNOWN": 0,
"ABSOLUTE": 1,
"PERSECOND": 2,
+ "INCREASE": 3,
+ "ADD": 4,
}
func (x TransformationType) String() string {
@@ -81,17 +87,18 @@ func init() {
}
var fileDescriptorTransformation = []byte{
- // 180 bytes of a gzipped FileDescriptorProto
+ // 201 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x0a, 0x49, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xcf, 0x35, 0x4e, 0x49, 0xd2, 0xcf, 0x35, 0xd6, 0x2f,
0x2e, 0x4a, 0xd6, 0xcf, 0x4d, 0x2d, 0x29, 0xca, 0x4c, 0x2e, 0xd6, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d,
0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x29, 0x4a, 0xcc, 0x2b,
0x4e, 0xcb, 0x2f, 0xca, 0x4d, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x48, 0x42, 0x13, 0xd0, 0x03, 0xab,
- 0x12, 0x12, 0x40, 0x57, 0xa6, 0x65, 0xc7, 0x25, 0x14, 0x82, 0x22, 0x16, 0x52, 0x59, 0x90, 0x2a,
+ 0x12, 0x12, 0x40, 0x57, 0xa6, 0x15, 0xca, 0x25, 0x14, 0x82, 0x22, 0x16, 0x52, 0x59, 0x90, 0x2a,
0xc4, 0xcd, 0xc5, 0x1e, 0xea, 0xe7, 0xed, 0xe7, 0x1f, 0xee, 0x27, 0xc0, 0x20, 0xc4, 0xc3, 0xc5,
0xe1, 0xe8, 0x14, 0xec, 0xef, 0x13, 0x1a, 0xe2, 0x2a, 0xc0, 0x28, 0xc4, 0xcb, 0xc5, 0x19, 0xe0,
- 0x1a, 0x14, 0xec, 0xea, 0xec, 0xef, 0xe7, 0x22, 0xc0, 0xe4, 0x14, 0x78, 0xe2, 0x91, 0x1c, 0xe3,
- 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x4f, 0xa1,
- 0xcb, 0x93, 0xd8, 0xc0, 0xe2, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1e, 0xe7, 0xc2, 0x13,
- 0x03, 0x01, 0x00, 0x00,
+ 0x1a, 0x14, 0xec, 0xea, 0xec, 0xef, 0xe7, 0x22, 0xc0, 0x04, 0x92, 0xf4, 0xf4, 0x73, 0x0e, 0x72,
+ 0x75, 0x0c, 0x76, 0x15, 0x60, 0x16, 0x62, 0xe7, 0x62, 0x76, 0x74, 0x71, 0x11, 0x60, 0x71, 0x0a,
+ 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96,
+ 0x63, 0x88, 0xb2, 0xa7, 0xd0, 0x43, 0x49, 0x6c, 0x60, 0x71, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff,
+ 0xff, 0xf3, 0x67, 0xfb, 0xf2, 0x1a, 0x01, 0x00, 0x00,
}
diff --git a/src/metrics/generated/proto/transformationpb/transformation.proto b/src/metrics/generated/proto/transformationpb/transformation.proto
index 681a490b17..640f043bfc 100644
--- a/src/metrics/generated/proto/transformationpb/transformation.proto
+++ b/src/metrics/generated/proto/transformationpb/transformation.proto
@@ -28,4 +28,6 @@ enum TransformationType {
UNKNOWN = 0;
ABSOLUTE = 1;
PERSECOND = 2;
+ INCREASE = 3;
+ ADD = 4;
}
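
The generated name/value maps and String() pick the two new entries up automatically. A quick round-trip check, assuming nothing beyond the generated transformationpb package shown above:

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/metrics/generated/proto/transformationpb"
)

func main() {
	for _, t := range []transformationpb.TransformationType{
		transformationpb.TransformationType_INCREASE,
		transformationpb.TransformationType_ADD,
	} {
		// String() consults TransformationType_name; the value map inverts it.
		name := t.String()
		fmt.Println(name, transformationpb.TransformationType_value[name]) // INCREASE 3, ADD 4
	}
}
```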
diff --git a/src/metrics/matcher/cache/elem_new_map_gen.go b/src/metrics/matcher/cache/elem_new_map_gen.go
index 723ea6136e..2e1b7c9706 100644
--- a/src/metrics/matcher/cache/elem_new_map_gen.go
+++ b/src/metrics/matcher/cache/elem_new_map_gen.go
@@ -29,7 +29,7 @@ import (
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/metrics/matcher/cache/namespace_results_new_map_gen.go b/src/metrics/matcher/cache/namespace_results_new_map_gen.go
index 96cb0e1ae1..86c9b9d035 100644
--- a/src/metrics/matcher/cache/namespace_results_new_map_gen.go
+++ b/src/metrics/matcher/cache/namespace_results_new_map_gen.go
@@ -29,7 +29,7 @@ import (
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/metrics/matcher/namespace_rule_sets_new_map_gen.go b/src/metrics/matcher/namespace_rule_sets_new_map_gen.go
index 61c0f064c5..bb6071ff57 100644
--- a/src/metrics/matcher/namespace_rule_sets_new_map_gen.go
+++ b/src/metrics/matcher/namespace_rule_sets_new_map_gen.go
@@ -29,7 +29,7 @@ import (
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
diff --git a/src/metrics/matcher/rule_namespaces_new_map_gen.go b/src/metrics/matcher/rule_namespaces_new_map_gen.go
index 3ac597f3d0..30fbb5668e 100644
--- a/src/metrics/matcher/rule_namespaces_new_map_gen.go
+++ b/src/metrics/matcher/rule_namespaces_new_map_gen.go
@@ -29,7 +29,7 @@ import (
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
)
// Copyright (c) 2018 Uber Technologies, Inc.
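
The four generated-map changes above are the same one-line migration: xxhash v2 keeps the package name and the 64-bit digest, so call sites only need the import path bumped. A small standalone sketch of the v2 API these maps rely on:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a key, as the generated maps do internally.
	fmt.Println(xxhash.Sum64([]byte("some-map-key")))

	// Streaming form via the Digest type for incrementally built input.
	d := xxhash.New()
	_, _ = d.Write([]byte("streaming "))
	_, _ = d.Write([]byte("input"))
	fmt.Println(d.Sum64())
}
```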
diff --git a/src/metrics/matcher/ruleset.go b/src/metrics/matcher/ruleset.go
index 1257fd9f42..fcc037f135 100644
--- a/src/metrics/matcher/ruleset.go
+++ b/src/metrics/matcher/ruleset.go
@@ -61,9 +61,9 @@ type ruleSetMetrics struct {
updated tally.Counter
}
-func newRuleSetMetrics(scope tally.Scope, samplingRate float64) ruleSetMetrics {
+func newRuleSetMetrics(scope tally.Scope, opts instrument.TimerOptions) ruleSetMetrics {
return ruleSetMetrics{
- match: instrument.NewMethodMetrics(scope, "match", samplingRate),
+ match: instrument.NewMethodMetrics(scope, "match", opts),
nilMatcher: scope.Counter("nil-matcher"),
updated: scope.Counter("updated"),
}
@@ -108,7 +108,8 @@ func newRuleSet(
onRuleSetUpdatedFn: opts.OnRuleSetUpdatedFn(),
proto: &rulepb.RuleSet{},
version: kv.UninitializedVersion,
- metrics: newRuleSetMetrics(instrumentOpts.MetricsScope(), instrumentOpts.MetricsSamplingRate()),
+ metrics: newRuleSetMetrics(instrumentOpts.MetricsScope(),
+ instrumentOpts.TimerOptions()),
}
valueOpts := runtime.NewOptions().
SetInstrumentOptions(opts.InstrumentOptions()).
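
The same wiring in isolation, as it might appear inside the matcher package. newRuleSetMetrics and TimerOptions() are visible in the hunk above; instrument.NewOptions() is assumed from src/x/instrument, and the helper itself is not part of this change:

```go
package matcher

import "github.com/m3db/m3/src/x/instrument"

// newDefaultRuleSetMetrics is illustrative glue: the timer configuration now
// travels as one instrument.TimerOptions value rather than a bare
// sampling-rate float, so both arguments come off the same options value.
func newDefaultRuleSetMetrics() ruleSetMetrics {
	iopts := instrument.NewOptions() // assumed constructor from src/x/instrument
	return newRuleSetMetrics(iopts.MetricsScope(), iopts.TimerOptions())
}
```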
diff --git a/src/metrics/metadata/metadata.go b/src/metrics/metadata/metadata.go
index fdd45fab17..8898930153 100644
--- a/src/metrics/metadata/metadata.go
+++ b/src/metrics/metadata/metadata.go
@@ -26,6 +26,7 @@ import (
"github.com/m3db/m3/src/metrics/generated/proto/policypb"
"github.com/m3db/m3/src/metrics/pipeline/applied"
"github.com/m3db/m3/src/metrics/policy"
+ "github.com/m3db/m3/src/query/models"
)
var (
@@ -50,6 +51,14 @@ var (
// DropPipelineMetadatas is the drop policy list of pipeline metadatas.
DropPipelineMetadatas = []PipelineMetadata{DropPipelineMetadata}
+	// DropIfOnlyMatchPipelineMetadata is the pipeline metadata carrying the
+	// drop-if-only-match policy.
+ DropIfOnlyMatchPipelineMetadata = PipelineMetadata{DropPolicy: policy.DropIfOnlyMatch}
+
+	// DropIfOnlyMatchPipelineMetadatas is the single-element list of pipeline
+	// metadatas carrying the drop-if-only-match policy.
+ DropIfOnlyMatchPipelineMetadatas = []PipelineMetadata{DropIfOnlyMatchPipelineMetadata}
+
// DropMetadata is the drop policy metadata.
DropMetadata = Metadata{Pipelines: DropPipelineMetadatas}
@@ -73,6 +82,12 @@ type PipelineMetadata struct {
// Drop policy.
DropPolicy policy.DropPolicy `json:"dropPolicy,omitempty"`
+
+ // Tags.
+ Tags []models.Tag `json:"tags"`
+
+	// GraphitePrefix is the list of Graphite prefixes to apply.
+ GraphitePrefix [][]byte `json:"graphitePrefix"`
}
// Equal returns true if two pipeline metadata are considered equal.
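
A construction sketch for the widened struct; models.Tag follows the new src/query/models import above, its Name/Value byte-slice fields are assumed, and every value below is illustrative:

```go
package metadata

import "github.com/m3db/m3/src/query/models"

// examplePipelineMetadata populates only the two new fields, leaving the
// aggregation ID, storage policies, pipeline and drop policy at their zero
// values; it is illustrative and not part of this change.
var examplePipelineMetadata = PipelineMetadata{
	Tags: []models.Tag{
		{Name: []byte("service"), Value: []byte("m3aggregator")},
	},
	GraphitePrefix: [][]byte{[]byte("stats"), []byte("counts")},
}
```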
@@ -185,9 +200,11 @@ func (metadatas PipelineMetadatas) Clone() PipelineMetadatas {
type ApplyOrRemoveDropPoliciesResult uint
const (
+ // NoDropPolicyPresentResult is the result of no drop policies being present.
+ NoDropPolicyPresentResult ApplyOrRemoveDropPoliciesResult = iota
// AppliedEffectiveDropPolicyResult is the result of applying the drop
// policy and returning just the single drop policy pipeline.
- AppliedEffectiveDropPolicyResult ApplyOrRemoveDropPoliciesResult = iota
+ AppliedEffectiveDropPolicyResult
// RemovedIneffectiveDropPoliciesResult is the result of no drop policies
// being effective and returning the pipelines without any drop policies.
RemovedIneffectiveDropPoliciesResult
@@ -217,7 +234,7 @@ func (metadatas PipelineMetadatas) ApplyOrRemoveDropPolicies() (
if dropIfOnlyMatchPipelines == 0 {
// No drop if only match pipelines, no need to remove anything
- return metadatas, RemovedIneffectiveDropPoliciesResult
+ return metadatas, NoDropPolicyPresentResult
}
if nonDropPipelines == 0 {
@@ -421,62 +438,6 @@ func (sms StagedMetadatas) IsDropPolicyApplied() bool {
return len(sms) == 1 && sms[0].IsDropPolicyApplied()
}
-// ApplyOrRemoveDropPolicies applies or removes any drop policies staged
-// metadatas, if effective then just a single drop pipeline staged metadata
-// is returned otherwise if not effective it removes in each staged metadata
-// the drop policy from all pipelines and retains only non-drop policy
-// effective staged metadatas.
-func (sms StagedMetadatas) ApplyOrRemoveDropPolicies() (
- StagedMetadatas,
- ApplyOrRemoveDropPoliciesResult,
-) {
- if len(sms) == 0 {
- return sms, RemovedIneffectiveDropPoliciesResult
- }
-
- var (
- dropStagedMetadatas = 0
- nonDropStagedMetadatas = 0
- result = sms
- earliestDropStagedMetadata StagedMetadata
- )
-
- for i := len(result) - 1; i >= 0; i-- {
- var applyOrRemoveResult ApplyOrRemoveDropPoliciesResult
- sms[i].Pipelines, applyOrRemoveResult = sms[i].Pipelines.ApplyOrRemoveDropPolicies()
-
- switch applyOrRemoveResult {
- case AppliedEffectiveDropPolicyResult:
- dropStagedMetadatas++
-
- // Track the drop staged metadata so we can return it if we need to
- if dropStagedMetadatas == 1 ||
- sms[i].CutoverNanos < earliestDropStagedMetadata.CutoverNanos {
- earliestDropStagedMetadata = sms[i]
- }
-
- // Remove by moving to tail and decrementing length so we can do in
- // place to avoid allocations of a new slice
- if lastElem := i == len(result)-1; lastElem {
- result = result[0:i]
- } else {
- result = append(result[0:i], result[i+1:]...)
- }
- default:
- // Not an effective drop staged metadata
- nonDropStagedMetadatas++
- }
- }
-
- if nonDropStagedMetadatas == 0 {
- // If there were no non-drop staged metadatas, then just return the
- // canonical drop staged metadatas
- return StagedMetadatas{earliestDropStagedMetadata}, AppliedEffectiveDropPolicyResult
- }
-
- return result, RemovedIneffectiveDropPoliciesResult
-}
-
// ToProto converts the staged metadatas to a protobuf message in place.
func (sms StagedMetadatas) ToProto(pb *metricpb.StagedMetadatas) error {
numMetadatas := len(sms)
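With the staged-metadatas variant removed and `NoDropPolicyPresentResult` added, callers of the pipeline-level `ApplyOrRemoveDropPolicies` can now tell "no drop policies at all" apart from "ineffective drop policies stripped". A hedged sketch of handling the three results (the empty `PipelineMetadata` literal is just a stand-in input):

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/metrics/metadata"
)

func main() {
	pipelines := metadata.PipelineMetadatas{metadata.PipelineMetadata{}}

	out, result := pipelines.ApplyOrRemoveDropPolicies()
	switch result {
	case metadata.NoDropPolicyPresentResult:
		// No drop policies were present; pipelines returned unchanged.
	case metadata.AppliedEffectiveDropPolicyResult:
		// Every pipeline was a drop; out is the single drop pipeline.
	case metadata.RemovedIneffectiveDropPoliciesResult:
		// Drop policies existed but were ineffective and were removed.
	}
	fmt.Println(len(out), result)
}
```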
diff --git a/src/metrics/metadata/metadata_test.go b/src/metrics/metadata/metadata_test.go
index a90b74ac50..abd8087061 100644
--- a/src/metrics/metadata/metadata_test.go
+++ b/src/metrics/metadata/metadata_test.go
@@ -1087,8 +1087,8 @@ func TestVersionedStagedMetadatasMarshalJSON(t *testing.T) {
`{"version":12,` +
`"stagedMetadatas":` +
`[{"metadata":{"pipelines":[` +
- `{"aggregation":["Sum"],"storagePolicies":["1s:1h","1m:12h"]},` +
- `{"aggregation":null,"storagePolicies":["10s:1h"]}]},` +
+ `{"aggregation":["Sum"],"storagePolicies":["1s:1h","1m:12h"],"tags":null,"graphitePrefix":null},` +
+ `{"aggregation":null,"storagePolicies":["10s:1h"],"tags":null,"graphitePrefix":null}]},` +
`"cutoverNanos":4567,` +
`"tombstoned":true}]}`
require.Equal(t, expected, string(res))
@@ -1249,92 +1249,10 @@ func TestApplyOrRemoveDropPoliciesDropIfOnlyMatchNone(t *testing.T) {
},
}
output, result := input.ApplyOrRemoveDropPolicies()
- require.Equal(t, RemovedIneffectiveDropPoliciesResult, result)
+ require.Equal(t, NoDropPolicyPresentResult, result)
require.True(t, output.Equal(input))
}
-func TestStagedMetadatasApplyOrRemoveDropPoliciesRemovingAnyDropStagedMetadata(t *testing.T) {
- validStagedMetadatas := StagedMetadatas{
- StagedMetadata{
- Metadata: Metadata{Pipelines: PipelineMetadatas{
- {
- AggregationID: aggregation.MustCompressTypes(aggregation.Sum),
- StoragePolicies: []policy.StoragePolicy{
- policy.NewStoragePolicy(time.Second, xtime.Second, time.Hour),
- policy.NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour),
- },
- DropPolicy: policy.DropNone,
- },
- }},
- },
- StagedMetadata{
- Metadata: Metadata{Pipelines: PipelineMetadatas{
- {
- AggregationID: aggregation.MustCompressTypes(aggregation.Sum),
- StoragePolicies: []policy.StoragePolicy{
- policy.NewStoragePolicy(time.Minute, xtime.Minute, 12*time.Hour),
- policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 24*time.Hour),
- },
- DropPolicy: policy.DropNone,
- },
- }},
- },
- }
-
- // Run test for every single insertion point
- for i := 0; i < len(validStagedMetadatas)+1; i++ {
- t.Run(fmt.Sprintf("test insert drop if only rule at %d", i),
- func(t *testing.T) {
- var (
- copy = append(StagedMetadatas(nil), validStagedMetadatas...)
- input StagedMetadatas
- )
- for j := 0; j < len(validStagedMetadatas)+1; j++ {
- if j == i {
- // Insert the drop if only match rule at this position
- input = append(input, DropStagedMetadata)
- } else {
- input = append(input, copy[0])
- copy = copy[1:]
- }
- }
-
- output, result := input.ApplyOrRemoveDropPolicies()
- require.Equal(t, RemovedIneffectiveDropPoliciesResult, result)
- require.True(t, output.Equal(validStagedMetadatas))
- })
- }
-}
-
-func TestStagedMetadatasApplyOrRemoveDropPoliciesApplyingDropStagedMetadata(t *testing.T) {
- // Check compacts together and chooses earliest staged metadata
- metadatas, result := StagedMetadatas{
- StagedMetadata{Metadata: DropMetadata, CutoverNanos: 456},
- StagedMetadata{Metadata: DropMetadata, CutoverNanos: 123},
- }.ApplyOrRemoveDropPolicies()
-
- require.True(t, metadatas.Equal(StagedMetadatas{StagedMetadata{
- Metadata: DropMetadata, CutoverNanos: 123},
- }))
- require.Equal(t, AppliedEffectiveDropPolicyResult, result)
-
- // Check single also returns as expected
- metadatas, result = StagedMetadatas{
- StagedMetadata{Metadata: DropMetadata, CutoverNanos: 123},
- }.ApplyOrRemoveDropPolicies()
-
- require.True(t, metadatas.Equal(StagedMetadatas{StagedMetadata{
- Metadata: DropMetadata, CutoverNanos: 123},
- }))
- require.Equal(t, AppliedEffectiveDropPolicyResult, result)
-}
-
-func TestStagedMetadatasApplyOrRemoveDropPoliciesWithNoStagedMetadatasIsNoOp(t *testing.T) {
- metadatas, result := StagedMetadatas{}.ApplyOrRemoveDropPolicies()
- require.Equal(t, 0, len(metadatas))
- require.Equal(t, RemovedIneffectiveDropPoliciesResult, result)
-}
-
func TestStagedMetadatasDropReturnsIsDropPolicyAppliedTrue(t *testing.T) {
require.True(t, StagedMetadatas{
StagedMetadata{Metadata: DropMetadata, CutoverNanos: 123},
diff --git a/src/metrics/metric/aggregated/types.go b/src/metrics/metric/aggregated/types.go
index fad7dd952a..4415dfa039 100644
--- a/src/metrics/metric/aggregated/types.go
+++ b/src/metrics/metric/aggregated/types.go
@@ -33,8 +33,9 @@ import (
)
var (
- errNilForwardedMetricWithMetadataProto = errors.New("nil forwarded metric with metadata proto message")
- errNilTimedMetricWithMetadataProto = errors.New("nil timed metric with metadata proto message")
+ errNilForwardedMetricWithMetadataProto = errors.New("nil forwarded metric with metadata proto message")
+ errNilTimedMetricWithMetadataProto = errors.New("nil timed metric with metadata proto message")
+ errNilPassthroughMetricWithMetadataProto = errors.New("nil passthrough metric with metadata proto message")
)
// Metric is a metric, which is essentially a named value at certain time.
@@ -229,3 +230,53 @@ func (tm *TimedMetricWithMetadata) FromProto(pb *metricpb.TimedMetricWithMetadat
}
return tm.TimedMetadata.FromProto(pb.Metadata)
}
+
+// TimedMetricWithMetadatas is a timed metric with staged metadatas.
+type TimedMetricWithMetadatas struct {
+ Metric
+ metadata.StagedMetadatas
+}
+
+// ToProto converts the timed metric with metadata to a protobuf message in place.
+func (tm TimedMetricWithMetadatas) ToProto(pb *metricpb.TimedMetricWithMetadatas) error {
+ if err := tm.Metric.ToProto(&pb.Metric); err != nil {
+ return err
+ }
+ return tm.StagedMetadatas.ToProto(&pb.Metadatas)
+}
+
+// FromProto converts the protobuf message to a timed metric with metadata in place.
+func (tm *TimedMetricWithMetadatas) FromProto(pb *metricpb.TimedMetricWithMetadatas) error {
+ if pb == nil {
+ return errNilTimedMetricWithMetadataProto
+ }
+ if err := tm.Metric.FromProto(pb.Metric); err != nil {
+ return err
+ }
+ return tm.StagedMetadatas.FromProto(pb.Metadatas)
+}
+
+// PassthroughMetricWithMetadata is a passthrough metric with metadata.
+type PassthroughMetricWithMetadata struct {
+ Metric
+ policy.StoragePolicy
+}
+
+// ToProto converts the passthrough metric with metadata to a protobuf message in place.
+func (pm PassthroughMetricWithMetadata) ToProto(pb *metricpb.TimedMetricWithStoragePolicy) error {
+ if err := pm.Metric.ToProto(&pb.TimedMetric); err != nil {
+ return err
+ }
+ return pm.StoragePolicy.ToProto(&pb.StoragePolicy)
+}
+
+// FromProto converts the protobuf message to a passthrough metric with metadata in place.
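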
+func (pm *PassthroughMetricWithMetadata) FromProto(pb *metricpb.TimedMetricWithStoragePolicy) error {
+ if pb == nil {
+ return errNilPassthroughMetricWithMetadataProto
+ }
+ if err := pm.Metric.FromProto(pb.TimedMetric); err != nil {
+ return err
+ }
+ return pm.StoragePolicy.FromProto(pb.StoragePolicy)
+}
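The new wrapper types follow the same in-place `ToProto`/`FromProto` conversion convention as the existing metric-with-metadata types. A structural round-trip sketch, assuming the metricpb messages referenced in this diff (a real metric would also need a valid type, ID, and timestamp for the conversions to succeed):

```go
package main

import (
	"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
	"github.com/m3db/m3/src/metrics/metric/aggregated"
)

func main() {
	var (
		in  aggregated.TimedMetricWithMetadatas
		pb  metricpb.TimedMetricWithMetadatas
		out aggregated.TimedMetricWithMetadatas
	)

	// Encode into the reusable proto message, then decode back; both
	// conversions mutate their receivers in place to avoid allocation.
	if err := in.ToProto(&pb); err != nil {
		panic(err)
	}
	if err := out.FromProto(&pb); err != nil {
		panic(err)
	}
}
```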
diff --git a/src/metrics/metric/types.go b/src/metrics/metric/types.go
index ec74cd1eaf..521629cba6 100644
--- a/src/metrics/metric/types.go
+++ b/src/metrics/metric/types.go
@@ -45,6 +45,19 @@ var validTypes = []Type{
GaugeType,
}
+// Reserved m3 metric type values and __m3-prefixed tag names used to carry
+// metric type and graphite aggregation/prefix hints alongside regular tags.
+var (
+ M3CounterValue = []byte("counter")
+ M3GaugeValue = []byte("gauge")
+ M3TimerValue = []byte("timer")
+
+ M3MetricsPrefix = []byte("__m3")
+ M3MetricsPrefixString = string(M3MetricsPrefix)
+
+ M3TypeTag = []byte(M3MetricsPrefixString + "_type__")
+ M3MetricsGraphiteAggregation = []byte(M3MetricsPrefixString + "_graphite_aggregation__")
+ M3MetricsGraphitePrefix = []byte(M3MetricsPrefixString + "_graphite_prefix__")
+)
+
func (t Type) String() string {
switch t {
case UnknownType:
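These reserved `__m3`-prefixed tag names let ingestion carry type and graphite hints alongside ordinary tags. A small illustrative helper (the `typeFromTags` function is hypothetical, not from this diff) showing how a consumer might look up the type tag:

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/metrics/metric"
)

// typeFromTags returns the value of the reserved __m3_type__ tag, if
// present. Illustrative helper, not part of the diff.
func typeFromTags(tags map[string][]byte) ([]byte, bool) {
	v, ok := tags[string(metric.M3TypeTag)]
	return v, ok
}

func main() {
	tags := map[string][]byte{
		string(metric.M3TypeTag): metric.M3CounterValue,
	}
	if v, ok := typeFromTags(tags); ok {
		fmt.Printf("metric type: %s\n", v)
	}
}
```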
diff --git a/src/metrics/policy/drop_policy.go b/src/metrics/policy/drop_policy.go
index 02ba8c82ee..05b8747987 100644
--- a/src/metrics/policy/drop_policy.go
+++ b/src/metrics/policy/drop_policy.go
@@ -29,10 +29,10 @@ const (
// DropNone specifies not to drop any of the matched metrics.
DropNone DropPolicy = iota
// DropMust specifies to always drop matched metrics, regardless of
- // other rules.
+ // other rules. Metrics are not dropped from the rollup rules.
DropMust
// DropIfOnlyMatch specifies to drop matched metrics, but only if no
- // other rules match.
+ // other rules match. Metrics are not dropped from the rollup rules.
DropIfOnlyMatch
// DefaultDropPolicy is to drop none.
diff --git a/src/metrics/policy/storage_policy.go b/src/metrics/policy/storage_policy.go
index 7c409bac78..1c4221a22e 100644
--- a/src/metrics/policy/storage_policy.go
+++ b/src/metrics/policy/storage_policy.go
@@ -73,6 +73,14 @@ func NewStoragePolicyFromProto(pb *policypb.StoragePolicy) (StoragePolicy, error
return sp, nil
}
+// Equivalent returns whether two storage policies are equal by their
+// resolution window and retention. The resolution precision is ignored
+// for equivalency (hence the method is not named Equal).
+func (p StoragePolicy) Equivalent(other StoragePolicy) bool {
+ return p.resolution.Window == other.resolution.Window &&
+ p.retention == other.retention
+}
+
// String is the string representation of a storage policy.
func (p StoragePolicy) String() string {
return fmt.Sprintf("%s%s%s", p.resolution.String(), resolutionRetentionSeparator, p.retention.String())
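A short sketch of the distinction, using `NewStoragePolicy` as it appears in the tests elsewhere in this diff: two policies with the same 10s window and 24h retention but different resolution precisions are Equivalent yet not equal.

```go
package main

import (
	"fmt"
	"time"

	"github.com/m3db/m3/src/metrics/policy"
	xtime "github.com/m3db/m3/src/x/time"
)

func main() {
	a := policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour)
	b := policy.NewStoragePolicy(10*time.Second, xtime.Minute, 24*time.Hour)

	fmt.Println(a.Equivalent(b)) // true: resolution precision is ignored
	fmt.Println(a == b)          // false: precisions differ
}
```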
diff --git a/src/metrics/rules/active_ruleset.go b/src/metrics/rules/active_ruleset.go
index 2b1b0f95ad..f1b0986cfb 100644
--- a/src/metrics/rules/active_ruleset.go
+++ b/src/metrics/rules/active_ruleset.go
@@ -235,12 +235,12 @@ func (as *activeRuleSet) mappingsForNonRollupID(
AggregationID: snapshot.aggregationID,
StoragePolicies: snapshot.storagePolicies.Clone(),
DropPolicy: snapshot.dropPolicy,
+ Tags: snapshot.tags,
+ GraphitePrefix: snapshot.graphitePrefix,
}
pipelines = append(pipelines, pipeline)
}
- pipelines, _ = metadata.PipelineMetadatas(pipelines).ApplyOrRemoveDropPolicies()
-
// NB: The pipeline list should never be empty as the resulting pipelines are
// used to determine how the *existing* ID is aggregated and retained. If there
// is no rule match, the default pipeline list is used.
@@ -749,16 +749,6 @@ func (res *ruleMatchResults) unique() *ruleMatchResults {
return res
}
- // First resolve if drop policies are in effect
- var (
- evaluate = metadata.PipelineMetadatas(res.pipelines)
- dropApplyResult metadata.ApplyOrRemoveDropPoliciesResult
- )
- res.pipelines, dropApplyResult = evaluate.ApplyOrRemoveDropPolicies()
- if dropApplyResult == metadata.AppliedEffectiveDropPolicyResult {
- return res
- }
-
// Otherwise merge as per usual
curr := 0
for i := 1; i < len(res.pipelines); i++ {
diff --git a/src/metrics/rules/active_ruleset_test.go b/src/metrics/rules/active_ruleset_test.go
index 80bea6fe0f..0a234fefee 100644
--- a/src/metrics/rules/active_ruleset_test.go
+++ b/src/metrics/rules/active_ruleset_test.go
@@ -34,6 +34,7 @@ import (
"github.com/m3db/m3/src/metrics/pipeline/applied"
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/metrics/transformation"
+ "github.com/m3db/m3/src/query/models"
xtime "github.com/m3db/m3/src/x/time"
"github.com/google/go-cmp/cmp"
@@ -495,7 +496,15 @@ func TestActiveRuleSetForwardMatchWithMappingRules(t *testing.T) {
CutoverNanos: 20000,
Tombstoned: false,
Metadata: metadata.Metadata{
- Pipelines: metadata.DropPipelineMetadatas,
+ Pipelines: []metadata.PipelineMetadata{
+ {
+ AggregationID: aggregation.DefaultID,
+ StoragePolicies: policy.StoragePolicies{
+ policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour),
+ },
+ },
+ metadata.DropPipelineMetadata,
+ },
},
},
},
@@ -510,7 +519,7 @@ func TestActiveRuleSetForwardMatchWithMappingRules(t *testing.T) {
CutoverNanos: 20000,
Tombstoned: false,
Metadata: metadata.Metadata{
- Pipelines: metadata.DropPipelineMetadatas,
+ Pipelines: metadata.DropIfOnlyMatchPipelineMetadatas,
},
},
},
@@ -532,6 +541,7 @@ func TestActiveRuleSetForwardMatchWithMappingRules(t *testing.T) {
policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour),
},
},
+ metadata.DropIfOnlyMatchPipelineMetadata,
},
},
},
@@ -2385,7 +2395,36 @@ func TestActiveRuleSetForwardMatchWithMappingRulesAndRollupRules(t *testing.T) {
CutoverNanos: 35000,
Tombstoned: false,
Metadata: metadata.Metadata{
- Pipelines: metadata.DropPipelineMetadatas,
+ Pipelines: []metadata.PipelineMetadata{
+ {
+ AggregationID: aggregation.DefaultID,
+ StoragePolicies: policy.StoragePolicies{
+ policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour),
+ },
+ },
+ metadata.DropPipelineMetadata,
+ {
+ AggregationID: aggregation.DefaultID,
+ StoragePolicies: policy.StoragePolicies{
+ policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour),
+ },
+ Pipeline: applied.NewPipeline([]applied.OpUnion{
+ {
+ Type: pipeline.TransformationOpType,
+ Transformation: pipeline.TransformationOp{
+ Type: transformation.PerSecond,
+ },
+ },
+ {
+ Type: pipeline.RollupOpType,
+ Rollup: applied.RollupOp{
+ ID: b("rName1|rtagName1=rtagValue1,rtagName2=rtagValue2"),
+ AggregationID: aggregation.DefaultID,
+ },
+ },
+ }),
+ },
+ },
},
},
},
@@ -2854,46 +2893,54 @@ func TestActiveRuleSetReverseMatchWithMappingRulesForNonRollupID(t *testing.T) {
},
},
{
- id: "shouldDropTagName1=shouldDropTagValue1",
- matchFrom: 25000,
- matchTo: 25001,
+ id: "shouldDropTagName1=shouldDropTagValue1",
+ matchFrom: 25000,
+ matchTo: 25001,
metricType: metric.CounterType,
aggregationType: aggregation.Sum,
- expireAtNanos: 30000,
+ expireAtNanos: 30000,
forExistingIDResult: metadata.StagedMetadatas{
metadata.StagedMetadata{
CutoverNanos: 20000,
Tombstoned: false,
Metadata: metadata.Metadata{
- Pipelines: metadata.DropPipelineMetadatas,
+ Pipelines: []metadata.PipelineMetadata{
+ {
+ AggregationID: aggregation.DefaultID,
+ StoragePolicies: policy.StoragePolicies{
+ policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour),
+ },
+ },
+ metadata.DropPipelineMetadata,
+ },
},
},
},
},
{
- id: "shouldDrop2TagName1=shouldDrop2TagValue1",
- matchFrom: 25000,
- matchTo: 25001,
+ id: "shouldDrop2TagName1=shouldDrop2TagValue1",
+ matchFrom: 25000,
+ matchTo: 25001,
metricType: metric.CounterType,
aggregationType: aggregation.Sum,
- expireAtNanos: 30000,
+ expireAtNanos: 30000,
forExistingIDResult: metadata.StagedMetadatas{
metadata.StagedMetadata{
CutoverNanos: 20000,
Tombstoned: false,
Metadata: metadata.Metadata{
- Pipelines: metadata.DropPipelineMetadatas,
+ Pipelines: metadata.DropIfOnlyMatchPipelineMetadatas,
},
},
},
},
{
- id: "shouldNotDropTagName1=shouldNotDropTagValue1",
- matchFrom: 25000,
- matchTo: 25001,
+ id: "shouldNotDropTagName1=shouldNotDropTagValue1",
+ matchFrom: 25000,
+ matchTo: 25001,
metricType: metric.CounterType,
aggregationType: aggregation.Sum,
- expireAtNanos: 30000,
+ expireAtNanos: 30000,
forExistingIDResult: metadata.StagedMetadatas{
metadata.StagedMetadata{
CutoverNanos: 20000,
@@ -2906,6 +2953,7 @@ func TestActiveRuleSetReverseMatchWithMappingRulesForNonRollupID(t *testing.T) {
policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour),
},
},
+ metadata.DropIfOnlyMatchPipelineMetadata,
},
},
},
@@ -2926,7 +2974,7 @@ func TestActiveRuleSetReverseMatchWithMappingRulesForNonRollupID(t *testing.T) {
for i, input := range inputs {
t.Run(fmt.Sprintf("input %d", i), func(t *testing.T) {
res := as.ReverseMatch(b(input.id), input.matchFrom, input.matchTo,
- input.metricType, input.aggregationType, isMultiAggregationTypesAllowed, aggTypesOpts)
+ input.metricType, input.aggregationType, isMultiAggregationTypesAllowed, aggTypesOpts)
require.Equal(t, input.expireAtNanos, res.expireAtNanos)
require.True(t, cmp.Equal(input.forExistingIDResult, res.ForExistingIDAt(0), testStagedMetadatasCmptOpts...))
})
@@ -3179,6 +3227,7 @@ func testMappingRules(t *testing.T) []*mappingRule {
)
require.NoError(t, err)
+ tags := []models.Tag{{Name: []byte("service")}}
mappingRule1 := &mappingRule{
uuid: "mappingRule1",
snapshots: []*mappingRuleSnapshot{
@@ -3191,6 +3240,7 @@ func testMappingRules(t *testing.T) []*mappingRule {
storagePolicies: policy.StoragePolicies{
policy.NewStoragePolicy(10*time.Second, xtime.Second, 24*time.Hour),
},
+ tags: tags,
},
&mappingRuleSnapshot{
name: "mappingRule1.snapshot1",
@@ -3201,6 +3251,7 @@ func testMappingRules(t *testing.T) []*mappingRule {
storagePolicies: policy.StoragePolicies{
policy.NewStoragePolicy(10*time.Second, xtime.Second, 12*time.Hour),
},
+ tags: tags,
},
&mappingRuleSnapshot{
name: "mappingRule1.snapshot2",
@@ -3213,6 +3264,7 @@ func testMappingRules(t *testing.T) []*mappingRule {
policy.NewStoragePolicy(5*time.Minute, xtime.Minute, 48*time.Hour),
policy.NewStoragePolicy(10*time.Minute, xtime.Minute, 48*time.Hour),
},
+ tags: tags,
},
&mappingRuleSnapshot{
name: "mappingRule1.snapshot3",
@@ -3223,6 +3275,7 @@ func testMappingRules(t *testing.T) []*mappingRule {
storagePolicies: policy.StoragePolicies{
policy.NewStoragePolicy(30*time.Second, xtime.Second, 6*time.Hour),
},
+ tags: tags,
},
},
}
@@ -3352,8 +3405,9 @@ func testMappingRules(t *testing.T) []*mappingRule {
},
}
- // Mapping rule 7 and 8 should combine to effectively be a drop when combined as
- // mapping rule 8 explicitly says must be dropped
+	// Mapping rules 7 and 8 should combine so that the aggregation from
+	// mapping rule 7 occurs, with the metrics being dropped for the default
+	// aggregation as per mapping rule 8, which explicitly says they must be dropped
mappingRule7 := &mappingRule{
uuid: "mappingRule7",
snapshots: []*mappingRuleSnapshot{
@@ -3402,8 +3456,10 @@ func testMappingRules(t *testing.T) []*mappingRule {
},
}
- // Mapping rule 10 and 11 should combine to effectively be a no-drop when combined as
- // mapping rule 10 explicitly says drop only if no other drops
+	// Mapping rules 10 and 11 should combine so that the aggregation from
+	// mapping rule 10 occurs, with the metrics being dropped for the default
+	// aggregation as per mapping rule 11, which says they must be dropped on
+	// match
mappingRule10 := &mappingRule{
uuid: "mappingRule10",
snapshots: []*mappingRuleSnapshot{
diff --git a/src/metrics/rules/mapping.go b/src/metrics/rules/mapping.go
index abd444bb4d..a8238907d9 100644
--- a/src/metrics/rules/mapping.go
+++ b/src/metrics/rules/mapping.go
@@ -21,6 +21,7 @@
package rules
import (
+ "bytes"
"errors"
"fmt"
"time"
@@ -28,10 +29,13 @@ import (
"github.com/m3db/m3/src/metrics/aggregation"
merrors "github.com/m3db/m3/src/metrics/errors"
"github.com/m3db/m3/src/metrics/filters"
+ "github.com/m3db/m3/src/metrics/generated/proto/metricpb"
"github.com/m3db/m3/src/metrics/generated/proto/policypb"
"github.com/m3db/m3/src/metrics/generated/proto/rulepb"
+ "github.com/m3db/m3/src/metrics/metric"
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/metrics/rules/view"
+ "github.com/m3db/m3/src/query/models"
"github.com/pborman/uuid"
)
@@ -47,6 +51,8 @@ var (
errMappingRuleSnapshotIndexOutOfRange = errors.New("mapping rule snapshot index out of range")
errNilMappingRuleSnapshotProto = errors.New("nil mapping rule snapshot proto")
errNilMappingRuleProto = errors.New("nil mapping rule proto")
+
+ pathSeparator = []byte(".")
)
// mappingRuleSnapshot defines a rule snapshot such that if a metric matches the
@@ -60,6 +66,8 @@ type mappingRuleSnapshot struct {
aggregationID aggregation.ID
storagePolicies policy.StoragePolicies
dropPolicy policy.DropPolicy
+ tags []models.Tag
+ graphitePrefix [][]byte
lastUpdatedAtNanos int64
lastUpdatedBy string
}
@@ -128,6 +136,7 @@ func newMappingRuleSnapshotFromProto(
aggregationID,
storagePolicies,
policy.DropPolicy(r.DropPolicy),
+ models.TagsFromProto(r.Tags),
r.LastUpdatedAtNanos,
r.LastUpdatedBy,
), nil
@@ -141,6 +150,7 @@ func newMappingRuleSnapshotFromFields(
aggregationID aggregation.ID,
storagePolicies policy.StoragePolicies,
dropPolicy policy.DropPolicy,
+ tags []models.Tag,
lastUpdatedAtNanos int64,
lastUpdatedBy string,
) (*mappingRuleSnapshot, error) {
@@ -156,6 +166,7 @@ func newMappingRuleSnapshotFromFields(
aggregationID,
storagePolicies,
dropPolicy,
+ tags,
lastUpdatedAtNanos,
lastUpdatedBy,
), nil
@@ -172,9 +183,19 @@ func newMappingRuleSnapshotFromFieldsInternal(
aggregationID aggregation.ID,
storagePolicies policy.StoragePolicies,
dropPolicy policy.DropPolicy,
+ tags []models.Tag,
lastUpdatedAtNanos int64,
lastUpdatedBy string,
) *mappingRuleSnapshot {
+ // If we have a graphite prefix tag, then parse that out here so that it
+ // can be used later.
+ var graphitePrefix [][]byte
+ for _, tag := range tags {
+ if bytes.Equal(tag.Name, metric.M3MetricsGraphitePrefix) {
+ graphitePrefix = bytes.Split(tag.Value, pathSeparator)
+ }
+ }
+
return &mappingRuleSnapshot{
name: name,
tombstoned: tombstoned,
@@ -184,6 +205,8 @@ func newMappingRuleSnapshotFromFieldsInternal(
aggregationID: aggregationID,
storagePolicies: storagePolicies,
dropPolicy: dropPolicy,
+ tags: tags,
+ graphitePrefix: graphitePrefix,
lastUpdatedAtNanos: lastUpdatedAtNanos,
lastUpdatedBy: lastUpdatedBy,
}
@@ -194,6 +217,8 @@ func (mrs *mappingRuleSnapshot) clone() mappingRuleSnapshot {
if mrs.filter != nil {
filter = mrs.filter.Clone()
}
+ tags := make([]models.Tag, len(mrs.tags))
+ copy(tags, mrs.tags)
return mappingRuleSnapshot{
name: mrs.name,
tombstoned: mrs.tombstoned,
@@ -203,6 +228,7 @@ func (mrs *mappingRuleSnapshot) clone() mappingRuleSnapshot {
aggregationID: mrs.aggregationID,
storagePolicies: mrs.storagePolicies.Clone(),
dropPolicy: mrs.dropPolicy,
+		tags:               tags,
lastUpdatedAtNanos: mrs.lastUpdatedAtNanos,
lastUpdatedBy: mrs.lastUpdatedBy,
}
@@ -222,7 +248,10 @@ func (mrs *mappingRuleSnapshot) proto() (*rulepb.MappingRuleSnapshot, error) {
if err != nil {
return nil, err
}
-
+ tags := make([]*metricpb.Tag, 0, len(mrs.tags))
+ for _, tag := range mrs.tags {
+ tags = append(tags, tag.ToProto())
+ }
return &rulepb.MappingRuleSnapshot{
Name: mrs.name,
Tombstoned: mrs.tombstoned,
@@ -233,6 +262,7 @@ func (mrs *mappingRuleSnapshot) proto() (*rulepb.MappingRuleSnapshot, error) {
AggregationTypes: pbAggTypes,
StoragePolicies: storagePolicies,
DropPolicy: policypb.DropPolicy(mrs.dropPolicy),
+ Tags: tags,
}, nil
}
@@ -343,6 +373,7 @@ func (mc *mappingRule) addSnapshot(
aggregationID aggregation.ID,
storagePolicies policy.StoragePolicies,
dropPolicy policy.DropPolicy,
+ tags []models.Tag,
meta UpdateMetadata,
) error {
snapshot, err := newMappingRuleSnapshotFromFields(
@@ -353,6 +384,7 @@ func (mc *mappingRule) addSnapshot(
aggregationID,
storagePolicies,
dropPolicy,
+ tags,
meta.updatedAtNanos,
meta.updatedBy,
)
@@ -393,6 +425,7 @@ func (mc *mappingRule) revive(
aggregationID aggregation.ID,
storagePolicies policy.StoragePolicies,
dropPolicy policy.DropPolicy,
+ tags []models.Tag,
meta UpdateMetadata,
) error {
n, err := mc.name()
@@ -403,7 +436,7 @@ func (mc *mappingRule) revive(
return merrors.NewInvalidInputError(fmt.Sprintf("%s is not tombstoned", n))
}
return mc.addSnapshot(name, rawFilter, aggregationID, storagePolicies,
- dropPolicy, meta)
+ dropPolicy, tags, meta)
}
func (mc *mappingRule) activeIndex(timeNanos int64) int {
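The prefix parsing above is a plain `bytes.Split` on the tag value. A standalone sketch of what a `__m3_graphite_prefix__` value of "stats.counts" becomes:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Mirrors newMappingRuleSnapshotFromFieldsInternal: the tag value is
	// split on "." into graphite path segments.
	value := []byte("stats.counts")
	for _, part := range bytes.Split(value, []byte(".")) {
		fmt.Printf("%s\n", part) // "stats", then "counts"
	}
}
```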
diff --git a/src/metrics/rules/mapping_test.go b/src/metrics/rules/mapping_test.go
index 8480b64983..a714bcd3be 100644
--- a/src/metrics/rules/mapping_test.go
+++ b/src/metrics/rules/mapping_test.go
@@ -29,10 +29,12 @@ import (
"github.com/m3db/m3/src/metrics/errors"
"github.com/m3db/m3/src/metrics/filters"
"github.com/m3db/m3/src/metrics/generated/proto/aggregationpb"
+ "github.com/m3db/m3/src/metrics/generated/proto/metricpb"
"github.com/m3db/m3/src/metrics/generated/proto/policypb"
"github.com/m3db/m3/src/metrics/generated/proto/rulepb"
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/metrics/rules/view"
+ "github.com/m3db/m3/src/query/models"
xtime "github.com/m3db/m3/src/x/time"
"github.com/google/go-cmp/cmp"
@@ -139,6 +141,7 @@ var (
},
},
DropPolicy: policypb.DropPolicy_NONE,
+ Tags: []*metricpb.Tag{},
}
testMappingRuleSnapshot4V2Proto = &rulepb.MappingRuleSnapshot{
Name: "bar",
@@ -163,6 +166,7 @@ var (
},
},
DropPolicy: policypb.DropPolicy_NONE,
+ Tags: []*metricpb.Tag{},
}
testMappingRuleSnapshot5V2Proto = &rulepb.MappingRuleSnapshot{
Name: "foo",
@@ -173,6 +177,7 @@ var (
LastUpdatedBy: "someone",
StoragePolicies: []*policypb.StoragePolicy{},
DropPolicy: policypb.DropPolicy_DROP_MUST,
+ Tags: []*metricpb.Tag{},
}
testMappingRuleSnapshot6V2Proto = &rulepb.MappingRuleSnapshot{
Name: "foo",
@@ -183,6 +188,7 @@ var (
LastUpdatedBy: "someone-else",
StoragePolicies: []*policypb.StoragePolicy{},
DropPolicy: policypb.DropPolicy_DROP_IF_ONLY_MATCH,
+ Tags: []*metricpb.Tag{},
}
testMappingRule1V1Proto = &rulepb.MappingRule{
Uuid: "12669817-13ae-40e6-ba2f-33087b262c68",
@@ -217,6 +223,7 @@ var (
dropPolicy: policy.DropNone,
lastUpdatedAtNanos: 12345000000,
lastUpdatedBy: "someone",
+ tags: []models.Tag{},
}
testMappingRuleSnapshot2 = &mappingRuleSnapshot{
name: "bar",
@@ -231,6 +238,7 @@ var (
dropPolicy: policy.DropNone,
lastUpdatedAtNanos: 67890000000,
lastUpdatedBy: "someone-else",
+ tags: []models.Tag{},
}
testMappingRuleSnapshot3 = &mappingRuleSnapshot{
name: "foo",
@@ -246,6 +254,7 @@ var (
dropPolicy: policy.DropNone,
lastUpdatedAtNanos: 12345000000,
lastUpdatedBy: "someone",
+ tags: []models.Tag{},
}
testMappingRuleSnapshot4 = &mappingRuleSnapshot{
name: "bar",
@@ -259,6 +268,7 @@ var (
dropPolicy: policy.DropNone,
lastUpdatedAtNanos: 67890000000,
lastUpdatedBy: "someone-else",
+ tags: []models.Tag{},
}
testMappingRuleSnapshot5 = &mappingRuleSnapshot{
name: "foo",
@@ -270,6 +280,7 @@ var (
dropPolicy: policy.DropMust,
lastUpdatedAtNanos: 12345000000,
lastUpdatedBy: "someone",
+ tags: []models.Tag{},
}
testMappingRuleSnapshot6 = &mappingRuleSnapshot{
name: "foo",
@@ -281,6 +292,7 @@ var (
dropPolicy: policy.DropIfOnlyMatch,
lastUpdatedAtNanos: 67890000000,
lastUpdatedBy: "someone-else",
+ tags: []models.Tag{},
}
testMappingRule1 = &mappingRule{
uuid: "12669817-13ae-40e6-ba2f-33087b262c68",
@@ -389,6 +401,7 @@ func TestNewMappingRuleSnapshotFromProtoTombstoned(t *testing.T) {
Filter: "tag1:value1 tag2:value2",
LastUpdatedAtNanos: 12345000000,
LastUpdatedBy: "someone",
+ Tags: []*metricpb.Tag{},
}
res, err := newMappingRuleSnapshotFromProto(input, filterOpts)
require.NoError(t, err)
@@ -401,6 +414,7 @@ func TestNewMappingRuleSnapshotFromProtoTombstoned(t *testing.T) {
aggregationID: aggregation.DefaultID,
lastUpdatedAtNanos: 12345000000,
lastUpdatedBy: "someone",
+ tags: []models.Tag{},
}
require.True(t, cmp.Equal(expected, res, testMappingRuleSnapshotCmpOpts...))
require.NotNil(t, res.filter)
@@ -448,6 +462,7 @@ func TestNewMappingRuleSnapshotFromFields(t *testing.T) {
testMappingRuleSnapshot3.aggregationID,
testMappingRuleSnapshot3.storagePolicies,
testMappingRuleSnapshot3.dropPolicy,
+ testMappingRuleSnapshot3.tags,
testMappingRuleSnapshot3.lastUpdatedAtNanos,
testMappingRuleSnapshot3.lastUpdatedBy,
)
@@ -471,6 +486,7 @@ func TestNewMappingRuleSnapshotFromFieldsValidationError(t *testing.T) {
aggregation.DefaultID,
nil,
policy.DropNone,
+ nil,
1234,
"test_user",
)
@@ -620,6 +636,7 @@ func TestMappingRuleMarkTombstoned(t *testing.T) {
rawFilter: "tag1:value1 tag2:value2",
lastUpdatedAtNanos: 10000,
lastUpdatedBy: "john",
+ tags: []models.Tag{},
}
require.True(t, cmp.Equal(expected, rr.snapshots[1], testMappingRuleSnapshotCmpOpts...))
}
diff --git a/src/metrics/rules/ruleset.go b/src/metrics/rules/ruleset.go
index f1da9a5ffc..f7546551f4 100644
--- a/src/metrics/rules/ruleset.go
+++ b/src/metrics/rules/ruleset.go
@@ -354,6 +354,7 @@ func (rs *ruleSet) AddMappingRule(mrv view.MappingRule, meta UpdateMetadata) (st
mrv.AggregationID,
mrv.StoragePolicies,
mrv.DropPolicy,
+ mrv.Tags,
meta,
); err != nil {
return "", xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "add", mrv.Name))
@@ -366,6 +367,7 @@ func (rs *ruleSet) AddMappingRule(mrv view.MappingRule, meta UpdateMetadata) (st
mrv.AggregationID,
mrv.StoragePolicies,
mrv.DropPolicy,
+ mrv.Tags,
meta,
); err != nil {
return "", xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "revive", mrv.Name))
@@ -386,6 +388,7 @@ func (rs *ruleSet) UpdateMappingRule(mrv view.MappingRule, meta UpdateMetadata)
mrv.AggregationID,
mrv.StoragePolicies,
mrv.DropPolicy,
+ mrv.Tags,
meta,
); err != nil {
return xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "update", mrv.Name))
diff --git a/src/metrics/rules/ruleset_test.go b/src/metrics/rules/ruleset_test.go
index eab4d0379a..345edc3920 100644
--- a/src/metrics/rules/ruleset_test.go
+++ b/src/metrics/rules/ruleset_test.go
@@ -30,6 +30,7 @@ import (
merrors "github.com/m3db/m3/src/metrics/errors"
"github.com/m3db/m3/src/metrics/filters"
"github.com/m3db/m3/src/metrics/generated/proto/aggregationpb"
+ "github.com/m3db/m3/src/metrics/generated/proto/metricpb"
"github.com/m3db/m3/src/metrics/generated/proto/pipelinepb"
"github.com/m3db/m3/src/metrics/generated/proto/policypb"
"github.com/m3db/m3/src/metrics/generated/proto/rulepb"
@@ -215,6 +216,7 @@ func TestNewRuleSetFromProtoToProtoRoundtrip(t *testing.T) {
require.NoError(t, err)
res, err := rs.Proto()
require.NoError(t, err)
+ require.Equal(t, proto.MappingRules[0].Snapshots[0], res.MappingRules[0].Snapshots[0])
require.Equal(t, proto, res)
}
@@ -448,7 +450,7 @@ func TestRuleSetClone(t *testing.T) {
rs := res.(*ruleSet)
rsClone := rs.Clone().(*ruleSet)
- require.True(t, cmp.Equal(rs, rsClone, testRuleSetCmpOpts...))
+ require.True(t, cmp.Equal(rs, rsClone, testRuleSetCmpOpts...), cmp.Diff(rs, rsClone, testRuleSetCmpOpts...))
for i, m := range rs.mappingRules {
require.False(t, m == rsClone.mappingRules[i])
}
@@ -1433,6 +1435,7 @@ func testMappingRulesConfig() []*rulepb.MappingRule {
},
},
},
+ Tags: []*metricpb.Tag{},
},
&rulepb.MappingRuleSnapshot{
Name: "mappingRule1.snapshot2",
@@ -1468,6 +1471,7 @@ func testMappingRulesConfig() []*rulepb.MappingRule {
},
},
},
+ Tags: []*metricpb.Tag{},
},
&rulepb.MappingRuleSnapshot{
Name: "mappingRule1.snapshot3",
@@ -1485,6 +1489,7 @@ func testMappingRulesConfig() []*rulepb.MappingRule {
},
},
},
+ Tags: []*metricpb.Tag{},
},
},
},
@@ -1507,6 +1512,7 @@ func testMappingRulesConfig() []*rulepb.MappingRule {
},
},
},
+ Tags: []*metricpb.Tag{},
},
&rulepb.MappingRuleSnapshot{
Name: "mappingRule2.snapshot2",
@@ -1536,6 +1542,7 @@ func testMappingRulesConfig() []*rulepb.MappingRule {
},
},
},
+ Tags: []*metricpb.Tag{},
},
&rulepb.MappingRuleSnapshot{
Name: "mappingRule2.snapshot3",
@@ -1565,6 +1572,7 @@ func testMappingRulesConfig() []*rulepb.MappingRule {
},
},
},
+ Tags: []*metricpb.Tag{},
},
},
},
@@ -1599,6 +1607,7 @@ func testMappingRulesConfig() []*rulepb.MappingRule {
},
},
},
+ Tags: []*metricpb.Tag{},
},
&rulepb.MappingRuleSnapshot{
Name: "mappingRule3.snapshot2",
@@ -1625,6 +1634,7 @@ func testMappingRulesConfig() []*rulepb.MappingRule {
},
},
},
+ Tags: []*metricpb.Tag{},
},
},
},
@@ -1650,6 +1660,7 @@ func testMappingRulesConfig() []*rulepb.MappingRule {
},
},
},
+ Tags: []*metricpb.Tag{},
},
},
},
@@ -1674,6 +1685,7 @@ func testMappingRulesConfig() []*rulepb.MappingRule {
},
},
},
+ Tags: []*metricpb.Tag{},
},
},
},
diff --git a/src/metrics/rules/store/kv/store_test.go b/src/metrics/rules/store/kv/store_test.go
index 09065796c8..fa1551131e 100644
--- a/src/metrics/rules/store/kv/store_test.go
+++ b/src/metrics/rules/store/kv/store_test.go
@@ -29,6 +29,7 @@ import (
"github.com/m3db/m3/src/cluster/kv/mem"
merrors "github.com/m3db/m3/src/metrics/errors"
"github.com/m3db/m3/src/metrics/generated/proto/aggregationpb"
+ "github.com/m3db/m3/src/metrics/generated/proto/metricpb"
"github.com/m3db/m3/src/metrics/generated/proto/pipelinepb"
"github.com/m3db/m3/src/metrics/generated/proto/policypb"
"github.com/m3db/m3/src/metrics/generated/proto/rulepb"
@@ -104,6 +105,12 @@ var (
},
},
},
+ Tags: []*metricpb.Tag{
+ {
+ Name: []byte("name"),
+ Value: []byte("name"),
+ },
+ },
},
&rulepb.MappingRuleSnapshot{
Name: "foo",
@@ -130,6 +137,12 @@ var (
},
},
},
+ Tags: []*metricpb.Tag{
+ {
+ Name: []byte("name"),
+ Value: []byte("name"),
+ },
+ },
},
},
},
@@ -155,6 +168,12 @@ var (
},
},
},
+ Tags: []*metricpb.Tag{
+ {
+ Name: []byte("name"),
+ Value: []byte("name"),
+ },
+ },
},
},
},
diff --git a/src/metrics/rules/view/mapping.go b/src/metrics/rules/view/mapping.go
index 5214cf1422..a53ccf2609 100644
--- a/src/metrics/rules/view/mapping.go
+++ b/src/metrics/rules/view/mapping.go
@@ -23,6 +23,7 @@ package view
import (
"github.com/m3db/m3/src/metrics/aggregation"
"github.com/m3db/m3/src/metrics/policy"
+ "github.com/m3db/m3/src/query/models"
)
// MappingRule is a mapping rule model at a given point in time.
@@ -35,6 +36,7 @@ type MappingRule struct {
AggregationID aggregation.ID `json:"aggregation"`
StoragePolicies policy.StoragePolicies `json:"storagePolicies"`
DropPolicy policy.DropPolicy `json:"dropPolicy"`
+ Tags []models.Tag `json:"tags"`
LastUpdatedBy string `json:"lastUpdatedBy"`
LastUpdatedAtMillis int64 `json:"lastUpdatedAtMillis"`
}
diff --git a/src/metrics/transformation/binary.go b/src/metrics/transformation/binary.go
index 20b9810654..593b8fab53 100644
--- a/src/metrics/transformation/binary.go
+++ b/src/metrics/transformation/binary.go
@@ -29,8 +29,20 @@ const (
nanosPerSecond = time.Second / time.Nanosecond
)
+var (
+	// Allows using a single transform fn reference (instead of taking a
+	// reference to it each time it is converted to the interface).
+ transformPerSecondFn = BinaryTransformFn(perSecond)
+ transformIncreaseFn = BinaryTransformFn(increase)
+)
+
+func transformPerSecond() BinaryTransform {
+ return transformPerSecondFn
+}
+
// perSecond computes the derivative between consecutive datapoints, taking into
// account the time interval between the values.
+// Note:
// * It skips NaN values.
// * It assumes the timestamps are monotonically increasing, and values are non-decreasing.
// If either of the two conditions is not met, an empty datapoint is returned.
@@ -45,3 +57,24 @@ func perSecond(prev, curr Datapoint) Datapoint {
rate := diff * float64(nanosPerSecond) / float64(curr.TimeNanos-prev.TimeNanos)
return Datapoint{TimeNanos: curr.TimeNanos, Value: rate}
}
+
+func transformIncrease() BinaryTransform {
+ return transformIncreaseFn
+}
+
+// increase computes the difference between consecutive datapoints; unlike
+// perSecond, it does not account for the time interval between the values.
+// Note:
+// * It skips NaN values.
+// * It assumes the timestamps are monotonically increasing, and values are non-decreasing.
+// If either of the two conditions is not met, an empty datapoint is returned.
+func increase(prev, curr Datapoint) Datapoint {
+ if prev.TimeNanos >= curr.TimeNanos || math.IsNaN(prev.Value) || math.IsNaN(curr.Value) {
+ return emptyDatapoint
+ }
+ diff := curr.Value - prev.Value
+ if diff < 0 {
+ return emptyDatapoint
+ }
+ return Datapoint{TimeNanos: curr.TimeNanos, Value: diff}
+}
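To see the difference between the two binary transforms: for datapoints two seconds apart with values 10 and 25, increase reports the raw delta while perSecond divides by the interval. A sketch using the public Type accessor, assuming the API as extended later in this diff:

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/metrics/transformation"
)

func main() {
	increase, err := transformation.Increase.BinaryTransform()
	if err != nil {
		panic(err)
	}

	prev := transformation.Datapoint{TimeNanos: 1000000000, Value: 10}
	curr := transformation.Datapoint{TimeNanos: 3000000000, Value: 25}

	// increase ignores the 2s interval and reports the raw delta;
	// perSecond would report 7.5 for the same pair.
	fmt.Println(increase.Evaluate(prev, curr).Value) // 15
}
```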
diff --git a/src/metrics/transformation/func.go b/src/metrics/transformation/func.go
index 016fe92ffd..80016bb49b 100644
--- a/src/metrics/transformation/func.go
+++ b/src/metrics/transformation/func.go
@@ -38,9 +38,31 @@ func (dp Datapoint) IsEmpty() bool { return math.IsNaN(dp.Value) }
// UnaryTransform is a unary transformation that takes a single
// datapoint as input and transforms it into a datapoint as output.
-type UnaryTransform func(dp Datapoint) Datapoint
+// It can keep state if required.
+type UnaryTransform interface {
+ Evaluate(dp Datapoint) Datapoint
+}
+
+// UnaryTransformFn implements UnaryTransform as a function.
+type UnaryTransformFn func(dp Datapoint) Datapoint
+
+// Evaluate implements UnaryTransform as a function.
+func (fn UnaryTransformFn) Evaluate(dp Datapoint) Datapoint {
+ return fn(dp)
+}
// BinaryTransform is a binary transformation that takes the
// previous and the current datapoint as input and produces
// a single datapoint as the transformation result.
-type BinaryTransform func(prev, curr Datapoint) Datapoint
+// It can keep state if required.
+type BinaryTransform interface {
+ Evaluate(prev, curr Datapoint) Datapoint
+}
+
+// BinaryTransformFn implements BinaryTransform as a function.
+type BinaryTransformFn func(prev, curr Datapoint) Datapoint
+
+// Evaluate implements BinaryTransform as a function.
+func (fn BinaryTransformFn) Evaluate(prev, curr Datapoint) Datapoint {
+ return fn(prev, curr)
+}
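The function-type adapters mirror net/http's `HandlerFunc` pattern: any function with the right signature satisfies the interface, while stateful implementations can now be closures or structs. A quick sketch of a custom unary transform via the adapter (the clamping transform itself is just an example):

```go
package main

import (
	"fmt"
	"math"

	"github.com/m3db/m3/src/metrics/transformation"
)

func main() {
	// A plain function becomes a UnaryTransform through the adapter,
	// the same way http.HandlerFunc adapts funcs to http.Handler.
	var clamp transformation.UnaryTransform = transformation.UnaryTransformFn(
		func(dp transformation.Datapoint) transformation.Datapoint {
			return transformation.Datapoint{
				TimeNanos: dp.TimeNanos,
				Value:     math.Min(dp.Value, 100),
			}
		})

	dp := transformation.Datapoint{TimeNanos: 1, Value: 250}
	fmt.Println(clamp.Evaluate(dp).Value) // 100
}
```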
diff --git a/src/metrics/transformation/type.go b/src/metrics/transformation/type.go
index 56d1b2fd6e..fe0edae0f1 100644
--- a/src/metrics/transformation/type.go
+++ b/src/metrics/transformation/type.go
@@ -1,3 +1,4 @@
+//go:generate stringer -type=Type
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -34,6 +35,8 @@ const (
UnknownType Type = iota
Absolute
PerSecond
+ Increase
+ Add
)
// IsValid checks if the transformation type is valid.
@@ -53,6 +56,32 @@ func (t Type) IsBinaryTransform() bool {
return exists
}
+// NewOp returns a constructed operation that is allocated once and can be
+// reused.
+func (t Type) NewOp() (Op, error) {
+ var (
+ err error
+ unary UnaryTransform
+ binary BinaryTransform
+ )
+ switch {
+ case t.IsUnaryTransform():
+ unary, err = t.UnaryTransform()
+ case t.IsBinaryTransform():
+ binary, err = t.BinaryTransform()
+ default:
+ err = fmt.Errorf("unknown transformation type: %v", t)
+ }
+ if err != nil {
+ return Op{}, err
+ }
+ return Op{
+ opType: t,
+ unary: unary,
+ binary: binary,
+ }, nil
+}
+
// UnaryTransform returns the unary transformation function associated with
// the transformation type if applicable, or an error otherwise.
func (t Type) UnaryTransform() (UnaryTransform, error) {
@@ -60,7 +89,7 @@ func (t Type) UnaryTransform() (UnaryTransform, error) {
if !exists {
return nil, fmt.Errorf("%v is not a unary transformation", t)
}
- return tf, nil
+ return tf(), nil
}
// MustUnaryTransform returns the unary transformation function associated with
@@ -80,7 +109,7 @@ func (t Type) BinaryTransform() (BinaryTransform, error) {
if !exists {
return nil, fmt.Errorf("%v is not a binary transformation", t)
}
- return tf, nil
+ return tf(), nil
}
// MustBinaryTransform returns the binary transformation function associated with
@@ -100,6 +129,10 @@ func (t Type) ToProto(pb *transformationpb.TransformationType) error {
*pb = transformationpb.TransformationType_ABSOLUTE
case PerSecond:
*pb = transformationpb.TransformationType_PERSECOND
+ case Increase:
+ *pb = transformationpb.TransformationType_INCREASE
+ case Add:
+ *pb = transformationpb.TransformationType_ADD
default:
return fmt.Errorf("unknown transformation type: %v", t)
}
@@ -113,6 +146,10 @@ func (t *Type) FromProto(pb transformationpb.TransformationType) error {
*t = Absolute
case transformationpb.TransformationType_PERSECOND:
*t = PerSecond
+ case transformationpb.TransformationType_INCREASE:
+ *t = Increase
+ case transformationpb.TransformationType_ADD:
+ *t = Add
default:
return fmt.Errorf("unknown transformation type in proto: %v", pb)
}
@@ -160,12 +197,44 @@ func ParseType(str string) (Type, error) {
return t, nil
}
+// Op represents a transform operation.
+type Op struct {
+ opType Type
+
+	// Exactly one of unary or binary is set, depending on opType.
+ unary UnaryTransform
+ binary BinaryTransform
+}
+
+// Type returns the op type.
+func (o Op) Type() Type {
+ return o.opType
+}
+
+// UnaryTransform returns the active unary transform if the op is a unary transform.
+func (o Op) UnaryTransform() (UnaryTransform, bool) {
+ if !o.Type().IsUnaryTransform() {
+ return nil, false
+ }
+ return o.unary, true
+}
+
+// BinaryTransform returns the active binary transform if the op is a binary transform.
+func (o Op) BinaryTransform() (BinaryTransform, bool) {
+ if !o.Type().IsBinaryTransform() {
+ return nil, false
+ }
+ return o.binary, true
+}
+
var (
- unaryTransforms = map[Type]UnaryTransform{
- Absolute: absolute,
+ unaryTransforms = map[Type]func() UnaryTransform{
+ Absolute: transformAbsolute,
+ Add: transformAdd,
}
- binaryTransforms = map[Type]BinaryTransform{
- PerSecond: perSecond,
+ binaryTransforms = map[Type]func() BinaryTransform{
+ PerSecond: transformPerSecond,
+ Increase: transformIncrease,
}
typeStringMap map[string]Type
)
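`NewOp` front-loads the map lookups and closure construction so per-datapoint hot paths only pay for an interface call. A usage sketch under the API shown in this hunk:

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/metrics/transformation"
)

func main() {
	// Construct the op once, then reuse it for every datapoint.
	op, err := transformation.PerSecond.NewOp()
	if err != nil {
		panic(err)
	}

	if binary, ok := op.BinaryTransform(); ok {
		prev := transformation.Datapoint{TimeNanos: 1000000000, Value: 10}
		curr := transformation.Datapoint{TimeNanos: 2000000000, Value: 25}
		fmt.Println(binary.Evaluate(prev, curr).Value) // 15 per second
	}
}
```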
diff --git a/src/metrics/transformation/type_string.go b/src/metrics/transformation/type_string.go
index 37cb879c5c..028d8a4bc1 100644
--- a/src/metrics/transformation/type_string.go
+++ b/src/metrics/transformation/type_string.go
@@ -1,4 +1,6 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
+// Code generated by "stringer -type=Type"; DO NOT EDIT.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,19 +20,28 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// generated by stringer -type=Type; DO NOT EDIT
-
package transformation
-import "fmt"
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[UnknownType-0]
+ _ = x[Absolute-1]
+ _ = x[PerSecond-2]
+ _ = x[Increase-3]
+ _ = x[Add-4]
+}
-const _Type_name = "UnknownTypeAbsolutePerSecond"
+const _Type_name = "UnknownTypeAbsolutePerSecondIncreaseAdd"
-var _Type_index = [...]uint8{0, 11, 19, 28}
+var _Type_index = [...]uint8{0, 11, 19, 28, 36, 39}
func (i Type) String() string {
if i < 0 || i >= Type(len(_Type_index)-1) {
- return fmt.Sprintf("Type(%d)", i)
+ return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Type_name[_Type_index[i]:_Type_index[i+1]]
}
diff --git a/src/metrics/transformation/unary.go b/src/metrics/transformation/unary.go
index eec1d9a246..00f9d493f7 100644
--- a/src/metrics/transformation/unary.go
+++ b/src/metrics/transformation/unary.go
@@ -22,9 +22,33 @@ package transformation
import "math"
+var (
+	// Allows using a single transform fn reference (instead of taking a
+	// reference to it each time it is converted to the interface).
+ transformAbsoluteFn = UnaryTransformFn(absolute)
+)
+
+func transformAbsolute() UnaryTransform {
+ return transformAbsoluteFn
+}
+
func absolute(dp Datapoint) Datapoint {
var res Datapoint
res.TimeNanos = dp.TimeNanos
res.Value = math.Abs(dp.Value)
return res
}
+
+// transformAdd returns a unary transform that adds each datapoint to a
+// running count and returns the result, useful for computing a running sum
+// of values (like a monotonically increasing counter).
+// Note:
+// * It treats NaN as a zero value, i.e. 42 + NaN = 42.
+func transformAdd() UnaryTransform {
+ var curr float64
+ return UnaryTransformFn(func(dp Datapoint) Datapoint {
+ if !math.IsNaN(dp.Value) {
+ curr += dp.Value
+ }
+ return Datapoint{TimeNanos: dp.TimeNanos, Value: curr}
+ })
+}
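Because `transformAdd` closes over `curr`, each constructed transform carries its own running sum; this is why the lookup tables now hold constructors rather than shared function values. A sketch of the accumulating behavior:

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/metrics/transformation"
)

func main() {
	// Each UnaryTransform() call builds a fresh closure with its own
	// running count, so independent series don't share state.
	add, err := transformation.Add.UnaryTransform()
	if err != nil {
		panic(err)
	}

	for _, v := range []float64{5, 10, 2} {
		fmt.Println(add.Evaluate(transformation.Datapoint{Value: v}).Value)
	}
	// Prints 5, 15, 17.
}
```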
diff --git a/src/msg/README.md b/src/msg/README.md
index acf4cf6587..05c13e8495 100644
--- a/src/msg/README.md
+++ b/src/msg/README.md
@@ -1,7 +1,3 @@
-# M3msg [![Coverage Status](https://coveralls.io/repos/github/m3db/m3msg/badge.svg?branch=master)](https://coveralls.io/github/m3db/m3msg?branch=master)
+# M3Msg
A partitioned message queueing, routing and delivery library designed for very small messages that don't require disk durability, delivered at very high speeds. This makes it quite useful for metrics ingestion pipelines.
-
-
-
-This project is released under the [Apache License, Version 2.0](LICENSE).
diff --git a/src/msg/consumer/consumer.go b/src/msg/consumer/consumer.go
index 333e757af3..d23a8b273b 100644
--- a/src/msg/consumer/consumer.go
+++ b/src/msg/consumer/consumer.go
@@ -21,13 +21,13 @@
package consumer
import (
- "bufio"
"net"
"sync"
"time"
"github.com/m3db/m3/src/msg/generated/proto/msgpb"
"github.com/m3db/m3/src/msg/protocol/proto"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/uber-go/tally"
)
@@ -64,6 +64,7 @@ func (l *listener) Accept() (Consumer, error) {
if err != nil {
return nil, err
}
+
return newConsumer(conn, l.msgPool, l.opts, l.m), nil
}
@@ -92,7 +93,7 @@ type consumer struct {
mPool *messagePool
encoder proto.Encoder
decoder proto.Decoder
- w *bufio.Writer
+ w xio.ResettableWriter
conn net.Conn
ackPb msgpb.Ack
@@ -108,15 +109,23 @@ func newConsumer(
opts Options,
m metrics,
) *consumer {
+ var (
+ wOpts = xio.ResettableWriterOptions{
+ WriteBufferSize: opts.ConnectionWriteBufferSize(),
+ }
+
+ rwOpts = opts.DecoderOptions().RWOptions()
+ writerFn = rwOpts.ResettableWriterFn()
+ )
+
return &consumer{
opts: opts,
mPool: mPool,
encoder: proto.NewEncoder(opts.EncoderOptions()),
decoder: proto.NewDecoder(
- bufio.NewReaderSize(conn, opts.ConnectionReadBufferSize()),
- opts.DecoderOptions(),
+ conn, opts.DecoderOptions(), opts.ConnectionReadBufferSize(),
),
- w: bufio.NewWriterSize(conn, opts.ConnectionWriteBufferSize()),
+ w: writerFn(conn, wOpts),
conn: conn,
closed: false,
doneCh: make(chan struct{}),
@@ -200,6 +209,10 @@ func (c *consumer) encodeAckWithLock(ackLen int) error {
c.m.ackWriteError.Inc(1)
return err
}
+ if err := c.w.Flush(); err != nil {
+ c.m.ackWriteError.Inc(1)
+ return err
+ }
c.m.ackSent.Inc(int64(ackLen))
return nil
}
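The explicit Flush matters because the resettable writer is buffered (the default presumably wraps bufio, as the old code did); without it, small acks could sit in the now-1MB buffer indefinitely. A stdlib-only sketch of the failure mode:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
)

func main() {
	// Stand-in for the consumer connection and its buffered writer.
	var conn bytes.Buffer
	w := bufio.NewWriterSize(&conn, 1<<20)

	w.Write([]byte("ack"))
	fmt.Println(conn.Len()) // 0: the ack is still sitting in the buffer

	w.Flush()
	fmt.Println(conn.Len()) // 3: the ack reached the connection
}
```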
diff --git a/src/msg/consumer/consumer_test.go b/src/msg/consumer/consumer_test.go
index 66eb4a7b45..5889fb3609 100644
--- a/src/msg/consumer/consumer_test.go
+++ b/src/msg/consumer/consumer_test.go
@@ -403,7 +403,7 @@ func testProduceAndReceiveAck(t *testing.T, testMsg msgpb.Message, l Listener, o
m.Ack()
var ack msgpb.Ack
- err = proto.NewDecoder(conn, opts.DecoderOptions()).Decode(&ack)
+ err = proto.NewDecoder(conn, opts.DecoderOptions(), 10).Decode(&ack)
require.NoError(t, err)
require.Equal(t, 1, len(ack.Metadata))
require.Equal(t, testMsg.Metadata, ack.Metadata[0])
diff --git a/src/msg/consumer/handlers_test.go b/src/msg/consumer/handlers_test.go
index e07362f559..63ae802c3c 100644
--- a/src/msg/consumer/handlers_test.go
+++ b/src/msg/consumer/handlers_test.go
@@ -77,7 +77,7 @@ func TestServerWithMessageFn(t *testing.T) {
require.Equal(t, string(testMsg2.Value), data[1])
var ack msgpb.Ack
- testDecoder := proto.NewDecoder(conn, opts.DecoderOptions())
+ testDecoder := proto.NewDecoder(conn, opts.DecoderOptions(), 10)
err = testDecoder.Decode(&ack)
require.NoError(t, err)
require.Equal(t, 2, len(ack.Metadata))
@@ -133,7 +133,7 @@ func TestServerWithConsumeFn(t *testing.T) {
require.Equal(t, testMsg1.Value, bytes)
var ack msgpb.Ack
- testDecoder := proto.NewDecoder(conn, opts.DecoderOptions())
+ testDecoder := proto.NewDecoder(conn, opts.DecoderOptions(), 10)
err = testDecoder.Decode(&ack)
require.NoError(t, err)
require.Equal(t, 1, len(ack.Metadata))
diff --git a/src/msg/consumer/options.go b/src/msg/consumer/options.go
index 7712989322..f629a4c93d 100644
--- a/src/msg/consumer/options.go
+++ b/src/msg/consumer/options.go
@@ -25,13 +25,14 @@ import (
"github.com/m3db/m3/src/msg/protocol/proto"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
)
var (
- defaultAckBufferSize = 100
- defaultAckFlushInterval = time.Second
- defaultConnectionBufferSize = 16384
+ defaultAckBufferSize = 1048576
+ defaultAckFlushInterval = 200 * time.Millisecond
+ defaultConnectionBufferSize = 1048576
)
type options struct {
@@ -43,6 +44,7 @@ type options struct {
writeBufferSize int
readBufferSize int
iOpts instrument.Options
+ rwOpts xio.Options
}
// NewOptions creates a new options.
@@ -56,6 +58,7 @@ func NewOptions() Options {
writeBufferSize: defaultConnectionBufferSize,
readBufferSize: defaultConnectionBufferSize,
iOpts: instrument.NewOptions(),
+ rwOpts: xio.NewOptions(),
}
}
@@ -138,3 +141,13 @@ func (opts *options) SetInstrumentOptions(value instrument.Options) Options {
o.iOpts = value
return &o
}
+
+func (opts *options) SetRWOptions(value xio.Options) Options {
+ o := *opts
+ o.rwOpts = value
+ return &o
+}
+
+func (opts *options) RWOptions() xio.Options {
+ return opts.rwOpts
+}
diff --git a/src/msg/integration/integration_test.go b/src/msg/integration/integration_test.go
index be1de6c202..0c9cdf2092 100644
--- a/src/msg/integration/integration_test.go
+++ b/src/msg/integration/integration_test.go
@@ -1,5 +1,3 @@
-// +build integration
-
// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -131,9 +129,9 @@ func TestSharedConsumerWithDeadInstance(t *testing.T) {
s.Run(t, ctrl)
s.VerifyConsumers(t)
testConsumers := s.consumerServices[0].testConsumers
- require.True(t, testConsumers[len(testConsumers)-1].consumed <= s.TotalMessages()*10/100)
+ require.True(t, testConsumers[len(testConsumers)-1].numConsumed() <= s.TotalMessages()*10/100)
testConsumers = s.consumerServices[1].testConsumers
- require.True(t, testConsumers[len(testConsumers)-1].consumed <= s.TotalMessages()*20/100)
+ require.True(t, testConsumers[len(testConsumers)-1].numConsumed() <= s.TotalMessages()*20/100)
}
}
@@ -548,8 +546,8 @@ func TestRemoveConsumerService(t *testing.T) {
)
s.Run(t, ctrl)
s.VerifyConsumers(t)
- require.Equal(t, msgPerShard*numberOfShards, len(s.consumerServices[0].consumed))
- require.Equal(t, msgPerShard*numberOfShards, len(s.consumerServices[1].consumed))
+ require.Equal(t, msgPerShard*numberOfShards, s.consumerServices[0].numConsumed())
+ require.Equal(t, msgPerShard*numberOfShards, s.consumerServices[1].numConsumed())
}
}
@@ -576,8 +574,8 @@ func TestAddConsumerService(t *testing.T) {
},
)
s.Run(t, ctrl)
- require.Equal(t, s.ExpectedNumMessages(), len(s.consumerServices[0].consumed))
- require.Equal(t, s.ExpectedNumMessages(), len(s.consumerServices[1].consumed))
- require.True(t, len(s.consumerServices[2].consumed) <= s.ExpectedNumMessages()*80/100)
+ require.Equal(t, s.ExpectedNumMessages(), s.consumerServices[0].numConsumed())
+ require.Equal(t, s.ExpectedNumMessages(), s.consumerServices[1].numConsumed())
+ require.True(t, s.consumerServices[2].numConsumed() <= s.ExpectedNumMessages()*80/100)
}
}
diff --git a/src/msg/integration/setup.go b/src/msg/integration/setup.go
index ab952addff..d797077021 100644
--- a/src/msg/integration/setup.go
+++ b/src/msg/integration/setup.go
@@ -39,6 +39,7 @@ import (
"github.com/m3db/m3/src/msg/producer/config"
"github.com/m3db/m3/src/msg/topic"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
xsync "github.com/m3db/m3/src/x/sync"
"github.com/golang/mock/gomock"
@@ -236,7 +237,7 @@ func (s *setup) Run(
func (s *setup) VerifyConsumers(t *testing.T) {
numWritesPerProducer := s.ExpectedNumMessages()
for _, cs := range s.consumerServices {
- require.Equal(t, numWritesPerProducer, len(cs.consumed))
+ require.Equal(t, numWritesPerProducer, cs.numConsumed())
}
}
@@ -406,6 +407,13 @@ func (cs *testConsumerService) markConsumed(b []byte) {
cs.consumed[string(b)] = struct{}{}
}
+func (cs *testConsumerService) numConsumed() int {
+ cs.Lock()
+ defer cs.Unlock()
+
+ return len(cs.consumed)
+}
+
func (cs *testConsumerService) Close() {
for _, c := range cs.testConsumers {
c.Close()
@@ -436,6 +444,13 @@ func (c *testConsumer) Close() {
close(c.doneCh)
}
+func (c *testConsumer) numConsumed() int {
+ c.Lock()
+ defer c.Unlock()
+
+ return c.consumed
+}
+
func newTestConsumer(t *testing.T, cs *testConsumerService) *testConsumer {
consumerListener, err := consumer.NewListener("127.0.0.1:0", testConsumerOptions(t))
require.NoError(t, err)
@@ -538,8 +553,8 @@ writer:
topicName: topicName
topicWatchInitTimeout: 100ms
placementWatchInitTimeout: 100ms
- messagePool:
- size: 100
+    # FIXME: Consumers sharing the same pool trigger false positives in the race detector
+ messagePool: ~
messageRetry:
initialBackoff: 20ms
maxBackoff: 50ms
@@ -563,7 +578,7 @@ writer:
var cfg config.ProducerConfiguration
require.NoError(t, yaml.Unmarshal([]byte(str), &cfg))
- p, err := cfg.NewProducer(cs, instrument.NewOptions())
+ p, err := cfg.NewProducer(cs, instrument.NewOptions(), xio.NewOptions())
require.NoError(t, err)
return p
}
diff --git a/src/msg/producer/buffer/buffer.go b/src/msg/producer/buffer/buffer.go
index f000cd7348..c854182c5f 100644
--- a/src/msg/producer/buffer/buffer.go
+++ b/src/msg/producer/buffer/buffer.go
@@ -23,6 +23,7 @@ package buffer
import (
"container/list"
"errors"
+ "strconv"
"sync"
"time"
@@ -37,15 +38,17 @@ import (
var (
emptyStruct = struct{}{}
- errBufferFull = errors.New("buffer full")
+ // ErrBufferFull is returned when the buffer is full.
+ ErrBufferFull = errors.New("buffer full")
+
errBufferClosed = errors.New("buffer closed")
errMessageTooLarge = errors.New("message size larger than allowed")
errCleanupNoProgress = errors.New("buffer cleanup no progress")
)
type bufferMetrics struct {
- messageDropped tally.Counter
- byteDropped tally.Counter
+ messageDropped counterPerNumRefBuckets
+ byteDropped counterPerNumRefBuckets
messageTooLarge tally.Counter
cleanupNoProgress tally.Counter
dropOldestSync tally.Counter
@@ -55,20 +58,65 @@ type bufferMetrics struct {
bufferScanBatch tally.Timer
}
+type counterPerNumRefBuckets struct {
+ buckets []counterPerNumRefBucket
+ unknownBucket tally.Counter
+}
+
+type counterPerNumRefBucket struct {
+	// numRef is the number of references held by the ref-counted message
+	// at the time it was counted.
+ numRef int
+ // counter is the actual counter for this bucket.
+ counter tally.Counter
+}
+
+func newCounterPerNumRefBuckets(
+ scope tally.Scope,
+ name string,
+ n int,
+) counterPerNumRefBuckets {
+ buckets := make([]counterPerNumRefBucket, 0, n)
+ for i := 0; i < n; i++ {
+ buckets = append(buckets, counterPerNumRefBucket{
+ numRef: i,
+ counter: scope.Tagged(map[string]string{
+ "num-replicas": strconv.Itoa(i),
+ }).Counter(name),
+ })
+ }
+ return counterPerNumRefBuckets{
+ buckets: buckets,
+ unknownBucket: scope.Tagged(map[string]string{
+ "num-replicas": "unknown",
+ }).Counter(name),
+ }
+}
+
+func (c counterPerNumRefBuckets) Inc(numRef int32, delta int64) {
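+	// Linear scan is fine here: the bucket count is small and fixed, and a
+	// numRef beyond the preallocated buckets falls into the unknown bucket.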
+ for _, b := range c.buckets {
+ if b.numRef == int(numRef) {
+ b.counter.Inc(delta)
+ return
+ }
+ }
+ c.unknownBucket.Inc(delta)
+}
+
func newBufferMetrics(
scope tally.Scope,
- samplingRate float64,
+ opts instrument.TimerOptions,
) bufferMetrics {
return bufferMetrics{
- messageDropped: scope.Counter("buffer-message-dropped"),
- byteDropped: scope.Counter("buffer-byte-dropped"),
+ messageDropped: newCounterPerNumRefBuckets(scope, "buffer-message-dropped", 10),
+ byteDropped: newCounterPerNumRefBuckets(scope, "buffer-byte-dropped", 10),
messageTooLarge: scope.Counter("message-too-large"),
cleanupNoProgress: scope.Counter("cleanup-no-progress"),
dropOldestSync: scope.Counter("drop-oldest-sync"),
dropOldestAsync: scope.Counter("drop-oldest-async"),
messageBuffered: scope.Gauge("message-buffered"),
byteBuffered: scope.Gauge("byte-buffered"),
- bufferScanBatch: instrument.MustCreateSampledTimer(scope.Timer("buffer-scan-batch"), samplingRate),
+ bufferScanBatch: instrument.NewTimer(scope, "buffer-scan-batch", opts),
}
}
@@ -113,7 +161,7 @@ func NewBuffer(opts Options) (producer.Buffer, error) {
retrier: retry.NewRetrier(opts.CleanupRetryOptions()),
m: newBufferMetrics(
opts.InstrumentOptions().MetricsScope(),
- opts.InstrumentOptions().MetricsSamplingRate(),
+ opts.InstrumentOptions().TimerOptions(),
),
size: atomic.NewUint64(0),
isClosed: false,
@@ -155,7 +203,7 @@ func (b *buffer) produceOnFull(newBufferSize uint64, messageSize uint64) error {
switch b.opts.OnFullStrategy() {
case ReturnError:
b.size.Sub(messageSize)
- return errBufferFull
+ return ErrBufferFull
case DropOldest:
if newBufferSize >= b.maxSpilloverSize {
// The size after the write reached max allowed spill over size.
@@ -288,8 +336,10 @@ func (b *buffer) cleanupBatchWithListLock(
if rm.Drop() {
b.bufferList.Remove(e)
removed++
- b.m.messageDropped.Inc(1)
- b.m.byteDropped.Inc(int64(rm.Size()))
+
+ numRef := rm.NumRef()
+ b.m.messageDropped.Inc(numRef, 1)
+ b.m.byteDropped.Inc(numRef, int64(rm.Size()))
}
}
return next, removed
@@ -346,8 +396,9 @@ func (b *buffer) dropOldestBatchUntilTargetWithListLock(
// There is a chance that the message is consumed right before
// the drop call which will lead drop to return false.
if rm.Drop() {
- b.m.messageDropped.Inc(1)
- b.m.byteDropped.Inc(int64(rm.Size()))
+ numRef := rm.NumRef()
+ b.m.messageDropped.Inc(numRef, 1)
+ b.m.byteDropped.Inc(numRef, int64(rm.Size()))
}
}
return false
diff --git a/src/msg/producer/config/producer.go b/src/msg/producer/config/producer.go
index 569f627f57..53a312a0c9 100644
--- a/src/msg/producer/config/producer.go
+++ b/src/msg/producer/config/producer.go
@@ -26,6 +26,7 @@ import (
"github.com/m3db/m3/src/msg/producer/buffer"
"github.com/m3db/m3/src/msg/producer/writer"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
)
// ProducerConfiguration configs the producer.
@@ -37,8 +38,9 @@ type ProducerConfiguration struct {
func (c *ProducerConfiguration) newOptions(
cs client.Client,
iOpts instrument.Options,
+ rwOpts xio.Options,
) (producer.Options, error) {
- wOpts, err := c.Writer.NewOptions(cs, iOpts)
+ wOpts, err := c.Writer.NewOptions(cs, iOpts, rwOpts)
if err != nil {
return nil, err
}
@@ -55,8 +57,9 @@ func (c *ProducerConfiguration) newOptions(
func (c *ProducerConfiguration) NewProducer(
cs client.Client,
iOpts instrument.Options,
+ rwOpts xio.Options,
) (producer.Producer, error) {
- opts, err := c.newOptions(cs, iOpts)
+ opts, err := c.newOptions(cs, iOpts, rwOpts)
if err != nil {
return nil, err
}
diff --git a/src/msg/producer/config/producer_test.go b/src/msg/producer/config/producer_test.go
index 360dab97d9..c1a1fa94a6 100644
--- a/src/msg/producer/config/producer_test.go
+++ b/src/msg/producer/config/producer_test.go
@@ -25,6 +25,7 @@ import (
"github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
@@ -50,6 +51,6 @@ writer:
cs.EXPECT().Store(gomock.Any()).Return(nil, nil)
cs.EXPECT().Services(gomock.Any()).Return(nil, nil)
- _, err := cfg.newOptions(cs, instrument.NewOptions())
+ _, err := cfg.newOptions(cs, instrument.NewOptions(), xio.NewOptions())
require.NoError(t, err)
}
diff --git a/src/msg/producer/config/writer.go b/src/msg/producer/config/writer.go
index d44af35ccf..3b0f241bf7 100644
--- a/src/msg/producer/config/writer.go
+++ b/src/msg/producer/config/writer.go
@@ -25,11 +25,13 @@ import (
"github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/cluster/kv"
+ "github.com/m3db/m3/src/cluster/placement"
"github.com/m3db/m3/src/cluster/services"
"github.com/m3db/m3/src/msg/producer/writer"
"github.com/m3db/m3/src/msg/protocol/proto"
"github.com/m3db/m3/src/msg/topic"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/retry"
@@ -38,6 +40,7 @@ import (
// ConnectionConfiguration configs the connection options.
type ConnectionConfiguration struct {
+ NumConnections *int `yaml:"numConnections"`
DialTimeout *time.Duration `yaml:"dialTimeout"`
WriteTimeout *time.Duration `yaml:"writeTimeout"`
KeepAlivePeriod *time.Duration `yaml:"keepAlivePeriod"`
@@ -51,6 +54,9 @@ type ConnectionConfiguration struct {
// NewOptions creates connection options.
func (c *ConnectionConfiguration) NewOptions(iOpts instrument.Options) writer.ConnectionOptions {
opts := writer.NewConnectionOptions()
+ if c.NumConnections != nil {
+ opts = opts.SetNumConnections(*c.NumConnections)
+ }
if c.DialTimeout != nil {
opts = opts.SetDialTimeout(*c.DialTimeout)
}
@@ -83,6 +89,7 @@ type WriterConfiguration struct {
TopicName string `yaml:"topicName" validate:"nonzero"`
TopicServiceOverride kv.OverrideConfiguration `yaml:"topicServiceOverride"`
TopicWatchInitTimeout *time.Duration `yaml:"topicWatchInitTimeout"`
+ PlacementOptions placement.Configuration `yaml:"placement"`
PlacementServiceOverride services.OverrideConfiguration `yaml:"placementServiceOverride"`
PlacementWatchInitTimeout *time.Duration `yaml:"placementWatchInitTimeout"`
MessagePool *pool.ObjectPoolConfiguration `yaml:"messagePool"`
@@ -102,21 +109,28 @@ type WriterConfiguration struct {
func (c *WriterConfiguration) NewOptions(
cs client.Client,
iOpts instrument.Options,
+ rwOptions xio.Options,
) (writer.Options, error) {
- opts := writer.NewOptions().SetTopicName(c.TopicName)
+ opts := writer.NewOptions().
+ SetTopicName(c.TopicName).
+ SetPlacementOptions(c.PlacementOptions.NewOptions()).
+ SetInstrumentOptions(iOpts)
+
kvOpts, err := c.TopicServiceOverride.NewOverrideOptions()
if err != nil {
return nil, err
}
- ts, err := topic.NewService(
- topic.NewServiceOptions().
- SetConfigService(cs).
- SetKVOverrideOptions(kvOpts),
- )
+
+ topicServiceOpts := topic.NewServiceOptions().
+ SetConfigService(cs).
+ SetKVOverrideOptions(kvOpts)
+ ts, err := topic.NewService(topicServiceOpts)
if err != nil {
return nil, err
}
+
opts = opts.SetTopicService(ts)
+
if c.TopicWatchInitTimeout != nil {
opts = opts.SetTopicWatchInitTimeout(*c.TopicWatchInitTimeout)
}
@@ -124,7 +138,9 @@ func (c *WriterConfiguration) NewOptions(
if err != nil {
return nil, err
}
+
opts = opts.SetServiceDiscovery(sd)
+
if c.PlacementWatchInitTimeout != nil {
opts = opts.SetPlacementWatchInitTimeout(*c.PlacementWatchInitTimeout)
}
@@ -161,5 +177,7 @@ func (c *WriterConfiguration) NewOptions(
if c.Connection != nil {
opts = opts.SetConnectionOptions(c.Connection.NewOptions(iOpts))
}
- return opts.SetInstrumentOptions(iOpts), nil
+
+ opts = opts.SetDecoderOptions(opts.DecoderOptions().SetRWOptions(rwOptions))
+ return opts, nil
}
diff --git a/src/msg/producer/config/writer_test.go b/src/msg/producer/config/writer_test.go
index 0d00a36f25..ff7a733f54 100644
--- a/src/msg/producer/config/writer_test.go
+++ b/src/msg/producer/config/writer_test.go
@@ -28,6 +28,7 @@ import (
"github.com/m3db/m3/src/cluster/kv"
"github.com/m3db/m3/src/cluster/services"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
@@ -106,7 +107,7 @@ decoder:
),
).Return(nil, nil)
- wOpts, err := cfg.NewOptions(cs, instrument.NewOptions())
+ wOpts, err := cfg.NewOptions(cs, instrument.NewOptions(), xio.NewOptions())
require.NoError(t, err)
require.Equal(t, "testTopic", wOpts.TopicName())
require.Equal(t, time.Second, wOpts.TopicWatchInitTimeout())
diff --git a/src/msg/producer/ref_counted.go b/src/msg/producer/ref_counted.go
index 11f3ae4a4d..18ecb24869 100644
--- a/src/msg/producer/ref_counted.go
+++ b/src/msg/producer/ref_counted.go
@@ -31,24 +31,24 @@ type OnFinalizeFn func(rm *RefCountedMessage)
// RefCountedMessage is a reference counted message.
type RefCountedMessage struct {
- sync.RWMutex
+ mu sync.RWMutex
Message
size uint64
onFinalizeFn OnFinalizeFn
- refCount *atomic.Int32
- isDroppedOrConsumed *atomic.Bool
+	// RefCountedMessage must not be copied by value due to the embedded
+	// RWMutex, so it is safe to store these atomics by value here rather
+	// than as pointers.
+ refCount atomic.Int32
+ isDroppedOrConsumed atomic.Bool
}
// NewRefCountedMessage creates RefCountedMessage.
func NewRefCountedMessage(m Message, fn OnFinalizeFn) *RefCountedMessage {
return &RefCountedMessage{
- Message: m,
- refCount: atomic.NewInt32(0),
- size: uint64(m.Size()),
- onFinalizeFn: fn,
- isDroppedOrConsumed: atomic.NewBool(false),
+ Message: m,
+ size: uint64(m.Size()),
+ onFinalizeFn: fn,
}
}
@@ -76,12 +76,17 @@ func (rm *RefCountedMessage) DecRef() {
// IncReads increments the reads count.
func (rm *RefCountedMessage) IncReads() {
- rm.RLock()
+ rm.mu.RLock()
}
// DecReads decrements the reads count.
func (rm *RefCountedMessage) DecReads() {
- rm.RUnlock()
+ rm.mu.RUnlock()
+}
+
+// NumRef returns the number of references remaining.
+func (rm *RefCountedMessage) NumRef() int32 {
+ return rm.refCount.Load()
}
// Size returns the size of the message.
@@ -102,13 +107,13 @@ func (rm *RefCountedMessage) IsDroppedOrConsumed() bool {
func (rm *RefCountedMessage) finalize(r FinalizeReason) bool {
// NB: This lock prevents the message from being finalized when its still
// being read.
- rm.Lock()
+ rm.mu.Lock()
if rm.isDroppedOrConsumed.Load() {
- rm.Unlock()
+ rm.mu.Unlock()
return false
}
rm.isDroppedOrConsumed.Store(true)
- rm.Unlock()
+ rm.mu.Unlock()
if rm.onFinalizeFn != nil {
rm.onFinalizeFn(rm)
}
diff --git a/src/msg/producer/writer/README.md b/src/msg/producer/writer/README.md
new file mode 100644
index 0000000000..3743412e2a
--- /dev/null
+++ b/src/msg/producer/writer/README.md
@@ -0,0 +1,9 @@
+# m3msg writer
+
+Messages are written in the following manner:
+1. A write goes to the public `Writer` in `writer.go`, which acquires a read lock on the writer (writes can be concurrent).
+2. The `Writer` writes to all registered `consumerServiceWriter` writers (one per downstream service) in a sequential loop, one after another.
+3. The `consumerServiceWriter` asks the message which shard it belongs to and writes immediately to that shard's `shardWriter`, without taking any locks along the way (shard bounds should be checked in the future).
+4. The `shardWriter` then acquires a read lock and writes the message to a `messageWriter`.
+5. The `messageWriter` then acquires a write lock on itself and pushes the message onto a queue; at this point the `messageWriter` appears to have a single `consumerWriter` to which it periodically sends batches of messages from its queue via `writeBatch`.
+6. The `consumerWriter` (one per downstream consumer instance) then takes a write lock on the selected connection index for every write it receives. The `messageWriter` selects the connection index based on the shard ID so that shards balance across the connections ultimately used to send data downstream (so IO is not blocked on a single connection per downstream instance); see the sketch below.
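+
+As a rough sketch (simplified shapes, not the real m3msg types), the
+per-shard connection pinning from step 6 looks like the following:
+
+```go
+type consumerWriter interface {
+	// Write writes the bytes over the given connection index.
+	Write(connIndex int, b []byte) error
+}
+
+type messageWriter struct {
+	replicatedShardID uint64
+	numConnections    int
+	consumerWriters   []consumerWriter
+}
+
+// write pins this writer's shard to one connection index so a shard's
+// traffic always flows over the same downstream connection, then tries
+// each consumer writer until one accepts the bytes.
+func (w *messageWriter) write(b []byte) error {
+	connIndex := int(w.replicatedShardID % uint64(w.numConnections))
+	var lastErr error
+	for _, cw := range w.consumerWriters {
+		if err := cw.Write(connIndex, b); err != nil {
+			lastErr = err
+			continue
+		}
+		return nil
+	}
+	return lastErr
+}
+```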
diff --git a/src/msg/producer/writer/consumer_service_writer.go b/src/msg/producer/writer/consumer_service_writer.go
index cbab8ac995..2e4bd96f6b 100644
--- a/src/msg/producer/writer/consumer_service_writer.go
+++ b/src/msg/producer/writer/consumer_service_writer.go
@@ -124,7 +124,8 @@ func newConsumerServiceWriter(
numShards uint32,
opts Options,
) (consumerServiceWriter, error) {
- ps, err := opts.ServiceDiscovery().PlacementService(cs.ServiceID(), nil)
+ ps, err := opts.ServiceDiscovery().
+ PlacementService(cs.ServiceID(), opts.PlacementOptions())
if err != nil {
return nil, err
}
@@ -161,7 +162,7 @@ func initShardWriters(
sws = make([]shardWriter, numberOfShards)
m = newMessageWriterMetrics(
opts.InstrumentOptions().MetricsScope(),
- opts.InstrumentOptions().MetricsSamplingRate(),
+ opts.InstrumentOptions().TimerOptions(),
)
mPool messagePool
)
diff --git a/src/msg/producer/writer/consumer_service_writer_test.go b/src/msg/producer/writer/consumer_service_writer_test.go
index baa0520be9..f9d66e02a1 100644
--- a/src/msg/producer/writer/consumer_service_writer_test.go
+++ b/src/msg/producer/writer/consumer_service_writer_test.go
@@ -38,6 +38,7 @@ import (
"github.com/m3db/m3/src/msg/producer"
"github.com/m3db/m3/src/msg/protocol/proto"
"github.com/m3db/m3/src/msg/topic"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/fortytw2/leaktest"
"github.com/golang/mock/gomock"
@@ -47,7 +48,7 @@ import (
func TestConsumerServiceWriterWithSharedConsumerWithNonShardedPlacement(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
sid := services.NewServiceID().SetName("foo")
@@ -171,7 +172,7 @@ func TestConsumerServiceWriterWithSharedConsumerWithNonShardedPlacement(t *testi
func TestConsumerServiceWriterWithSharedConsumerWithShardedPlacement(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
sid := services.NewServiceID().SetName("foo")
@@ -309,7 +310,7 @@ func TestConsumerServiceWriterWithSharedConsumerWithShardedPlacement(t *testing.
func TestConsumerServiceWriterWithReplicatedConsumerWithShardedPlacement(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
sid := services.NewServiceID().SetName("foo")
@@ -450,7 +451,7 @@ func TestConsumerServiceWriterWithReplicatedConsumerWithShardedPlacement(t *test
return
}
serverEncoder := proto.NewEncoder(opts.EncoderOptions())
- serverDecoder := proto.NewDecoder(conn, opts.DecoderOptions())
+ serverDecoder := proto.NewDecoder(conn, opts.DecoderOptions(), 10)
var msg msgpb.Message
err = serverDecoder.Decode(&msg)
@@ -487,7 +488,7 @@ func TestConsumerServiceWriterWithReplicatedConsumerWithShardedPlacement(t *test
func TestConsumerServiceWriterFilter(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
sid := services.NewServiceID().SetName("foo")
@@ -531,7 +532,7 @@ func TestConsumerServiceWriterFilter(t *testing.T) {
func TestConsumerServiceWriterAllowInitValueErrorWithCreateWatchError(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
sid := services.NewServiceID().SetName("foo")
@@ -554,7 +555,7 @@ func TestConsumerServiceWriterAllowInitValueErrorWithCreateWatchError(t *testing
func TestConsumerServiceWriterAllowInitValueErrorWithInitValueError(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
sid := services.NewServiceID().SetName("foo")
@@ -575,7 +576,7 @@ func TestConsumerServiceWriterAllowInitValueErrorWithInitValueError(t *testing.T
func TestConsumerServiceWriterInitError(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
sid := services.NewServiceID().SetName("foo")
@@ -600,7 +601,7 @@ func TestConsumerServiceWriterInitError(t *testing.T) {
func TestConsumerServiceWriterUpdateNonShardedPlacementWithReplicatedConsumptionType(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
sid := services.NewServiceID().SetName("foo")
@@ -625,7 +626,7 @@ func TestConsumerServiceWriterUpdateNonShardedPlacementWithReplicatedConsumption
func TestConsumerServiceCloseShardWritersConcurrently(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
sid := services.NewServiceID().SetName("foo")
diff --git a/src/msg/producer/writer/consumer_writer.go b/src/msg/producer/writer/consumer_writer.go
index b749013292..f7cb7a3e43 100644
--- a/src/msg/producer/writer/consumer_writer.go
+++ b/src/msg/producer/writer/consumer_writer.go
@@ -21,8 +21,8 @@
package writer
import (
- "bufio"
"errors"
+ "fmt"
"io"
"net"
"sync"
@@ -31,10 +31,10 @@ import (
"github.com/m3db/m3/src/msg/generated/proto/msgpb"
"github.com/m3db/m3/src/msg/protocol/proto"
"github.com/m3db/m3/src/x/clock"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/retry"
"github.com/uber-go/tally"
- "go.uber.org/atomic"
"go.uber.org/zap"
)
@@ -43,17 +43,16 @@ const (
)
var (
- u uninitializedReadWriter
-
errInvalidConnection = errors.New("connection is invalid")
+ u uninitializedReadWriter
)
type consumerWriter interface {
// Address returns the consumer address.
Address() string
- // Write writes the bytes, it is thread safe.
- Write(b []byte) error
+	// Write writes the bytes; it is thread safe per connection index.
+ Write(connIndex int, b []byte) error
// Init initializes the consumer writer.
Init()
@@ -94,10 +93,11 @@ func newConsumerWriterMetrics(scope tally.Scope) consumerWriterMetrics {
type connectFn func(addr string) (io.ReadWriteCloser, error)
+type connectAllFn func(addr string) ([]io.ReadWriteCloser, error)
+
type consumerWriterImpl struct {
- writeLock sync.Mutex
- decodeLock sync.Mutex
- decoder proto.Decoder
+ writeState consumerWriterImplWriteState
+
addr string
router ackRouter
opts Options
@@ -106,21 +106,37 @@ type consumerWriterImpl struct {
connRetrier retry.Retrier
logger *zap.Logger
- validConn *atomic.Bool
- conn io.ReadWriteCloser
- rw *bufio.ReadWriter
- lastResetNanos int64
- resetCh chan struct{}
- ack msgpb.Ack
- closed *atomic.Bool
- doneCh chan struct{}
- wg sync.WaitGroup
- m consumerWriterMetrics
+ resetCh chan struct{}
+ doneCh chan struct{}
+ wg sync.WaitGroup
+ m consumerWriterMetrics
nowFn clock.NowFn
connectFn connectFn
}
+type consumerWriterImplWriteState struct {
+ sync.RWMutex
+
+ closed bool
+ validConns bool
+
+	// conns keeps the active connections.
+	// Note: readers take a reference to this slice under the lock and then
+	// loop through it calling decode on the decoders, so the slice is not
+	// safe to reuse.
+ conns []*connection
+ lastResetNanos int64
+}
+
+type connection struct {
+ writeLock sync.Mutex
+ conn io.ReadWriteCloser
+ w xio.ResettableWriter
+ decoder proto.Decoder
+ ack msgpb.Ack
+}
+
func newConsumerWriter(
addr string,
router ackRouter,
@@ -131,36 +147,42 @@ func newConsumerWriter(
opts = NewOptions()
}
- var (
- connOpts = opts.ConnectionOptions()
- rw = bufio.NewReadWriter(
- bufio.NewReaderSize(u, connOpts.ReadBufferSize()),
- bufio.NewWriterSize(u, connOpts.WriteBufferSize()),
- )
- )
+ connOpts := opts.ConnectionOptions()
w := &consumerWriterImpl{
- decoder: proto.NewDecoder(rw, opts.DecoderOptions()),
- addr: addr,
- router: router,
- opts: opts,
- connOpts: connOpts,
- ackRetrier: retry.NewRetrier(opts.AckErrorRetryOptions()),
- connRetrier: retry.NewRetrier(connOpts.RetryOptions().SetForever(defaultRetryForever)),
- logger: opts.InstrumentOptions().Logger(),
- validConn: atomic.NewBool(false),
- conn: u,
- rw: rw,
- lastResetNanos: 0,
- resetCh: make(chan struct{}, 1),
- closed: atomic.NewBool(false),
- doneCh: make(chan struct{}),
- m: m,
- nowFn: time.Now,
+ addr: addr,
+ router: router,
+ opts: opts,
+ connOpts: connOpts,
+ ackRetrier: retry.NewRetrier(opts.AckErrorRetryOptions()),
+ connRetrier: retry.NewRetrier(connOpts.RetryOptions().SetForever(defaultRetryForever)),
+ logger: opts.InstrumentOptions().Logger(),
+ resetCh: make(chan struct{}, 1),
+ doneCh: make(chan struct{}),
+ m: m,
+ nowFn: time.Now,
}
-
- w.connectFn = w.connectOnce
- if err := w.resetWithConnectFn(w.connectFn); err != nil {
- w.notifyReset()
+ w.connectFn = w.connectNoRetry
+
+	// Initialize no-op connections since it is valid to keep trying to
+	// write to the writer even if the first connection attempt fails.
+	// Note: tests also exercise breaking a writer that never connected.
+ conns := make([]io.ReadWriteCloser, 0, connOpts.NumConnections())
+ for i := 0; i < connOpts.NumConnections(); i++ {
+ conns = append(conns, u)
+ }
+	// NB(r): Reset at the epoch so that a failure triggers an immediate
+	// reset after the first connection attempt (a write may fail since the
+	// first connection attempt is made without retries).
+ w.reset(resetOptions{
+ connections: conns,
+ at: time.Time{},
+ validConns: false,
+ })
+
+ // Try connecting without retry first attempt.
+ connectAllNoRetry := w.newConnectFn(connectOptions{retry: false})
+ if err := w.resetWithConnectFn(connectAllNoRetry); err != nil {
+ w.notifyReset(err)
}
return w
}
@@ -171,18 +193,34 @@ func (w *consumerWriterImpl) Address() string {
// Write should fail fast so that the write could be tried on other
// consumer writers that are sharing the message queue.
-func (w *consumerWriterImpl) Write(b []byte) error {
- if !w.validConn.Load() {
+func (w *consumerWriterImpl) Write(connIndex int, b []byte) error {
+ w.writeState.RLock()
+ if !w.writeState.validConns || len(w.writeState.conns) == 0 {
+ w.writeState.RUnlock()
w.m.writeInvalidConn.Inc(1)
return errInvalidConnection
}
- w.writeLock.Lock()
- _, err := w.rw.Write(b)
- w.writeLock.Unlock()
+ if connIndex < 0 || connIndex >= len(w.writeState.conns) {
+ w.writeState.RUnlock()
+ return fmt.Errorf("connection index out of range: %d", connIndex)
+ }
+
+ writeConn := w.writeState.conns[connIndex]
+
+	// Make sure we are the only writer to this connection.
+ writeConn.writeLock.Lock()
+ _, err := writeConn.w.Write(b)
+ writeConn.writeLock.Unlock()
+
+	// Hold the write state read lock until done, since flushing and
+	// closing connections acquire the write state write lock.
+ w.writeState.RUnlock()
+
if err != nil {
- w.notifyReset()
+ w.notifyReset(err)
w.m.encodeError.Inc(1)
}
+
return err
}
@@ -193,11 +231,14 @@ func (w *consumerWriterImpl) Init() {
w.wg.Done()
}()
- w.wg.Add(1)
- go func() {
- w.readAcksUntilClose()
- w.wg.Done()
- }()
+ for i := 0; i < w.connOpts.NumConnections(); i++ {
+ idx := i
+ w.wg.Add(1)
+ go func() {
+ w.readAcksUntilClose(idx)
+ w.wg.Done()
+ }()
+ }
w.wg.Add(1)
go func() {
@@ -213,9 +254,13 @@ func (w *consumerWriterImpl) flushUntilClose() {
for {
select {
case <-flushTicker.C:
- w.writeLock.Lock()
- w.rw.Flush()
- w.writeLock.Unlock()
+ w.writeState.Lock()
+ for _, conn := range w.writeState.conns {
+ if err := conn.w.Flush(); err != nil {
+ w.notifyReset(err)
+ }
+ }
+ w.writeState.Unlock()
case <-w.doneCh:
return
}
@@ -231,7 +276,9 @@ func (w *consumerWriterImpl) resetConnectionUntilClose() {
w.m.resetTooSoon.Inc(1)
continue
}
- if err := w.resetWithConnectFn(w.connectWithRetry); err != nil {
+ // Connect with retry.
+ connectAllWithRetry := w.newConnectFn(connectOptions{retry: true})
+ if err := w.resetWithConnectFn(connectAllWithRetry); err != nil {
w.m.resetError.Inc(1)
w.logger.Error("could not reconnect", zap.String("address", w.addr), zap.Error(err))
continue
@@ -239,37 +286,48 @@ func (w *consumerWriterImpl) resetConnectionUntilClose() {
w.m.resetSuccess.Inc(1)
w.logger.Info("reconnected", zap.String("address", w.addr))
case <-w.doneCh:
- w.conn.Close()
+ w.writeState.Lock()
+ for _, c := range w.writeState.conns {
+ c.conn.Close()
+ }
+ w.writeState.Unlock()
return
}
}
}
func (w *consumerWriterImpl) resetTooSoon() bool {
- return w.nowFn().UnixNano() < w.lastResetNanos+int64(w.connOpts.ResetDelay())
+ w.writeState.Lock()
+ defer w.writeState.Unlock()
+ return w.nowFn().UnixNano() < w.writeState.lastResetNanos+int64(w.connOpts.ResetDelay())
}
-func (w *consumerWriterImpl) resetWithConnectFn(fn connectFn) error {
- w.validConn.Store(false)
- conn, err := fn(w.addr)
+func (w *consumerWriterImpl) resetWithConnectFn(fn connectAllFn) error {
+ w.writeState.Lock()
+ w.writeState.validConns = false
+ w.writeState.Unlock()
+ conns, err := fn(w.addr)
if err != nil {
return err
}
- w.reset(conn)
- w.validConn.Store(true)
+ w.reset(resetOptions{
+ connections: conns,
+ at: w.nowFn(),
+ validConns: true,
+ })
return nil
}
-func (w *consumerWriterImpl) readAcksUntilClose() {
+func (w *consumerWriterImpl) readAcksUntilClose(idx int) {
for {
select {
case <-w.doneCh:
return
default:
- w.ackRetrier.AttemptWhile(
- w.continueFn,
- w.readAcks,
- )
+ w.ackRetrier.AttemptWhile(w.continueFn,
+ func() error {
+ return w.readAcks(idx)
+ })
}
}
}
@@ -278,69 +336,114 @@ func (w *consumerWriterImpl) continueFn(int) bool {
return !w.isClosed()
}
-func (w *consumerWriterImpl) readAcks() error {
- if !w.validConn.Load() {
+func (w *consumerWriterImpl) readAcks(idx int) error {
+ w.writeState.RLock()
+ validConns := w.writeState.validConns
+ conn := w.writeState.conns[idx]
+ w.writeState.RUnlock()
+ if !validConns {
w.m.readInvalidConn.Inc(1)
return errInvalidConnection
}
+
+	// Read from the decoder; safe since the acquired decoder is not reused.
// NB(cw) The proto needs to be cleaned up because the gogo protobuf
// unmarshalling will append to the underlying slice.
- w.ack.Metadata = w.ack.Metadata[:0]
- w.decodeLock.Lock()
- err := w.decoder.Decode(&w.ack)
- w.decodeLock.Unlock()
+ conn.ack.Metadata = conn.ack.Metadata[:0]
+ err := conn.decoder.Decode(&conn.ack)
if err != nil {
- w.notifyReset()
+ w.notifyReset(err)
w.m.decodeError.Inc(1)
return err
}
- for _, m := range w.ack.Metadata {
+ for _, m := range conn.ack.Metadata {
if err := w.router.Ack(newMetadataFromProto(m)); err != nil {
w.m.ackError.Inc(1)
// This is fine, usually this means the ack has been acked.
w.logger.Error("could not ack metadata", zap.Error(err))
}
}
+
return nil
}
func (w *consumerWriterImpl) Close() {
- if !w.closed.CAS(false, true) {
+ w.writeState.Lock()
+ wasClosed := w.writeState.closed
+ w.writeState.closed = true
+ w.writeState.Unlock()
+
+ if wasClosed {
return
}
+
close(w.doneCh)
+
w.wg.Wait()
}
-func (w *consumerWriterImpl) notifyReset() {
+func (w *consumerWriterImpl) notifyReset(err error) {
select {
case w.resetCh <- struct{}{}:
+ if err != nil {
+ w.logger.Error("connection error", zap.Error(err))
+ }
default:
}
}
func (w *consumerWriterImpl) isClosed() bool {
- return w.closed.Load()
+ w.writeState.Lock()
+ defer w.writeState.Unlock()
+ return w.writeState.closed
+}
+
+type resetOptions struct {
+ connections []io.ReadWriteCloser
+ at time.Time
+ validConns bool
}
-func (w *consumerWriterImpl) reset(conn io.ReadWriteCloser) {
- // Close the connection to wake up potential blocking encode/decode calls.
- w.conn.Close()
+func (w *consumerWriterImpl) reset(opts resetOptions) {
+ w.writeState.Lock()
+ prevConns := w.writeState.conns
+ defer func() {
+ w.writeState.Unlock()
+ // Close existing connections outside of locks.
+ for _, c := range prevConns {
+ c.conn.Close()
+ }
+ }()
+
+ var (
+ wOpts = xio.ResettableWriterOptions{
+ WriteBufferSize: w.connOpts.WriteBufferSize(),
+ }
+
+ rwOpts = w.opts.DecoderOptions().RWOptions()
+ writerFn = rwOpts.ResettableWriterFn()
+ )
+
+ w.writeState.conns = make([]*connection, 0, len(opts.connections))
+ for _, conn := range opts.connections {
+ wr := writerFn(u, wOpts)
+ wr.Reset(conn)
- // NB: Connection can only be reset between encode/decode calls.
- w.decodeLock.Lock()
- defer w.decodeLock.Unlock()
+ decoder := proto.NewDecoder(conn, w.opts.DecoderOptions(), w.connOpts.ReadBufferSize())
+ newConn := &connection{
+ conn: conn,
+ w: wr,
+ decoder: decoder,
+ }
- w.writeLock.Lock()
- defer w.writeLock.Unlock()
+ w.writeState.conns = append(w.writeState.conns, newConn)
+ }
- w.conn = conn
- w.rw.Reader.Reset(conn)
- w.rw.Writer.Reset(conn)
- w.lastResetNanos = w.nowFn().UnixNano()
+ w.writeState.lastResetNanos = opts.at.UnixNano()
+ w.writeState.validConns = opts.validConns
}
-func (w *consumerWriterImpl) connectOnce(addr string) (io.ReadWriteCloser, error) {
+func (w *consumerWriterImpl) connectNoRetry(addr string) (io.ReadWriteCloser, error) {
conn, err := net.DialTimeout("tcp", addr, w.connOpts.DialTimeout())
if err != nil {
w.m.connectError.Inc(1)
@@ -360,22 +463,39 @@ func (w *consumerWriterImpl) connectOnce(addr string) (io.ReadWriteCloser, error
return newReadWriterWithTimeout(conn, w.connOpts.WriteTimeout(), w.nowFn), nil
}
-func (w *consumerWriterImpl) connectWithRetry(addr string) (io.ReadWriteCloser, error) {
- var (
- conn io.ReadWriteCloser
- err error
- )
- fn := func() error {
- conn, err = w.connectFn(addr)
- return err
- }
- if attemptErr := w.connRetrier.AttemptWhile(
- w.continueFn,
- fn,
- ); attemptErr != nil {
- return nil, attemptErr
+type connectOptions struct {
+ retry bool
+}
+
+func (w *consumerWriterImpl) newConnectFn(opts connectOptions) connectAllFn {
+ return func(addr string) ([]io.ReadWriteCloser, error) {
+ var (
+ numConns = w.connOpts.NumConnections()
+ conns = make([]io.ReadWriteCloser, 0, numConns)
+ )
+ for i := 0; i < numConns; i++ {
+ var (
+ conn io.ReadWriteCloser
+ fn = func() error {
+ var connectErr error
+ conn, connectErr = w.connectFn(addr)
+ return connectErr
+ }
+ resultErr error
+ )
+ if !opts.retry {
+ resultErr = fn()
+ } else {
+ resultErr = w.connRetrier.AttemptWhile(w.continueFn, fn)
+ }
+ if resultErr != nil {
+ return nil, resultErr
+ }
+
+ conns = append(conns, conn)
+ }
+ return conns, nil
}
- return conn, nil
}
type readWriterWithTimeout struct {
@@ -394,7 +514,9 @@ func newReadWriterWithTimeout(conn net.Conn, timeout time.Duration, nowFn clock.
}
func (conn readWriterWithTimeout) Write(p []byte) (int, error) {
- conn.SetWriteDeadline(conn.nowFn().Add(conn.timeout))
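+	// A zero timeout disables the write deadline entirely (the new default).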
+ if conn.timeout > 0 {
+ conn.SetWriteDeadline(conn.nowFn().Add(conn.timeout))
+ }
return conn.Conn.Write(p)
}
diff --git a/src/msg/producer/writer/consumer_writer_test.go b/src/msg/producer/writer/consumer_writer_test.go
index c6511b336d..71f4e1fd57 100644
--- a/src/msg/producer/writer/consumer_writer_test.go
+++ b/src/msg/producer/writer/consumer_writer_test.go
@@ -31,9 +31,9 @@ import (
"github.com/m3db/m3/src/msg/protocol/proto"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/retry"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/fortytw2/leaktest"
- "github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/uber-go/tally"
@@ -58,11 +58,13 @@ func TestNewConsumerWriter(t *testing.T) {
require.NoError(t, err)
defer lis.Close()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockRouter := NewMockackRouter(ctrl)
+
opts := testOptions()
+
w := newConsumerWriter(lis.Addr().String(), mockRouter, opts, testConsumerWriterMetrics()).(*consumerWriterImpl)
require.Equal(t, 0, len(w.resetCh))
@@ -87,11 +89,13 @@ func TestNewConsumerWriter(t *testing.T) {
w.Close()
// Make sure the connection is closed after closing the consumer writer.
- _, err = w.conn.Read([]byte{})
+ _, err = w.writeState.conns[0].conn.Read([]byte{})
require.Error(t, err)
require.Contains(t, err.Error(), "closed network connection")
}
+// TODO: tests for multiple connection writers.
+
func TestConsumerWriterSignalResetConnection(t *testing.T) {
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
@@ -106,7 +110,7 @@ func TestConsumerWriterSignalResetConnection(t *testing.T) {
return uninitializedReadWriter{}, nil
}
- w.notifyReset()
+ w.notifyReset(nil)
require.Equal(t, 1, len(w.resetCh))
require.True(t, w.resetTooSoon())
@@ -114,7 +118,7 @@ func TestConsumerWriterSignalResetConnection(t *testing.T) {
w.nowFn = func() time.Time { return now.Add(1 * time.Hour) }
require.Equal(t, 1, len(w.resetCh))
require.False(t, w.resetTooSoon())
- require.NoError(t, w.resetWithConnectFn(w.connectFn))
+ require.NoError(t, w.resetWithConnectFn(w.newConnectFn(connectOptions{retry: false})))
require.Equal(t, 1, called)
require.Equal(t, 1, len(w.resetCh))
@@ -123,7 +127,7 @@ func TestConsumerWriterSignalResetConnection(t *testing.T) {
w.nowFn = func() time.Time { return now.Add(2 * time.Hour) }
require.False(t, w.resetTooSoon())
- require.NoError(t, w.resetWithConnectFn(w.connectFn))
+ require.NoError(t, w.resetWithConnectFn(w.newConnectFn(connectOptions{retry: false})))
require.Equal(t, 2, called)
}
@@ -141,7 +145,7 @@ func TestConsumerWriterResetConnection(t *testing.T) {
require.Equal(t, "badAddress", addr)
return conn, nil
}
- w.resetWithConnectFn(w.connectWithRetry)
+ w.resetWithConnectFn(w.newConnectFn(connectOptions{retry: true}))
require.Equal(t, 1, called)
}
@@ -189,7 +193,10 @@ func TestConsumerWriterWriteErrorTriggerReset(t *testing.T) {
require.Error(t, err)
require.Equal(t, errInvalidConnection, err)
require.Equal(t, 0, len(w.resetCh))
- w.validConn.Store(true)
+ w.writeState.Lock()
+ w.writeState.validConns = true
+ w.writeState.Unlock()
+
err = write(w, &testMsg)
require.NoError(t, err)
for {
@@ -218,13 +225,17 @@ func TestConsumerWriterFlushWriteAfterFlushErrorTriggerReset(t *testing.T) {
require.Error(t, err)
require.Equal(t, errInvalidConnection, err)
require.Equal(t, 0, len(w.resetCh))
- w.validConn.Store(true)
+ w.writeState.Lock()
+ w.writeState.validConns = true
+ w.writeState.Unlock()
// The write will be buffered in the bufio.Writer, and will
// not return err because it has not tried to flush yet.
require.NoError(t, write(w, &testMsg))
- require.Error(t, w.rw.Flush())
+ w.writeState.Lock()
+ require.Error(t, w.writeState.conns[0].w.Flush())
+ w.writeState.Unlock()
// Flush err will be stored in bufio.Writer, the next time
// Write is called, the err will be returned.
@@ -240,9 +251,11 @@ func TestConsumerWriterReadErrorTriggerReset(t *testing.T) {
opts := testOptions()
w := newConsumerWriter("badAddr", nil, opts, testConsumerWriterMetrics()).(*consumerWriterImpl)
<-w.resetCh
- w.validConn.Store(true)
+ w.writeState.Lock()
+ w.writeState.validConns = true
+ w.writeState.Unlock()
require.Equal(t, 0, len(w.resetCh))
- err := w.readAcks()
+ err := w.readAcks(0)
require.Error(t, err)
require.Equal(t, errInvalidConnection, err)
require.Equal(t, 1, len(w.resetCh))
@@ -252,11 +265,13 @@ func TestConsumerWriterReadErrorTriggerReset(t *testing.T) {
func TestAutoReset(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockRouter := NewMockackRouter(ctrl)
+
opts := testOptions()
+
w := newConsumerWriter(
"badAddress",
mockRouter,
@@ -287,12 +302,12 @@ func TestAutoReset(t *testing.T) {
w.Init()
- var u uninitializedReadWriter
- for {
- w.writeLock.Lock()
- c := w.conn
- w.writeLock.Unlock()
- if c != u {
+ start := time.Now()
+ for time.Since(start) < 15*time.Second {
+ w.writeState.Lock()
+ validConns := w.writeState.validConns
+ w.writeState.Unlock()
+ if validConns {
break
}
time.Sleep(100 * time.Millisecond)
@@ -324,13 +339,15 @@ func TestConsumerWriterCloseWhileDecoding(t *testing.T) {
require.NoError(t, err)
defer lis.Close()
- w := newConsumerWriter(lis.Addr().String(), nil, testOptions(), testConsumerWriterMetrics()).(*consumerWriterImpl)
+ opts := testOptions()
+
+ w := newConsumerWriter(lis.Addr().String(), nil, opts, testConsumerWriterMetrics()).(*consumerWriterImpl)
var wg sync.WaitGroup
wg.Add(1)
go func() {
wg.Done()
- require.Error(t, w.decoder.Decode(&testMsg))
+ require.Error(t, w.writeState.conns[0].decoder.Decode(&testMsg))
}()
wg.Wait()
time.Sleep(time.Second)
@@ -344,19 +361,28 @@ func TestConsumerWriterResetWhileDecoding(t *testing.T) {
require.NoError(t, err)
defer lis.Close()
- w := newConsumerWriter(lis.Addr().String(), nil, testOptions(), testConsumerWriterMetrics()).(*consumerWriterImpl)
+ opts := testOptions()
+
+ w := newConsumerWriter(lis.Addr().String(), nil, opts, testConsumerWriterMetrics()).(*consumerWriterImpl)
var wg sync.WaitGroup
wg.Add(1)
go func() {
wg.Done()
- w.decodeLock.Lock()
- require.Error(t, w.decoder.Decode(&testMsg))
- w.decodeLock.Unlock()
+
+ w.writeState.Lock()
+ conn := w.writeState.conns[0]
+ w.writeState.Unlock()
+
+ require.Error(t, conn.decoder.Decode(&testMsg))
}()
wg.Wait()
time.Sleep(time.Second)
- w.reset(new(net.TCPConn))
+ w.reset(resetOptions{
+ connections: []io.ReadWriteCloser{new(net.TCPConn)},
+ at: w.nowFn(),
+ validConns: true,
+ })
}
func testOptions() Options {
@@ -374,6 +400,7 @@ func testOptions() Options {
func testConnectionOptions() ConnectionOptions {
return NewConnectionOptions().
+ SetNumConnections(1).
SetRetryOptions(retry.NewOptions().SetInitialBackoff(200 * time.Millisecond).SetMaxBackoff(time.Second)).
SetFlushInterval(100 * time.Millisecond).
SetResetDelay(100 * time.Millisecond)
@@ -386,7 +413,7 @@ func testConsumeAndAckOnConnection(
decOpts proto.Options,
) {
serverEncoder := proto.NewEncoder(encOpts)
- serverDecoder := proto.NewDecoder(conn, decOpts)
+ serverDecoder := proto.NewDecoder(conn, decOpts, 10)
var msg msgpb.Message
assert.NoError(t, serverDecoder.Decode(&msg))
@@ -422,5 +449,5 @@ func write(w consumerWriter, m proto.Marshaler) error {
if err != nil {
return err
}
- return w.Write(testEncoder.Bytes())
+ return w.Write(0, testEncoder.Bytes())
}
diff --git a/src/msg/producer/writer/message.go b/src/msg/producer/writer/message.go
index 6c9501ff36..5e8db247b9 100644
--- a/src/msg/producer/writer/message.go
+++ b/src/msg/producer/writer/message.go
@@ -38,14 +38,14 @@ type message struct {
retried int
// NB(cw) isAcked could be accessed concurrently by the background thread
// in message writer and acked by consumer service writers.
- isAcked *atomic.Bool
+	// Safe to store the atomic by value, as message is never copied by value.
+ isAcked atomic.Bool
}
func newMessage() *message {
return &message{
retryAtNanos: 0,
retried: 0,
- isAcked: atomic.NewBool(false),
}
}
diff --git a/src/msg/producer/writer/message_benchmark_test.go b/src/msg/producer/writer/message_benchmark_test.go
new file mode 100644
index 0000000000..76d1a10241
--- /dev/null
+++ b/src/msg/producer/writer/message_benchmark_test.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2018 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package writer
+
+import (
+ "testing"
+
+ "github.com/m3db/m3/src/msg/producer"
+)
+
+var (
+	// BenchMessage prevents the compiler from optimizing out the benchmark loop.
+	BenchMessage *message
+	// BenchBool prevents the compiler from optimizing out the benchmark loop.
+	BenchBool bool
+)
+
+type emptyMessage struct{}
+
+// Shard returns the shard of the message.
+func (e emptyMessage) Shard() uint32 { return 0 }
+
+// Bytes returns the bytes of the message.
+func (e emptyMessage) Bytes() []byte { return nil }
+
+// Size returns the size of the bytes of the message.
+func (e emptyMessage) Size() int { return 0 }
+
+// Finalize will be called by producer to indicate the end of its lifecycle.
+func (e emptyMessage) Finalize(_ producer.FinalizeReason) {}
+
+func BenchmarkMessageAtomics(b *testing.B) {
+ rm := producer.NewRefCountedMessage(emptyMessage{}, nil)
+ msg := newMessage()
+ for n := 0; n < b.N; n++ {
+ msg.Set(metadata{}, rm, 500)
+ rm.IncRef()
+ msg.Ack()
+ BenchBool = msg.IsAcked()
+ _, BenchBool = msg.Marshaler()
+ msg.Close()
+ BenchMessage = msg
+ }
+}
+
+func BenchmarkMessageAtomicsAllocs(b *testing.B) {
+ for n := 0; n < b.N; n++ {
+ rm := producer.NewRefCountedMessage(emptyMessage{}, nil)
+ msg := newMessage()
+ msg.Set(metadata{}, rm, 500)
+ rm.IncRef()
+ msg.Ack()
+ BenchBool = msg.IsAcked()
+ _, BenchBool = msg.Marshaler()
+ msg.Close()
+ BenchMessage = msg
+ }
+}
diff --git a/src/msg/producer/writer/message_pool_test.go b/src/msg/producer/writer/message_pool_test.go
index 5adbc77b83..34589d8cf6 100644
--- a/src/msg/producer/writer/message_pool_test.go
+++ b/src/msg/producer/writer/message_pool_test.go
@@ -26,8 +26,8 @@ import (
"github.com/m3db/m3/src/msg/generated/proto/msgpb"
"github.com/m3db/m3/src/msg/producer"
"github.com/m3db/m3/src/x/pool"
+ xtest "github.com/m3db/m3/src/x/test"
- "github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
)
@@ -35,7 +35,7 @@ func TestMessagePool(t *testing.T) {
p := newMessagePool(pool.NewObjectPoolOptions().SetSize(1))
p.Init()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mm := producer.NewMockMessage(ctrl)
diff --git a/src/msg/producer/writer/message_writer.go b/src/msg/producer/writer/message_writer.go
index 0067033010..37ee1168a9 100644
--- a/src/msg/producer/writer/message_writer.go
+++ b/src/msg/producer/writer/message_writer.go
@@ -107,7 +107,7 @@ type messageWriterMetrics struct {
func newMessageWriterMetrics(
scope tally.Scope,
- samplingRate float64,
+ opts instrument.TimerOptions,
) messageWriterMetrics {
return messageWriterMetrics{
writeSuccess: scope.Counter("write-success"),
@@ -133,10 +133,10 @@ func newMessageWriterMetrics(
map[string]string{"reason": "ttl-expire"},
).Counter("message-dropped"),
messageRetry: scope.Counter("message-retry"),
- messageConsumeLatency: instrument.MustCreateSampledTimer(scope.Timer("message-consume-latency"), samplingRate),
- messageWriteDelay: instrument.MustCreateSampledTimer(scope.Timer("message-write-delay"), samplingRate),
- scanBatchLatency: instrument.MustCreateSampledTimer(scope.Timer("scan-batch-latency"), samplingRate),
- scanTotalLatency: instrument.MustCreateSampledTimer(scope.Timer("scan-total-latency"), samplingRate),
+ messageConsumeLatency: instrument.NewTimer(scope, "message-consume-latency", opts),
+ messageWriteDelay: instrument.NewTimer(scope, "message-write-delay", opts),
+ scanBatchLatency: instrument.NewTimer(scope, "scan-batch-latency", opts),
+ scanTotalLatency: instrument.NewTimer(scope, "scan-total-latency", opts),
}
}
@@ -149,6 +149,7 @@ type messageWriterImpl struct {
retryOpts retry.Options
r *rand.Rand
encoder proto.Encoder
+ numConnections int
msgID uint64
queue *list.List
@@ -186,6 +187,7 @@ func newMessageWriter(
retryOpts: opts.MessageRetryOptions(),
r: rand.New(rand.NewSource(nowFn().UnixNano())),
encoder: proto.NewEncoder(opts.EncoderOptions()),
+ numConnections: opts.ConnectionOptions().NumConnections(),
msgID: 0,
queue: list.New(),
acks: newAckHelper(opts.InitialAckMapSize()),
@@ -258,10 +260,13 @@ func (w *messageWriterImpl) write(
return err
}
var (
- written = false
+ // NB(r): Always select the same connection index per shard.
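+		// For example, with 4 connections replicated shard 6 always maps
+		// to connection index 2 (6 % 4).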
+ connIndex = int(w.replicatedShardID % uint64(w.numConnections))
+ written = false
)
for i := len(iterationIndexes) - 1; i >= 0; i-- {
- if err := consumerWriters[randIndex(iterationIndexes, i)].Write(w.encoder.Bytes()); err != nil {
+ consumerWriter := consumerWriters[randIndex(iterationIndexes, i)]
+ if err := consumerWriter.Write(connIndex, w.encoder.Bytes()); err != nil {
w.m.oneConsumerWriteError.Inc(1)
continue
}
@@ -386,14 +391,14 @@ func (w *messageWriterImpl) scanMessageQueue() {
func (w *messageWriterImpl) writeBatch(
iterationIndexes []int,
consumerWriters []consumerWriter,
- toBeRetried []*message,
+ messages []*message,
) error {
if len(consumerWriters) == 0 {
// Not expected in a healthy/valid placement.
- w.m.noWritersError.Inc(int64(len(toBeRetried)))
+ w.m.noWritersError.Inc(int64(len(messages)))
return errNoWriters
}
- for _, m := range toBeRetried {
+ for _, m := range messages {
if err := w.write(iterationIndexes, consumerWriters, m); err != nil {
return err
}
diff --git a/src/msg/producer/writer/message_writer_test.go b/src/msg/producer/writer/message_writer_test.go
index 01c76d77e3..053fc4652b 100644
--- a/src/msg/producer/writer/message_writer_test.go
+++ b/src/msg/producer/writer/message_writer_test.go
@@ -27,7 +27,9 @@ import (
"time"
"github.com/m3db/m3/src/msg/producer"
+ "github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/retry"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/fortytw2/leaktest"
"github.com/golang/mock/gomock"
@@ -122,7 +124,7 @@ func TestMessageWriterWithPooling(t *testing.T) {
w.AddConsumerWriter(cw)
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mm1 := producer.NewMockMessage(ctrl)
@@ -205,7 +207,7 @@ func TestMessageWriterWithoutPooling(t *testing.T) {
w.AddConsumerWriter(cw)
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mm1 := producer.NewMockMessage(ctrl)
@@ -272,7 +274,7 @@ func TestMessageWriterRetryWithoutPooling(t *testing.T) {
a := newAckRouter(1)
a.Register(200, w)
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mm := producer.NewMockMessage(ctrl)
@@ -332,7 +334,7 @@ func TestMessageWriterRetryWithPooling(t *testing.T) {
a := newAckRouter(1)
a.Register(200, w)
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mm := producer.NewMockMessage(ctrl)
@@ -387,7 +389,7 @@ func TestMessageWriterCleanupDroppedMessage(t *testing.T) {
opts := testOptions()
w := newMessageWriter(200, testMessagePool(opts), opts, testMessageWriterMetrics())
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mm := producer.NewMockMessage(ctrl)
@@ -431,7 +433,7 @@ func TestMessageWriterCleanupAckedMessage(t *testing.T) {
w.Init()
defer w.Close()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mm := producer.NewMockMessage(ctrl)
@@ -479,7 +481,7 @@ func TestMessageWriterCleanupAckedMessage(t *testing.T) {
}
func TestMessageWriterCutoverCutoff(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
w := newMessageWriter(200, testMessagePool(testOptions()), nil, testMessageWriterMetrics()).(*messageWriterImpl)
@@ -504,7 +506,7 @@ func TestMessageWriterCutoverCutoff(t *testing.T) {
}
func TestMessageWriterKeepNewWritesInOrderInFrontOfTheQueue(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
opts := testOptions().SetMessageRetryOptions(
@@ -547,7 +549,7 @@ func TestMessageWriterKeepNewWritesInOrderInFrontOfTheQueue(t *testing.T) {
}
func TestMessageWriterRetryIterateBatchFullScan(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
retryBatchSize := 2
@@ -611,7 +613,7 @@ func TestMessageWriterRetryIterateBatchFullScan(t *testing.T) {
}
func TestMessageWriterRetryIterateBatchFullScanWithMessageTTL(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
retryBatchSize := 2
@@ -672,7 +674,7 @@ func TestMessageWriterRetryIterateBatchFullScanWithMessageTTL(t *testing.T) {
}
func TestMessageWriterRetryIterateBatchNotFullScan(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
retryBatchSize := 100
@@ -774,7 +776,7 @@ func TestMessageWriterCloseCleanupAllMessages(t *testing.T) {
opts := testOptions()
w := newMessageWriter(200, nil, opts, testMessageWriterMetrics()).(*messageWriterImpl)
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mm := producer.NewMockMessage(ctrl)
@@ -793,7 +795,7 @@ func TestMessageWriterCloseCleanupAllMessages(t *testing.T) {
}
func TestMessageWriterQueueFullScanOnWriteErrors(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
opts := testOptions().SetMessageQueueScanBatchSize(1)
@@ -833,7 +835,7 @@ func testMessagePool(opts Options) messagePool {
}
func testMessageWriterMetrics() messageWriterMetrics {
- return newMessageWriterMetrics(tally.NoopScope, 1)
+ return newMessageWriterMetrics(tally.NoopScope, instrument.TimerOptions{})
}
func validateMessages(t *testing.T, msgs []*producer.RefCountedMessage, w *messageWriterImpl) {
diff --git a/src/msg/producer/writer/options.go b/src/msg/producer/writer/options.go
index a17a655fbf..70562010bc 100644
--- a/src/msg/producer/writer/options.go
+++ b/src/msg/producer/writer/options.go
@@ -23,6 +23,7 @@ package writer
import (
"time"
+ "github.com/m3db/m3/src/cluster/placement"
"github.com/m3db/m3/src/cluster/services"
"github.com/m3db/m3/src/msg/protocol/proto"
"github.com/m3db/m3/src/msg/topic"
@@ -40,18 +41,25 @@ const (
defaultMessageQueueScanBatchSize = 16
defaultInitialAckMapSize = 1024
- defaultConnectionDialTimeout = 10 * time.Second
- defaultConnectionWriteTimeout = time.Second
- defaultConnectionKeepAlivePeriod = time.Minute
+ defaultNumConnections = 4
+ defaultConnectionDialTimeout = 5 * time.Second
+ defaultConnectionWriteTimeout = time.Duration(0)
+ defaultConnectionKeepAlivePeriod = 5 * time.Second
defaultConnectionResetDelay = 2 * time.Second
defaultConnectionFlushInterval = time.Second
- // Using 16K which provides much better performance comparing
+	// Using 64KiB which provides much better performance compared
// to lower values like 1k ~ 8k.
- defaultConnectionBufferSize = 16384
+	defaultConnectionBufferSize = 2 << 15 // 65536 bytes (~64KiB)
)
// ConnectionOptions configs the connections.
type ConnectionOptions interface {
+ // NumConnections returns the number of connections.
+ NumConnections() int
+
+ // SetNumConnections sets the number of connections.
+ SetNumConnections(value int) ConnectionOptions
+
// DialTimeout returns the dial timeout.
DialTimeout() time.Duration
@@ -108,6 +116,7 @@ type ConnectionOptions interface {
}
type connectionOptions struct {
+ numConnections int
dialTimeout time.Duration
writeTimeout time.Duration
keepAlivePeriod time.Duration
@@ -122,6 +131,7 @@ type connectionOptions struct {
// NewConnectionOptions creates ConnectionOptions.
func NewConnectionOptions() ConnectionOptions {
return &connectionOptions{
+ numConnections: defaultNumConnections,
dialTimeout: defaultConnectionDialTimeout,
writeTimeout: defaultConnectionWriteTimeout,
keepAlivePeriod: defaultConnectionKeepAlivePeriod,
@@ -134,6 +144,16 @@ func NewConnectionOptions() ConnectionOptions {
}
}
+func (opts *connectionOptions) NumConnections() int {
+ return opts.numConnections
+}
+
+func (opts *connectionOptions) SetNumConnections(value int) ConnectionOptions {
+ o := *opts
+ o.numConnections = value
+ return &o
+}
+
func (opts *connectionOptions) DialTimeout() time.Duration {
return opts.dialTimeout
}
@@ -250,6 +270,12 @@ type Options interface {
// SetServiceDiscovery sets the client to service discovery services.
SetServiceDiscovery(value services.Services) Options
+ // PlacementOptions returns the placement options.
+ PlacementOptions() placement.Options
+
+ // SetPlacementOptions sets the placement options.
+ SetPlacementOptions(value placement.Options) Options
+
// PlacementWatchInitTimeout returns the timeout for placement watch initialization.
PlacementWatchInitTimeout() time.Duration
@@ -338,6 +364,7 @@ type writerOptions struct {
topicService topic.Service
topicWatchInitTimeout time.Duration
services services.Services
+ placementOpts placement.Options
placementWatchInitTimeout time.Duration
messagePoolOptions pool.ObjectPoolOptions
messageRetryOpts retry.Options
@@ -357,6 +384,7 @@ type writerOptions struct {
func NewOptions() Options {
return &writerOptions{
topicWatchInitTimeout: defaultTopicWatchInitTimeout,
+ placementOpts: placement.NewOptions(),
placementWatchInitTimeout: defaultPlacementWatchInitTimeout,
messageRetryOpts: retry.NewOptions(),
messageQueueNewWritesScanInterval: defaultMessageQueueNewWritesScanInterval,
@@ -412,6 +440,16 @@ func (opts *writerOptions) SetServiceDiscovery(value services.Services) Options
return &o
}
+func (opts *writerOptions) PlacementOptions() placement.Options {
+ return opts.placementOpts
+}
+
+func (opts *writerOptions) SetPlacementOptions(value placement.Options) Options {
+ o := *opts
+ o.placementOpts = value
+ return &o
+}
+
func (opts *writerOptions) PlacementWatchInitTimeout() time.Duration {
return opts.placementWatchInitTimeout
}
diff --git a/src/msg/producer/writer/shard_writer_test.go b/src/msg/producer/writer/shard_writer_test.go
index 29ff8536e4..3c6ea3beb0 100644
--- a/src/msg/producer/writer/shard_writer_test.go
+++ b/src/msg/producer/writer/shard_writer_test.go
@@ -31,9 +31,9 @@ import (
"github.com/m3db/m3/src/msg/generated/proto/msgpb"
"github.com/m3db/m3/src/msg/producer"
"github.com/m3db/m3/src/msg/protocol/proto"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/fortytw2/leaktest"
- "github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
)
@@ -79,7 +79,7 @@ func TestSharedShardWriter(t *testing.T) {
cws,
)
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mm := producer.NewMockMessage(ctrl)
@@ -163,7 +163,7 @@ func TestReplicatedShardWriter(t *testing.T) {
)
require.Equal(t, 2, len(sw.messageWriters))
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mm := producer.NewMockMessage(ctrl)
@@ -272,7 +272,7 @@ func TestReplicatedShardWriterRemoveMessageWriter(t *testing.T) {
require.Equal(t, 0, mw1.queue.Len())
require.Equal(t, 0, mw2.queue.Len())
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mm := producer.NewMockMessage(ctrl)
@@ -309,7 +309,7 @@ func TestReplicatedShardWriterRemoveMessageWriter(t *testing.T) {
defer conn.Close()
serverEncoder := proto.NewEncoder(opts.EncoderOptions())
- serverDecoder := proto.NewDecoder(conn, opts.DecoderOptions())
+ serverDecoder := proto.NewDecoder(conn, opts.DecoderOptions(), 10)
var msg msgpb.Message
require.NoError(t, serverDecoder.Decode(&msg))
diff --git a/src/msg/producer/writer/writer.go b/src/msg/producer/writer/writer.go
index 75c6a9fc80..5cbd0458d6 100644
--- a/src/msg/producer/writer/writer.go
+++ b/src/msg/producer/writer/writer.go
@@ -53,7 +53,7 @@ func newWriterMetrics(scope tally.Scope) writerMetrics {
topicUpdateError: scope.Counter("topic-update-error"),
invalidTopicUpdate: scope.Counter("invalid-topic"),
invalidShard: scope.Tagged(map[string]string{"reason": "invalid-shard"}).
- Counter("invalid-write"),
+ Counter("invalid-shard-write"),
numConsumerServices: scope.Gauge("num-consumer-services"),
}
}
diff --git a/src/msg/producer/writer/writer_test.go b/src/msg/producer/writer/writer_test.go
index b8e6cbbb4a..472686bf6a 100644
--- a/src/msg/producer/writer/writer_test.go
+++ b/src/msg/producer/writer/writer_test.go
@@ -35,6 +35,7 @@ import (
"github.com/m3db/m3/src/cluster/shard"
"github.com/m3db/m3/src/msg/producer"
"github.com/m3db/m3/src/msg/topic"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/fortytw2/leaktest"
"github.com/golang/mock/gomock"
@@ -45,7 +46,7 @@ import (
func TestWriterInitErrorNoTopic(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := mem.NewStore()
@@ -64,7 +65,7 @@ func TestWriterInitErrorNoTopic(t *testing.T) {
func TestWriterWriteAfterClosed(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := mem.NewStore()
@@ -91,7 +92,7 @@ func TestWriterWriteAfterClosed(t *testing.T) {
func TestWriterWriteWithInvalidShard(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := mem.NewStore()
@@ -124,7 +125,7 @@ func TestWriterWriteWithInvalidShard(t *testing.T) {
func TestWriterInvalidTopicUpdate(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := mem.NewStore()
@@ -193,7 +194,7 @@ func TestWriterInvalidTopicUpdate(t *testing.T) {
func TestWriterRegisterFilter(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := mem.NewStore()
@@ -242,7 +243,7 @@ func TestWriterRegisterFilter(t *testing.T) {
func TestWriterTopicUpdate(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := mem.NewStore()
@@ -360,7 +361,7 @@ func TestWriterTopicUpdate(t *testing.T) {
func TestTopicUpdateWithSameConsumerServicesButDifferentOrder(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := mem.NewStore()
@@ -462,7 +463,7 @@ func TestTopicUpdateWithSameConsumerServicesButDifferentOrder(t *testing.T) {
func TestWriterWrite(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := mem.NewStore()
@@ -587,7 +588,7 @@ func TestWriterWrite(t *testing.T) {
func TestWriterCloseBlocking(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := mem.NewStore()
@@ -659,7 +660,7 @@ func TestWriterCloseBlocking(t *testing.T) {
func TestWriterSetMessageTTLNanosDropMetric(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := mem.NewStore()
@@ -812,7 +813,7 @@ func TestWriterSetMessageTTLNanosDropMetric(t *testing.T) {
func TestWriterNumShards(t *testing.T) {
defer leaktest.Check(t)()
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := mem.NewStore()
diff --git a/src/msg/protocol/proto/benchmark_test.go b/src/msg/protocol/proto/benchmark_test.go
index 1b36ae5ece..64d5c8b70c 100644
--- a/src/msg/protocol/proto/benchmark_test.go
+++ b/src/msg/protocol/proto/benchmark_test.go
@@ -21,6 +21,7 @@
package proto
import (
+ "bufio"
"bytes"
"testing"
@@ -29,8 +30,9 @@ import (
func BenchmarkBaseEncodeDecodeRoundTrip(b *testing.B) {
r := bytes.NewReader(nil)
+ buf := bufio.NewReader(r)
encoder := NewEncoder(NewOptions())
- decoder := NewDecoder(r, NewOptions())
+ decoder := NewDecoder(buf, NewOptions(), 10)
encodeMsg := msgpb.Message{
Metadata: msgpb.Metadata{},
Value: make([]byte, 200),
@@ -45,6 +47,7 @@ func BenchmarkBaseEncodeDecodeRoundTrip(b *testing.B) {
b.FailNow()
}
r.Reset(encoder.Bytes())
+ buf.Reset(r)
if err := decoder.Decode(&decodeMsg); err != nil {
b.FailNow()
}
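
The benchmark now funnels the `bytes.Reader` through a `bufio.Reader` to match the decoder's new buffered-reader usage, and resets both per iteration rather than allocating fresh readers. A minimal standalone version of that reset pattern:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io/ioutil"
)

func main() {
	payload := []byte("hello")
	r := bytes.NewReader(nil)
	buf := bufio.NewReader(r)

	// Re-point both readers at the payload each iteration instead of
	// allocating new ones, as the benchmark above does.
	for i := 0; i < 3; i++ {
		r.Reset(payload)
		buf.Reset(r)
		out, err := ioutil.ReadAll(buf)
		if err != nil {
			panic(err)
		}
		fmt.Printf("iteration %d read %q\n", i, out)
	}
}
```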
diff --git a/src/msg/protocol/proto/decoder.go b/src/msg/protocol/proto/decoder.go
index 9359e4987b..0db0a6d9cc 100644
--- a/src/msg/protocol/proto/decoder.go
+++ b/src/msg/protocol/proto/decoder.go
@@ -24,27 +24,35 @@ import (
"fmt"
"io"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
)
type decoder struct {
- r io.Reader
- buffer []byte
- bytesPool pool.BytesPool
- maxMessageSize int
+ reader io.Reader
+ rOpts xio.ResettableReaderOptions
+ resettableReader xio.ResettableReader
+ buffer []byte
+ bytesPool pool.BytesPool
+ maxMessageSize int
+ opts Options
}
// NewDecoder creates a new decoder; the implementation is not thread safe.
-func NewDecoder(r io.Reader, opts Options) Decoder {
+func NewDecoder(r io.Reader, opts Options, bufferSize int) Decoder {
if opts == nil {
opts = NewOptions()
}
pool := opts.BytesPool()
+ rOpts := xio.ResettableReaderOptions{ReadBufferSize: bufferSize}
return &decoder{
- r: r,
- buffer: getByteSliceWithLength(sizeEncodingLength, pool),
- bytesPool: pool,
- maxMessageSize: opts.MaxMessageSize(),
+ reader: r,
+ resettableReader: opts.RWOptions().ResettableReaderFn()(r, rOpts),
+ buffer: getByteSliceWithLength(sizeEncodingLength, pool),
+ bytesPool: pool,
+ maxMessageSize: opts.MaxMessageSize(),
+ rOpts: rOpts,
+ opts: opts,
}
}
@@ -53,15 +61,19 @@ func (d *decoder) Decode(m Unmarshaler) error {
if err != nil {
return err
}
- d.buffer = growDataBufferIfNeeded(d.buffer, sizeEncodingLength+size, d.bytesPool)
if size > d.maxMessageSize {
- return fmt.Errorf("decoded message size %d is larger than maximum supported size %d", size, d.maxMessageSize)
+ d.resettableReader.Reset(d.reader)
+ return fmt.Errorf(
+ "proto decoded message size %d is larger than maximum supported size %d",
+ size, d.maxMessageSize)
}
+ d.buffer = growDataBufferIfNeeded(d.buffer, sizeEncodingLength+size, d.bytesPool)
return d.decodeData(d.buffer[sizeEncodingLength:sizeEncodingLength+size], m)
}
func (d *decoder) decodeSize() (int, error) {
- if _, err := io.ReadFull(d.r, d.buffer[:sizeEncodingLength]); err != nil {
+ _, err := io.ReadFull(d.resettableReader, d.buffer[:sizeEncodingLength])
+ if err != nil {
return 0, err
}
size := sizeEncodeDecoder.Uint32(d.buffer[:sizeEncodingLength])
@@ -69,12 +81,14 @@ func (d *decoder) decodeSize() (int, error) {
}
func (d *decoder) decodeData(buffer []byte, m Unmarshaler) error {
- if _, err := io.ReadFull(d.r, buffer); err != nil {
+ _, err := io.ReadFull(d.resettableReader, buffer)
+ if err != nil {
return err
}
return m.Unmarshal(buffer)
}
func (d *decoder) ResetReader(r io.Reader) {
- d.r = r
+ d.reader = r
+ d.resettableReader.Reset(r)
}
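
The decoder change does two things: it reads through a resettable buffered reader, and it checks the decoded size against the maximum before growing the data buffer, so a corrupt or hostile size header can no longer force a large allocation. A simplified sketch of that size-then-payload flow (big-endian framing is an assumption here; the real decoder also pools buffers and resets its reader):

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

const sizeEncodingLength = 4

// decodeFrame reads one length-prefixed frame, rejecting oversized
// messages before any payload buffer is allocated -- the same ordering
// the decoder above adopts.
func decodeFrame(r *bufio.Reader, maxMessageSize int) ([]byte, error) {
	var header [sizeEncodingLength]byte
	if _, err := io.ReadFull(r, header[:]); err != nil {
		return nil, err
	}
	size := int(binary.BigEndian.Uint32(header[:]))
	if size > maxMessageSize {
		return nil, fmt.Errorf(
			"decoded message size %d is larger than maximum supported size %d",
			size, maxMessageSize)
	}
	payload := make([]byte, size)
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	return payload, nil
}

func main() {
	var frame bytes.Buffer
	binary.Write(&frame, binary.BigEndian, uint32(5)) // size header
	frame.WriteString("hello")                        // payload

	payload, err := decodeFrame(bufio.NewReader(&frame), 1024)
	if err != nil {
		panic(err)
	}
	fmt.Printf("payload: %q\n", payload)
}
```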
diff --git a/src/msg/protocol/proto/options.go b/src/msg/protocol/proto/options.go
index 4a2213eefa..286c1b3648 100644
--- a/src/msg/protocol/proto/options.go
+++ b/src/msg/protocol/proto/options.go
@@ -21,6 +21,7 @@
package proto
import (
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
)
@@ -32,12 +33,14 @@ var (
func NewOptions() Options {
return &options{
maxMessageSize: defaultMaxMessageSize,
+ rwOpts: xio.NewOptions(),
}
}
type options struct {
maxMessageSize int
bytesPool pool.BytesPool
+ rwOpts xio.Options
}
func (opts *options) MaxMessageSize() int {
@@ -59,3 +62,13 @@ func (opts *options) SetBytesPool(value pool.BytesPool) Options {
o.bytesPool = value
return &o
}
+
+func (opts *options) SetRWOptions(value xio.Options) Options {
+ o := *opts
+ o.rwOpts = value
+ return &o
+}
+
+func (opts *options) RWOptions() xio.Options {
+ return opts.rwOpts
+}
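
`SetRWOptions` follows the same copy-on-set convention as the other setters in this file (`o := *opts`): each setter copies the options and returns the copy, so a shared `Options` value is never mutated in place. The idiom in isolation:

```go
package main

import "fmt"

// Options is immutable from the caller's perspective: every setter
// copies the receiver and returns the copy, so shared Options values
// can never change underneath another component.
type Options struct {
	maxMessageSize int
	readBufferSize int
}

func (o Options) SetMaxMessageSize(v int) Options {
	o.maxMessageSize = v // o is already a copy (value receiver)
	return o
}

func (o Options) SetReadBufferSize(v int) Options {
	o.readBufferSize = v
	return o
}

func main() {
	base := Options{maxMessageSize: 1 << 20, readBufferSize: 4096}
	tuned := base.SetMaxMessageSize(1 << 24)

	fmt.Println(base.maxMessageSize)  // 1048576 -- unchanged
	fmt.Println(tuned.maxMessageSize) // 16777216
}
```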
diff --git a/src/msg/protocol/proto/roundtrip_test.go b/src/msg/protocol/proto/roundtrip_test.go
index 7db722ab59..de5202c6c8 100644
--- a/src/msg/protocol/proto/roundtrip_test.go
+++ b/src/msg/protocol/proto/roundtrip_test.go
@@ -21,6 +21,7 @@
package proto
import (
+ "bufio"
"bytes"
"net"
"testing"
@@ -36,8 +37,10 @@ func TestBaseEncodeDecodeRoundTripWithoutPool(t *testing.T) {
require.Equal(t, 4, len(enc.buffer))
require.Equal(t, 4, cap(enc.buffer))
require.Empty(t, enc.Bytes())
+
r := bytes.NewReader(nil)
- dec := NewDecoder(r, NewOptions()).(*decoder)
+ buf := bufio.NewReader(r)
+ dec := NewDecoder(buf, NewOptions(), 10).(*decoder)
require.Equal(t, 4, len(dec.buffer))
require.Equal(t, 4, cap(dec.buffer))
encodeMsg := msgpb.Message{
@@ -69,7 +72,8 @@ func TestBaseEncodeDecodeRoundTripWithPool(t *testing.T) {
require.Equal(t, 8, cap(enc.buffer))
r := bytes.NewReader(nil)
- dec := NewDecoder(r, NewOptions().SetBytesPool(p)).(*decoder)
+ buf := bufio.NewReader(r)
+ dec := NewDecoder(buf, NewOptions().SetBytesPool(p), 10).(*decoder)
require.Equal(t, 8, len(dec.buffer))
require.Equal(t, 8, cap(dec.buffer))
encodeMsg := msgpb.Message{
@@ -94,7 +98,8 @@ func TestBaseEncodeDecodeRoundTripWithPool(t *testing.T) {
func TestResetReader(t *testing.T) {
enc := NewEncoder(nil)
- dec := NewDecoder(bytes.NewReader(nil), nil)
+ r := bytes.NewReader(nil)
+ dec := NewDecoder(r, nil, 10)
encodeMsg := msgpb.Message{
Metadata: msgpb.Metadata{
Shard: 1,
@@ -143,16 +148,29 @@ func TestDecodeMessageLargerThanMaxSize(t *testing.T) {
require.NoError(t, err)
decodeMsg := msgpb.Message{}
- opts := NewOptions().SetMaxMessageSize(4)
- dec := NewDecoder(bytes.NewReader(enc.Bytes()), opts)
+ opts := NewOptions().SetMaxMessageSize(8)
+ buf := bufio.NewReader(bytes.NewReader(enc.Bytes()))
+ dec := NewDecoder(buf, opts, 10)
+
+	// NB(r): We need to make sure the decoder does not grow the buffer
+	// when a message is over the max size, so take the capacity of the
+	// buffer and check it equals sizeEncodingLength to measure any growth.
+ require.Equal(t, sizeEncodingLength, cap(dec.(*decoder).buffer))
+
err = dec.Decode(&decodeMsg)
require.Error(t, err)
require.Contains(t, err.Error(), "larger than maximum supported size")
+
+	// Make sure the decoder did not grow the buffer before returning the error.
+ require.Equal(t, sizeEncodingLength, cap(dec.(*decoder).buffer))
}
func TestEncodeDecodeRoundTrip(t *testing.T) {
+ r := bytes.NewReader(nil)
+ buf := bufio.NewReader(r)
+
enc := NewEncoder(nil)
- dec := NewDecoder(nil, nil)
+ dec := NewDecoder(buf, nil, 10)
clientConn, serverConn := net.Pipe()
dec.ResetReader(serverConn)
diff --git a/src/msg/protocol/proto/types.go b/src/msg/protocol/proto/types.go
index e826a69d53..774f7aa14c 100644
--- a/src/msg/protocol/proto/types.go
+++ b/src/msg/protocol/proto/types.go
@@ -23,6 +23,7 @@ package proto
import (
"io"
+ xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
)
@@ -73,4 +74,10 @@ type Options interface {
// SetBytesPool sets the bytes pool.
SetBytesPool(value pool.BytesPool) Options
+
+ // SetRWOptions sets RW options.
+ SetRWOptions(value xio.Options) Options
+
+ // RWOptions returns the RW options.
+ RWOptions() xio.Options
}
diff --git a/src/query/README.md b/src/query/README.md
index 461ac40c64..db14974813 100644
--- a/src/query/README.md
+++ b/src/query/README.md
@@ -1,6 +1,6 @@
## WARNING: This is Alpha software and not intended for use until a stable release.
-# M3Coordinator [![GoDoc][doc-img]][doc]
+# M3Coordinator
M3Coordinator is a service which provides APIs for reading/writing to [M3DB](https://github.com/m3db/m3) at a global and placement specific level.
It also acts as a bridge between [Prometheus](https://github.com/prometheus/prometheus) and [M3DB](https://github.com/m3db/m3). Using this bridge, [M3DB](https://github.com/m3db/m3) acts as a long term storage for [Prometheus](https://github.com/prometheus/prometheus) using the [remote read/write endpoints](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto).
@@ -26,17 +26,15 @@ Finally, you can spin up the two containers using `docker-compose` within the `d
$ docker-compose up
-> Note: The default local ports for Prometheus and m3coordinator are `9090` and `7201`, respectively, and the default `prometheus.yml` file is `docker/prometheus.yml`
->
->If you want to override these, you can pass in the following environment variables to the `docker-compose` command:
->
-> `LOCAL_PROM_PORT`
->
-> `LOCAL_M3COORD_PORT`
->
-> `LOCAL_PROM_YML`
->
-> (e.g. `$ LOCAL_PROM_PORT=XXXX LOCAL_M3COORD_PORT=XXXX LOCAL_PROM_YML=/path/to/yml docker-compose up`)
+*Note:* The default local ports for Prometheus and m3coordinator are `9090` and `7201`, respectively, and the default `prometheus.yml` file is `docker/prometheus.yml`
+
+If you want to override these, you can pass in the following environment variables to the `docker-compose` command:
+
+ - `LOCAL_PROM_PORT`
+ - `LOCAL_M3COORD_PORT`
+ - `LOCAL_PROM_YML`
+
+(e.g. `$ LOCAL_PROM_PORT=XXXX LOCAL_M3COORD_PORT=XXXX LOCAL_PROM_YML=/path/to/yml docker-compose up`)
**Running m3coordinator locally (on mac only) and Prometheus in Docker container (for development):**
@@ -62,7 +60,6 @@ Setup GCP for m3coordinator:
1. Make sure you select a base image with Docker pre-installed
2. Follow steps 1-5 from the above section (clone `m3coordinator` instead of `m3db`)
- - Depending on the status of https://github.com/m3db/m3/pull/452, you may need to update the version of m3db (6874b8af8e9ec682551d49ad3e3250dfb4f4ae1f) and m3x (7ea8c2f35f9fa0f52bd189e44b11113d708acada) in `glide.yaml`
3. The config file, which is located at `m3coordinator/benchmark/configs/benchmark.yml` will need the same config topology as the m3db config
4. Run m3coordinator - you should see this message with the number of hosts you specified: `[I] successfully updated topology to 3 hosts` with no other warning or error messages
$ ./bin/m3coordinator --config.file benchmark/configs/benchmark.yml
diff --git a/src/query/api/experimental/annotated/iter.go b/src/query/api/experimental/annotated/iter.go
index ed3c22dc58..2d408df81e 100644
--- a/src/query/api/experimental/annotated/iter.go
+++ b/src/query/api/experimental/annotated/iter.go
@@ -29,6 +29,12 @@ import (
xtime "github.com/m3db/m3/src/x/time"
)
+var defaultValue = ingest.IterValue{
+ Tags: models.EmptyTags(),
+ Attributes: ts.DefaultSeriesAttributes(),
+ Metadata: ts.Metadata{},
+}
+
type datapoint struct {
ts.Datapoint
@@ -51,6 +57,7 @@ type iter struct {
idx int
tags []models.Tags
datapoints []datapoint
+ metadatas []ts.Metadata
}
func newIter(
@@ -80,12 +87,22 @@ func (i *iter) Next() bool {
return i.idx < len(i.tags)
}
-func (i *iter) Current() (models.Tags, ts.Datapoints, xtime.Unit, []byte) {
+func (i *iter) Current() ingest.IterValue {
if len(i.tags) == 0 || i.idx < 0 || i.idx >= len(i.tags) {
- return models.EmptyTags(), nil, 0, nil
+ return defaultValue
}
curr := i.datapoints[i.idx]
- return i.tags[i.idx], ts.Datapoints{curr.Datapoint}, xtime.Millisecond, curr.annotation
+ value := ingest.IterValue{
+ Tags: i.tags[i.idx],
+ Datapoints: ts.Datapoints{curr.Datapoint},
+ Attributes: ts.DefaultSeriesAttributes(),
+ Unit: xtime.Millisecond,
+ Annotation: curr.annotation,
+ }
+ if i.idx < len(i.metadatas) {
+ value.Metadata = i.metadatas[i.idx]
+ }
+ return value
}
func (i *iter) Reset() error {
@@ -96,3 +113,13 @@ func (i *iter) Reset() error {
func (i *iter) Error() error {
return nil
}
+
+func (i *iter) SetCurrentMetadata(metadata ts.Metadata) {
+ if len(i.metadatas) == 0 {
+ i.metadatas = make([]ts.Metadata, len(i.tags))
+ }
+ if i.idx < 0 || i.idx >= len(i.metadatas) {
+ return
+ }
+ i.metadatas[i.idx] = metadata
+}
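
`Current` now returns a single `ingest.IterValue` instead of four values, and `SetCurrentMetadata` allocates its backing slice only on first use, so iterators that never carry metadata pay nothing for it. A simplified sketch of that lazy-slice pattern (types trimmed down from the real ones):

```go
package main

import "fmt"

// iterValue stands in for ingest.IterValue in this sketch.
type iterValue struct {
	series   string
	metadata string
}

type sliceIter struct {
	idx       int
	series    []string
	metadatas []string // allocated lazily, only if metadata is ever set
}

func (i *sliceIter) Next() bool {
	i.idx++
	return i.idx < len(i.series)
}

func (i *sliceIter) Current() iterValue {
	v := iterValue{series: i.series[i.idx]}
	// Metadata is optional: only read it if the lazy slice exists.
	if i.idx < len(i.metadatas) {
		v.metadata = i.metadatas[i.idx]
	}
	return v
}

// SetCurrentMetadata mirrors the lazy allocation above: the metadata
// slice costs nothing until the first caller needs it.
func (i *sliceIter) SetCurrentMetadata(m string) {
	if len(i.metadatas) == 0 {
		i.metadatas = make([]string, len(i.series))
	}
	if i.idx < 0 || i.idx >= len(i.metadatas) {
		return
	}
	i.metadatas[i.idx] = m
}

func main() {
	it := &sliceIter{idx: -1, series: []string{"a", "b"}}
	it.Next()
	it.SetCurrentMetadata("downsampled")
	fmt.Printf("%+v\n", it.Current())
}
```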
diff --git a/src/query/api/experimental/annotated/iter_test.go b/src/query/api/experimental/annotated/iter_test.go
index e5547a9801..69b7e4bdf9 100644
--- a/src/query/api/experimental/annotated/iter_test.go
+++ b/src/query/api/experimental/annotated/iter_test.go
@@ -155,11 +155,11 @@ func TestIter(t *testing.T) {
func testOutput(t *testing.T, iter *iter, want iterOutput) {
require.True(t, iter.Next())
- tags, datapoints, unit, annotation := iter.Current()
- assert.Equal(t, want.tags, tags)
- assert.Equal(t, want.datapoints, datapoints)
- assert.Equal(t, want.unit, unit)
- assert.Equal(t, want.annotation, annotation)
+ value := iter.Current()
+ assert.Equal(t, want.tags, value.Tags)
+ assert.Equal(t, want.datapoints, value.Datapoints)
+ assert.Equal(t, want.unit, value.Unit)
+ assert.Equal(t, want.annotation, value.Annotation)
}
type iterOutput struct {
diff --git a/src/query/api/v1/handler/close.go b/src/query/api/v1/handler/close.go
index 21d0231541..1487e47851 100644
--- a/src/query/api/v1/handler/close.go
+++ b/src/query/api/v1/handler/close.go
@@ -28,29 +28,101 @@ import (
"github.com/m3db/m3/src/x/instrument"
)
-// CloseWatcher watches for CloseNotify and context timeout. It is best effort and may sometimes not close the channel relying on gc
+// CancelWatcher is an interface that wraps a WatchForCancel method.
+// TODO: make this generic middleware, rather than applied per function.
+type CancelWatcher interface {
+ // WatchForCancel watches on the given context, and applies
+ // the given cancellation function.
+ WatchForCancel(context.Context, context.CancelFunc)
+}
+
+type canceller struct {
+ notifier http.CloseNotifier
+ iOpts instrument.Options
+}
+
+func (c *canceller) WatchForCancel(
+ ctx context.Context,
+ cancel context.CancelFunc,
+) {
+ logger := logging.WithContext(ctx, c.iOpts)
+ notify := c.notifier.CloseNotify()
+ go func() {
+ // Wait for either the request to finish
+ // or for the client to disconnect
+ select {
+ case <-notify:
+ logger.Warn("connection closed by client")
+ cancel()
+ case <-ctx.Done():
+			// We only care about the timeout case and not other cancellations
+ if ctx.Err() == context.DeadlineExceeded {
+ logger.Warn("request timed out")
+ }
+ }
+ }()
+}
+
+type ctxCanceller struct {
+ iOpts instrument.Options
+}
+
+func (c *ctxCanceller) WatchForCancel(
+ ctx context.Context, _ context.CancelFunc,
+) {
+ logger := logging.WithContext(ctx, c.iOpts)
+ go func() {
+ select {
+ case <-ctx.Done():
+			// We only care about the timeout case and not other cancellations
+ if ctx.Err() == context.DeadlineExceeded {
+ logger.Warn("request timed out")
+ }
+ }
+ }()
+}
+
+// NewResponseWriterCanceller creates a canceller on the given context with
+// the given response writer.
+func NewResponseWriterCanceller(
+ w http.ResponseWriter,
+ iOpts instrument.Options,
+) CancelWatcher {
+ notifier, ok := w.(http.CloseNotifier)
+ if !ok {
+ return &ctxCanceller{iOpts: iOpts}
+ }
+
+ return &canceller{notifier: notifier, iOpts: iOpts}
+}
+
+// CloseWatcher watches for CloseNotify and context timeout.
+// It is best effort and may sometimes not close the channel, relying on GC instead.
func CloseWatcher(
ctx context.Context,
cancel context.CancelFunc,
w http.ResponseWriter,
instrumentOpts instrument.Options,
) {
+ notifier, ok := w.(http.CloseNotifier)
+ if !ok {
+ return
+ }
+
logger := logging.WithContext(ctx, instrumentOpts)
- if notifier, ok := w.(http.CloseNotifier); ok {
- notify := notifier.CloseNotify()
- go func() {
- // Wait for either the request to finish
- // or for the client to disconnect
- select {
- case <-notify:
- logger.Warn("connection closed by client")
- cancel()
- case <-ctx.Done():
- // We only care about the time out case and not other cancellations
- if ctx.Err() == context.DeadlineExceeded {
- logger.Warn("request timed out")
- }
+ notify := notifier.CloseNotify()
+ go func() {
+ // Wait for either the request to finish
+ // or for the client to disconnect
+ select {
+ case <-notify:
+ logger.Warn("connection closed by client")
+ cancel()
+ case <-ctx.Done():
+			// We only care about the timeout case and not other cancellations
+ if ctx.Err() == context.DeadlineExceeded {
+ logger.Warn("request timed out")
}
- }()
- }
+ }
+ }()
}
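
A sketch of how a handler could adopt the new `CancelWatcher` in place of calling `CloseWatcher` directly; the wiring here is illustrative rather than lifted from a real endpoint:

```go
package main

import (
	"context"
	"net/http"
	"time"

	"github.com/m3db/m3/src/query/api/v1/handler"
	"github.com/m3db/m3/src/x/instrument"
)

// exampleHandler shows one way a query endpoint could use CancelWatcher:
// build it from the ResponseWriter once, then let it cancel the request
// context if the client goes away or the deadline passes.
func exampleHandler(iOpts instrument.Options) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
		defer cancel()

		// NewResponseWriterCanceller falls back to a context-only
		// watcher when w does not implement http.CloseNotifier.
		watcher := handler.NewResponseWriterCanceller(w, iOpts)
		watcher.WatchForCancel(ctx, cancel)

		// ... serve the request using ctx ...
		w.WriteHeader(http.StatusOK)
	}
}

func main() {
	http.Handle("/example", exampleHandler(instrument.NewOptions()))
}
```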
diff --git a/src/query/api/v1/handler/close_test.go b/src/query/api/v1/handler/close_test.go
index a9222962ff..1a748ce39a 100644
--- a/src/query/api/v1/handler/close_test.go
+++ b/src/query/api/v1/handler/close_test.go
@@ -35,7 +35,17 @@ func TestCloseWatcher(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
w := httptest.NewRecorder()
CloseWatcher(ctx, cancel, w, instrument.NewOptions())
- assert.Nil(t, ctx.Err())
+ assert.NoError(t, ctx.Err())
time.Sleep(100 * time.Millisecond)
- assert.NotNil(t, ctx.Err())
+ assert.Error(t, ctx.Err())
+}
+
+func TestResponseWriteCanceller(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+ w := httptest.NewRecorder()
+ canceller := NewResponseWriterCanceller(w, instrument.NewOptions())
+ canceller.WatchForCancel(ctx, cancel)
+ assert.NoError(t, ctx.Err())
+ time.Sleep(100 * time.Millisecond)
+ assert.Error(t, ctx.Err())
}
diff --git a/src/query/api/v1/handler/database/config_bootstrappers_get_test.go b/src/query/api/v1/handler/database/config_bootstrappers_get_test.go
index b9cd53d92c..df46f40f98 100644
--- a/src/query/api/v1/handler/database/config_bootstrappers_get_test.go
+++ b/src/query/api/v1/handler/database/config_bootstrappers_get_test.go
@@ -30,6 +30,7 @@ import (
"github.com/m3db/m3/src/cluster/kv"
"github.com/m3db/m3/src/dbnode/kvconfig"
"github.com/m3db/m3/src/x/instrument"
+ xjson "github.com/m3db/m3/src/x/json"
xtest "github.com/m3db/m3/src/x/test"
"github.com/gogo/protobuf/proto"
@@ -71,13 +72,14 @@ func TestConfigGetBootstrappersHandler(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
- expectedResponse := `
- {
- "values": ["filesystem", "commitlog", "peers", "uninitialized_topology"]
+ expectedResp := xjson.Map{
+ "values": xjson.Array{"filesystem", "commitlog", "peers", "uninitialized_topology"},
}
- `
- assert.Equal(t, stripAllWhitespace(expectedResponse), string(body),
- xtest.Diff(xtest.MustPrettyJSON(t, expectedResponse), xtest.MustPrettyJSON(t, string(body))))
+
+ expected := xtest.MustPrettyJSONMap(t, expectedResp)
+ actual := xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
}
func TestConfigGetBootstrappersHandlerNotFound(t *testing.T) {
diff --git a/src/query/api/v1/handler/database/config_bootstrappers_set_test.go b/src/query/api/v1/handler/database/config_bootstrappers_set_test.go
index 4faeea5174..8ae3a3fe8e 100644
--- a/src/query/api/v1/handler/database/config_bootstrappers_set_test.go
+++ b/src/query/api/v1/handler/database/config_bootstrappers_set_test.go
@@ -24,12 +24,12 @@ import (
"io/ioutil"
"net/http"
"net/http/httptest"
- "strings"
"testing"
"github.com/m3db/m3/src/cluster/generated/proto/commonpb"
"github.com/m3db/m3/src/dbnode/kvconfig"
"github.com/m3db/m3/src/x/instrument"
+ xjson "github.com/m3db/m3/src/x/json"
xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
@@ -46,11 +46,9 @@ func TestConfigSetBootstrappersHandler(t *testing.T) {
instrument.NewOptions())
w := httptest.NewRecorder()
- jsonInput := `
- {
- "values": ["filesystem", "commitlog", "peers", "uninitialized_topology"]
- }
- `
+ jsonInput := xjson.Map{
+ "values": xjson.Array{"filesystem", "commitlog", "peers", "uninitialized_topology"},
+ }
mockStore.EXPECT().
Set(kvconfig.BootstrapperKey, gomock.Any()).
@@ -61,7 +59,8 @@ func TestConfigSetBootstrappersHandler(t *testing.T) {
}, value.Values)
})
- req := httptest.NewRequest("POST", "/database/config/bootstrappers", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/database/config/bootstrappers",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
handler.ServeHTTP(w, req)
@@ -71,13 +70,14 @@ func TestConfigSetBootstrappersHandler(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
- expectedResponse := `
- {
- "values": ["filesystem", "commitlog", "peers", "uninitialized_topology"]
+ expectedResp := xjson.Map{
+ "values": xjson.Array{"filesystem", "commitlog", "peers", "uninitialized_topology"},
}
- `
- assert.Equal(t, stripAllWhitespace(expectedResponse), string(body),
- xtest.Diff(xtest.MustPrettyJSON(t, expectedResponse), xtest.MustPrettyJSON(t, string(body))))
+
+ expected := xtest.MustPrettyJSONMap(t, expectedResp)
+ actual := xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
}
func TestConfigSetBootstrappersHandlerNoValues(t *testing.T) {
@@ -89,13 +89,12 @@ func TestConfigSetBootstrappersHandlerNoValues(t *testing.T) {
instrument.NewOptions())
w := httptest.NewRecorder()
- jsonInput := `
- {
- "values": []
- }
- `
+ jsonInput := xjson.Map{
+ "values": xjson.Array{},
+ }
- req := httptest.NewRequest("POST", "/database/config/bootstrappers", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/database/config/bootstrappers",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
handler.ServeHTTP(w, req)
@@ -113,13 +112,12 @@ func TestConfigSetBootstrappersHandlerInvalidValue(t *testing.T) {
instrument.NewOptions())
w := httptest.NewRecorder()
- jsonInput := `
- {
- "values": ["filesystem", "foo"]
- }
- `
+ jsonInput := xjson.Map{
+ "values": xjson.Array{"filesystem", "foo"},
+ }
- req := httptest.NewRequest("POST", "/database/config/bootstrappers", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/database/config/bootstrappers",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
handler.ServeHTTP(w, req)
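
These tests replace hand-written JSON strings with typed `xjson.Map`/`xjson.Array` values. The helpers themselves are not in this diff; assuming they are thin aliases over `map[string]interface{}` and `[]interface{}`, the idea reduces to encoding a typed value into the request body so the JSON is well-formed by construction:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// Map and Array mirror the xjson aliases used in the tests above:
// ordinary Go maps and slices that marshal to JSON objects and arrays.
type (
	Map   map[string]interface{}
	Array []interface{}
)

func main() {
	body := Map{
		"values": Array{"filesystem", "commitlog", "peers"},
	}

	// Building the request body from a typed value instead of a raw
	// string avoids whitespace-stripping helpers and typo-prone quoting.
	buf := bytes.NewBuffer(nil)
	if err := json.NewEncoder(buf).Encode(body); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}
```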
diff --git a/src/query/api/v1/handler/database/create.go b/src/query/api/v1/handler/database/create.go
index 3e1e4e704a..ab672cb9ad 100644
--- a/src/query/api/v1/handler/database/create.go
+++ b/src/query/api/v1/handler/database/create.go
@@ -203,7 +203,9 @@ func (h *createHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
- nsRegistry, err := h.namespaceGetHandler.Get()
+ opts := handleroptions.NewServiceOptions(h.serviceNameAndDefaults(),
+ r.Header, nil)
+ nsRegistry, err := h.namespaceGetHandler.Get(opts)
if err != nil {
logger.Error("unable to retrieve existing namespaces", zap.Error(err))
xhttp.Error(w, err, http.StatusInternalServerError)
@@ -221,8 +223,6 @@ func (h *createHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
- opts := handleroptions.NewServiceOptions(h.serviceNameAndDefaults(),
- r.Header, nil)
nsRegistry, err = h.namespaceAddHandler.Add(namespaceRequest, opts)
if err != nil {
logger.Error("unable to add namespace", zap.Error(err))
diff --git a/src/query/api/v1/handler/database/create_test.go b/src/query/api/v1/handler/database/create_test.go
index 3c676ffef1..15ef14d6ff 100644
--- a/src/query/api/v1/handler/database/create_test.go
+++ b/src/query/api/v1/handler/database/create_test.go
@@ -21,14 +21,14 @@
package database
import (
+ "bytes"
+ "encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
- "strings"
"testing"
"time"
- "unicode"
"github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/cluster/generated/proto/placementpb"
@@ -40,6 +40,7 @@ import (
"github.com/m3db/m3/src/query/api/v1/handler/namespace"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/x/instrument"
+ xjson "github.com/m3db/m3/src/x/json"
xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
@@ -102,14 +103,13 @@ func testLocalType(t *testing.T, providedType string, placementExists bool) {
require.NoError(t, err)
w := httptest.NewRecorder()
- jsonInput := fmt.Sprintf(`
- {
- "namespaceName": "testNamespace",
- "type": "%s"
- }
- `, providedType)
+ jsonInput := xjson.Map{
+ "namespaceName": "testNamespace",
+ "type": providedType,
+ }
- req := httptest.NewRequest("POST", "/database/create", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/database/create",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
mockKV.EXPECT().Get(namespace.M3DBNodeNamespacesKey).Return(nil, kv.ErrNotFound).Times(2)
@@ -170,6 +170,7 @@ func testLocalType(t *testing.T, providedType string, placementExists bool) {
"enabled": true,
"blockSizeNanos": "3600000000000"
},
+ "runtimeOptions": null,
"schemaOptions": null,
"coldWritesEnabled": false
}
@@ -188,7 +189,10 @@ func testLocalType(t *testing.T, providedType string, placementExists bool) {
"shards": [],
"shardSetId": 0,
"hostname": "localhost",
- "port": 9000
+ "port": 9000,
+ "metadata": {
+ "debugPort": 0
+ }
}
},
"replicaFactor": 0,
@@ -202,8 +206,11 @@ func testLocalType(t *testing.T, providedType string, placementExists bool) {
}
}
`
- assert.Equal(t, stripAllWhitespace(expectedResponse), string(body),
- xtest.Diff(xtest.MustPrettyJSON(t, expectedResponse), xtest.MustPrettyJSON(t, string(body))))
+
+ expected := xtest.MustPrettyJSONString(t, expectedResponse)
+ actual := xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
}
func TestLocalTypeClusteredPlacementAlreadyExists(t *testing.T) {
@@ -216,14 +223,13 @@ func TestLocalTypeClusteredPlacementAlreadyExists(t *testing.T) {
require.NoError(t, err)
w := httptest.NewRecorder()
- jsonInput := `
- {
- "namespaceName": "testNamespace",
- "type": "local"
- }
- `
+ jsonInput := xjson.Map{
+ "namespaceName": "testNamespace",
+ "type": "local",
+ }
- req := httptest.NewRequest("POST", "/database/create", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/database/create",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
placementProto := &placementpb.Placement{
@@ -264,15 +270,14 @@ func TestLocalTypeWithNumShards(t *testing.T) {
w := httptest.NewRecorder()
- jsonInput := `
- {
- "namespaceName": "testNamespace",
- "type": "local",
- "numShards": 51
- }
- `
+ jsonInput := xjson.Map{
+ "namespaceName": "testNamespace",
+ "type": "local",
+ "numShards": 51,
+ }
- req := httptest.NewRequest("POST", "/database/create", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/database/create",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
mockKV.EXPECT().Get(namespace.M3DBNodeNamespacesKey).Return(nil, kv.ErrNotFound).Times(2)
@@ -328,6 +333,7 @@ func TestLocalTypeWithNumShards(t *testing.T) {
"enabled": true,
"blockSizeNanos": "3600000000000"
},
+ "runtimeOptions": null,
"schemaOptions": null,
"coldWritesEnabled": false
}
@@ -346,7 +352,10 @@ func TestLocalTypeWithNumShards(t *testing.T) {
"shards": [],
"shardSetId": 0,
"hostname": "localhost",
- "port": 9000
+ "port": 9000,
+ "metadata": {
+ "debugPort": 0
+ }
}
},
"replicaFactor": 0,
@@ -360,8 +369,10 @@ func TestLocalTypeWithNumShards(t *testing.T) {
}
}
`
- assert.Equal(t, stripAllWhitespace(expectedResponse), string(body),
- xtest.Diff(xtest.MustPrettyJSON(t, expectedResponse), xtest.MustPrettyJSON(t, string(body))))
+ expected := xtest.MustPrettyJSONString(t, expectedResponse)
+ actual := xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
}
func TestLocalWithBlockSizeNanos(t *testing.T) {
ctrl := gomock.NewController(t)
@@ -374,15 +385,14 @@ func TestLocalWithBlockSizeNanos(t *testing.T) {
require.NoError(t, err)
w := httptest.NewRecorder()
- jsonInput := `
- {
- "namespaceName": "testNamespace",
- "type": "local",
- "blockSize": {"time": "3h"}
- }
- `
+ jsonInput := xjson.Map{
+ "namespaceName": "testNamespace",
+ "type": "local",
+ "blockSize": xjson.Map{"time": "3h"},
+ }
- req := httptest.NewRequest("POST", "/database/create", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/database/create",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
mockKV.EXPECT().Get(namespace.M3DBNodeNamespacesKey).Return(nil, kv.ErrNotFound).Times(2)
@@ -438,6 +448,7 @@ func TestLocalWithBlockSizeNanos(t *testing.T) {
"enabled": true,
"blockSizeNanos": "10800000000000"
},
+ "runtimeOptions": null,
"schemaOptions": null,
"coldWritesEnabled": false
}
@@ -456,7 +467,10 @@ func TestLocalWithBlockSizeNanos(t *testing.T) {
"shards": [],
"shardSetId": 0,
"hostname": "localhost",
- "port": 9000
+ "port": 9000,
+ "metadata": {
+ "debugPort": 0
+ }
}
},
"replicaFactor": 0,
@@ -470,8 +484,10 @@ func TestLocalWithBlockSizeNanos(t *testing.T) {
}
}
`
- assert.Equal(t, stripAllWhitespace(expectedResponse), string(body),
- xtest.Diff(xtest.MustPrettyJSON(t, expectedResponse), xtest.MustPrettyJSON(t, string(body))))
+ expected := xtest.MustPrettyJSONString(t, expectedResponse)
+ actual := xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
}
func TestLocalWithBlockSizeExpectedSeriesDatapointsPerHour(t *testing.T) {
@@ -488,15 +504,16 @@ func TestLocalWithBlockSizeExpectedSeriesDatapointsPerHour(t *testing.T) {
min := minRecommendCalculateBlockSize
desiredBlockSize := min + 5*time.Minute
- jsonInput := fmt.Sprintf(`
- {
- "namespaceName": "testNamespace",
- "type": "local",
- "blockSize": {"expectedSeriesDatapointsPerHour": %d}
- }
- `, int64(float64(blockSizeFromExpectedSeriesScalar)/float64(desiredBlockSize)))
+ jsonInput := xjson.Map{
+ "namespaceName": "testNamespace",
+ "type": "local",
+ "blockSize": xjson.Map{
+ "expectedSeriesDatapointsPerHour": int64(float64(blockSizeFromExpectedSeriesScalar) / float64(desiredBlockSize)),
+ },
+ }
- req := httptest.NewRequest("POST", "/database/create", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/database/create",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
mockKV.EXPECT().Get(namespace.M3DBNodeNamespacesKey).Return(nil, kv.ErrNotFound).Times(2)
@@ -552,6 +569,7 @@ func TestLocalWithBlockSizeExpectedSeriesDatapointsPerHour(t *testing.T) {
"enabled": true,
"blockSizeNanos": "%d"
},
+ "runtimeOptions": null,
"schemaOptions": null,
"coldWritesEnabled": false
}
@@ -570,7 +588,10 @@ func TestLocalWithBlockSizeExpectedSeriesDatapointsPerHour(t *testing.T) {
"shards": [],
"shardSetId": 0,
"hostname": "localhost",
- "port": 9000
+ "port": 9000,
+ "metadata": {
+ "debugPort": 0
+ }
}
},
"replicaFactor": 0,
@@ -585,8 +606,10 @@ func TestLocalWithBlockSizeExpectedSeriesDatapointsPerHour(t *testing.T) {
}
`, desiredBlockSize, desiredBlockSize)
- assert.Equal(t, stripAllWhitespace(expectedResponse), string(body),
- xtest.Diff(xtest.MustPrettyJSON(t, expectedResponse), xtest.MustPrettyJSON(t, string(body))))
+ expected := xtest.MustPrettyJSONString(t, expectedResponse)
+ actual := xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
}
func TestClusterTypeHosts(t *testing.T) {
@@ -608,15 +631,14 @@ func TestClusterTypeHostsPlacementAlreadyExistsHostsProvided(t *testing.T) {
require.NoError(t, err)
w := httptest.NewRecorder()
- jsonInput := `
- {
- "namespaceName": "testNamespace",
- "type": "cluster",
- "hosts": [{"id": "host1"}, {"id": "host2"}]
- }
- `
+ jsonInput := xjson.Map{
+ "namespaceName": "testNamespace",
+ "type": "cluster",
+ "hosts": xjson.Array{xjson.Map{"id": "host1"}, xjson.Map{"id": "host2"}},
+ }
- req := httptest.NewRequest("POST", "/database/create", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/database/create",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
placementProto := &placementpb.Placement{
@@ -664,14 +686,13 @@ func TestClusterTypeHostsPlacementAlreadyExistsExistingIsLocal(t *testing.T) {
require.NoError(t, err)
w := httptest.NewRecorder()
- jsonInput := `
- {
- "namespaceName": "testNamespace",
- "type": "cluster"
- }
- `
+ jsonInput := xjson.Map{
+ "namespaceName": "testNamespace",
+ "type": "cluster",
+ }
- req := httptest.NewRequest("POST", "/database/create", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/database/create",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
placementProto := &placementpb.Placement{
@@ -711,26 +732,25 @@ func testClusterTypeHosts(t *testing.T, placementExists bool) {
require.NoError(t, err)
w := httptest.NewRecorder()
- var jsonInput string
+ var jsonInput xjson.Map
if placementExists {
- jsonInput = `
- {
+ jsonInput = xjson.Map{
"namespaceName": "testNamespace",
- "type": "cluster"
+ "type": "cluster",
}
- `
} else {
- jsonInput = `
- {
+ jsonInput = xjson.Map{
"namespaceName": "testNamespace",
- "type": "cluster",
- "hosts": [{"id": "host1"}, {"id": "host2"}]
+ "type": "cluster",
+ "hosts": xjson.Array{xjson.Map{"id": "host1"}, xjson.Map{"id": "host2"}},
}
- `
}
- req := httptest.NewRequest("POST", "/database/create", strings.NewReader(jsonInput))
+ reqBody := bytes.NewBuffer(nil)
+ require.NoError(t, json.NewEncoder(reqBody).Encode(jsonInput))
+
+ req := httptest.NewRequest("POST", "/database/create", reqBody)
require.NotNil(t, req)
mockKV.EXPECT().Get(namespace.M3DBNodeNamespacesKey).Return(nil, kv.ErrNotFound).Times(2)
@@ -800,6 +820,7 @@ func testClusterTypeHosts(t *testing.T, placementExists bool) {
"enabled": true,
"blockSizeNanos": "3600000000000"
},
+ "runtimeOptions": null,
"schemaOptions": null,
"coldWritesEnabled": false
}
@@ -818,7 +839,10 @@ func testClusterTypeHosts(t *testing.T, placementExists bool) {
"shards": [],
"shardSetId": 0,
"hostname": "host1",
- "port": 9000
+ "port": 9000,
+ "metadata": {
+ "debugPort": 0
+ }
},
"host2": {
"id": "host2",
@@ -829,7 +853,10 @@ func testClusterTypeHosts(t *testing.T, placementExists bool) {
"shards": [],
"shardSetId": 0,
"hostname": "host2",
- "port": 9000
+ "port": 9000,
+ "metadata": {
+ "debugPort": 0
+ }
}
},
"replicaFactor": 0,
@@ -843,8 +870,11 @@ func testClusterTypeHosts(t *testing.T, placementExists bool) {
}
}
`
- assert.Equal(t, stripAllWhitespace(expectedResponse), string(body),
- xtest.Diff(xtest.MustPrettyJSON(t, expectedResponse), xtest.MustPrettyJSON(t, string(body))))
+
+ expected := xtest.MustPrettyJSONString(t, expectedResponse)
+ actual := xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
}
func TestClusterTypeHostsWithIsolationGroup(t *testing.T) {
@@ -852,22 +882,24 @@ func TestClusterTypeHostsWithIsolationGroup(t *testing.T) {
defer ctrl.Finish()
mockClient, mockKV, mockPlacementService := SetupDatabaseTest(t, ctrl)
- mockClient.EXPECT().Store(gomock.Any()).Return(mockKV, nil)
+ mockClient.EXPECT().Store(gomock.Any()).Return(mockKV, nil).AnyTimes()
createHandler, err := NewCreateHandler(mockClient, config.Configuration{},
testDBCfg, svcDefaultOptions, instrument.NewOptions())
require.NoError(t, err)
w := httptest.NewRecorder()
- jsonInput := `
- {
- "namespaceName": "testNamespace",
- "type": "cluster",
- "hosts": [{"id":"host1", "isolationGroup":"group1"}, {"id":"host2", "isolationGroup":"group2"}]
- }
- `
+ jsonInput := xjson.Map{
+ "namespaceName": "testNamespace",
+ "type": "cluster",
+ "hosts": xjson.Array{
+ xjson.Map{"id": "host1", "isolationGroup": "group1"},
+ xjson.Map{"id": "host2", "isolationGroup": "group2"},
+ },
+ }
- req := httptest.NewRequest("POST", "/database/create", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/database/create",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
mockKV.EXPECT().Get(namespace.M3DBNodeNamespacesKey).Return(nil, kv.ErrNotFound).Times(2)
@@ -932,6 +964,7 @@ func TestClusterTypeHostsWithIsolationGroup(t *testing.T) {
"enabled": true,
"blockSizeNanos": "3600000000000"
},
+ "runtimeOptions": null,
"schemaOptions": null,
"coldWritesEnabled": false
}
@@ -950,7 +983,10 @@ func TestClusterTypeHostsWithIsolationGroup(t *testing.T) {
"shards": [],
"shardSetId": 0,
"hostname": "host1",
- "port": 9000
+ "port": 9000,
+ "metadata": {
+ "debugPort": 0
+ }
},
"host2": {
"id": "host2",
@@ -961,7 +997,10 @@ func TestClusterTypeHostsWithIsolationGroup(t *testing.T) {
"shards": [],
"shardSetId": 0,
"hostname": "host2",
- "port": 9000
+ "port": 9000,
+ "metadata": {
+ "debugPort": 0
+ }
}
},
"replicaFactor": 0,
@@ -975,8 +1014,11 @@ func TestClusterTypeHostsWithIsolationGroup(t *testing.T) {
}
}
`
- assert.Equal(t, stripAllWhitespace(expectedResponse), string(body),
- xtest.Diff(xtest.MustPrettyJSON(t, expectedResponse), xtest.MustPrettyJSON(t, string(body))))
+
+ expected := xtest.MustPrettyJSONString(t, expectedResponse)
+ actual := xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
}
func TestClusterTypeMissingHostnames(t *testing.T) {
ctrl := gomock.NewController(t)
@@ -990,14 +1032,13 @@ func TestClusterTypeMissingHostnames(t *testing.T) {
require.NoError(t, err)
w := httptest.NewRecorder()
- jsonInput := `
- {
- "namespaceName": "testNamespace",
- "type": "cluster"
- }
- `
+ jsonInput := xjson.Map{
+ "namespaceName": "testNamespace",
+ "type": "cluster",
+ }
- req := httptest.NewRequest("POST", "/database/create", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/database/create",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
createHandler.ServeHTTP(w, req)
@@ -1006,7 +1047,13 @@ func TestClusterTypeMissingHostnames(t *testing.T) {
body, err := ioutil.ReadAll(resp.Body)
assert.NoError(t, err)
assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
- assert.Equal(t, withEndline(`{"error":"missing required field"}`), string(body))
+ assert.Equal(t,
+ xtest.MustPrettyJSONMap(t,
+ xjson.Map{
+ "error": "missing required field",
+ },
+ ),
+ xtest.MustPrettyJSONString(t, string(body)))
}
func TestBadType(t *testing.T) {
@@ -1021,13 +1068,13 @@ func TestBadType(t *testing.T) {
require.NoError(t, err)
w := httptest.NewRecorder()
- jsonInput := `
- {
- "namespaceName": "testNamespace",
- "type": "badtype"
- }
- `
- req := httptest.NewRequest("POST", "/database/create", strings.NewReader(jsonInput))
+ jsonInput := xjson.Map{
+ "namespaceName": "testNamespace",
+ "type": "badtype",
+ }
+
+ req := httptest.NewRequest("POST", "/database/create",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
createHandler.ServeHTTP(w, req)
@@ -1035,18 +1082,11 @@ func TestBadType(t *testing.T) {
body, err := ioutil.ReadAll(resp.Body)
assert.NoError(t, err)
assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
- assert.Equal(t, withEndline(`{"error":"invalid database type"}`), string(body))
-}
-
-func stripAllWhitespace(str string) string {
- return strings.Map(func(r rune) rune {
- if unicode.IsSpace(r) {
- return -1
- }
- return r
- }, str)
-}
-
-func withEndline(str string) string {
- return str + "\n"
+ assert.Equal(t,
+ xtest.MustPrettyJSONMap(t,
+ xjson.Map{
+ "error": "invalid database type",
+ },
+ ),
+ xtest.MustPrettyJSONString(t, string(body)))
}
diff --git a/src/query/api/v1/handler/graphite/find.go b/src/query/api/v1/handler/graphite/find.go
index 147b24aa39..3b892af6cb 100644
--- a/src/query/api/v1/handler/graphite/find.go
+++ b/src/query/api/v1/handler/graphite/find.go
@@ -31,6 +31,7 @@ import (
"github.com/m3db/m3/src/query/api/v1/options"
"github.com/m3db/m3/src/query/graphite/graphite"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/util/logging"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/instrument"
@@ -70,8 +71,8 @@ type nodeDescriptor struct {
}
func mergeTags(
- terminatedResult *storage.CompleteTagsResult,
- childResult *storage.CompleteTagsResult,
+ terminatedResult *consolidators.CompleteTagsResult,
+ childResult *consolidators.CompleteTagsResult,
) (map[string]nodeDescriptor, error) {
// sanity check the case.
if terminatedResult.CompleteNameOnly {
@@ -110,7 +111,7 @@ func (h *grahiteFindHandler) ServeHTTP(
) {
ctx := context.WithValue(r.Context(), handler.HeaderKey, r.Header)
logger := logging.WithContext(ctx, h.instrumentOpts)
- w.Header().Set("Content-Type", "application/json")
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
// NB: need to run two separate queries, one of which will match only the
// provided matchers, and one which will match the provided matchers with at
@@ -129,9 +130,9 @@ func (h *grahiteFindHandler) ServeHTTP(
}
var (
- terminatedResult *storage.CompleteTagsResult
+ terminatedResult *consolidators.CompleteTagsResult
tErr error
- childResult *storage.CompleteTagsResult
+ childResult *consolidators.CompleteTagsResult
cErr error
wg sync.WaitGroup
)
diff --git a/src/query/api/v1/handler/graphite/find_parser.go b/src/query/api/v1/handler/graphite/find_parser.go
index 5533dac01e..f2ee0bc730 100644
--- a/src/query/api/v1/handler/graphite/find_parser.go
+++ b/src/query/api/v1/handler/graphite/find_parser.go
@@ -57,8 +57,7 @@ func parseFindParamsToQueries(r *http.Request) (
_rawQueryString string,
_err *xhttp.ParseError,
) {
- values := r.URL.Query()
- query := values.Get("query")
+ query := r.FormValue("query")
if query == "" {
return nil, nil, "",
xhttp.NewParseError(errors.ErrNoQueryFound, http.StatusBadRequest)
diff --git a/src/query/api/v1/handler/graphite/find_test.go b/src/query/api/v1/handler/graphite/find_test.go
index 2196d38ab8..f7651ff976 100644
--- a/src/query/api/v1/handler/graphite/find_test.go
+++ b/src/query/api/v1/handler/graphite/find_test.go
@@ -36,6 +36,8 @@ import (
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ "github.com/m3db/m3/src/x/headers"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@@ -133,9 +135,9 @@ func setupStorage(ctrl *gomock.Controller, ex, ex2 bool) storage.Storage {
},
}
- noChildrenResult := &storage.CompleteTagsResult{
+ noChildrenResult := &consolidators.CompleteTagsResult{
CompleteNameOnly: false,
- CompletedTags: []storage.CompletedTag{
+ CompletedTags: []consolidators.CompletedTag{
{Name: b("__g1__"), Values: bs("bug", "bar", "baz")},
},
Metadata: block.ResultMetadata{
@@ -156,9 +158,9 @@ func setupStorage(ctrl *gomock.Controller, ex, ex2 bool) storage.Storage {
},
}
- childrenResult := &storage.CompleteTagsResult{
+ childrenResult := &consolidators.CompleteTagsResult{
CompleteNameOnly: false,
- CompletedTags: []storage.CompletedTag{
+ CompletedTags: []consolidators.CompletedTag{
{Name: b("__g1__"), Values: bs("baz", "bix", "bug")},
},
Metadata: block.ResultMetadata{
@@ -218,26 +220,37 @@ func (r results) Less(i, j int) bool {
return strings.Compare(r[i].ID, r[j].ID) == -1
}
-func testFind(t *testing.T, ex bool, ex2 bool, header string) {
+func testFind(t *testing.T, httpMethod string, ex bool, ex2 bool, header string) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
// setup storage and handler
store := setupStorage(ctrl, ex, ex2)
- builder := handleroptions.
- NewFetchOptionsBuilder(handleroptions.FetchOptionsBuilderOptions{})
+ builder := handleroptions.NewFetchOptionsBuilder(
+ handleroptions.FetchOptionsBuilderOptions{})
opts := options.EmptyHandlerOptions().
SetFetchOptionsBuilder(builder).
SetStorage(store)
h := NewFindHandler(opts)
// execute the query
+ params := make(url.Values)
+ params.Set("query", "foo.b*")
+ params.Set("from", from.s)
+ params.Set("until", until.s)
+
w := &writer{}
req := &http.Request{
- URL: &url.URL{
- RawQuery: fmt.Sprintf("query=foo.b*&from=%s&until=%s", from.s, until.s),
- },
+ Method: httpMethod,
+ }
+ switch httpMethod {
+ case http.MethodGet:
+ req.URL = &url.URL{
+ RawQuery: params.Encode(),
+ }
+ case http.MethodPost:
+ req.Form = params
}
h.ServeHTTP(w, req)
@@ -269,7 +282,7 @@ func testFind(t *testing.T, ex bool, ex2 bool, header string) {
}
require.Equal(t, expected, r)
- actual := w.Header().Get(handleroptions.LimitHeader)
+ actual := w.Header().Get(headers.LimitHeader)
assert.Equal(t, header, actual)
}
@@ -279,17 +292,19 @@ var limitTests = []struct {
header string
}{
{"both incomplete", false, false, fmt.Sprintf(
- "%s,%s_%s", handleroptions.LimitHeaderSeriesLimitApplied, "foo", "bar")},
+ "%s,%s_%s", headers.LimitHeaderSeriesLimitApplied, "foo", "bar")},
{"with terminator incomplete", true, false, "foo_bar"},
{"with children incomplete", false, true,
- handleroptions.LimitHeaderSeriesLimitApplied},
+ headers.LimitHeaderSeriesLimitApplied},
{"both complete", true, true, ""},
}
func TestFind(t *testing.T) {
for _, tt := range limitTests {
t.Run(tt.name, func(t *testing.T) {
- testFind(t, tt.ex, tt.ex2, tt.header)
+ for _, httpMethod := range FindHTTPMethods {
+ testFind(t, httpMethod, tt.ex, tt.ex2, tt.header)
+ }
})
}
}
diff --git a/src/query/api/v1/handler/graphite/render.go b/src/query/api/v1/handler/graphite/render.go
index bb43b9ae59..218051e28c 100644
--- a/src/query/api/v1/handler/graphite/render.go
+++ b/src/query/api/v1/handler/graphite/render.go
@@ -38,6 +38,7 @@ import (
graphite "github.com/m3db/m3/src/query/graphite/storage"
"github.com/m3db/m3/src/query/graphite/ts"
"github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/x/headers"
xhttp "github.com/m3db/m3/src/x/net/http"
)
@@ -98,7 +99,8 @@ func (h *renderHandler) serveHTTP(
return respError{err: err, code: http.StatusBadRequest}
}
- limit, err := handleroptions.ParseLimit(r, h.queryContextOpts.LimitMaxTimeseries)
+ limit, err := handleroptions.ParseLimit(r, headers.LimitMaxSeriesHeader,
+ "limit", h.queryContextOpts.LimitMaxTimeseries)
if err != nil {
return respError{err: err, code: http.StatusBadRequest}
}
diff --git a/src/query/api/v1/handler/graphite/render_parser.go b/src/query/api/v1/handler/graphite/render_parser.go
index 8a1e495c4a..6c8dd8a399 100644
--- a/src/query/api/v1/handler/graphite/render_parser.go
+++ b/src/query/api/v1/handler/graphite/render_parser.go
@@ -33,6 +33,7 @@ import (
"github.com/m3db/m3/src/query/graphite/graphite"
"github.com/m3db/m3/src/query/graphite/ts"
"github.com/m3db/m3/src/query/util/json"
+ xhttp "github.com/m3db/m3/src/x/net/http"
)
const (
@@ -54,12 +55,12 @@ func WriteRenderResponse(
format string,
) error {
if format == pickleFormat {
- w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeOctetStream)
return renderResultsPickle(w, series.Values)
}
// NB: return json unless requesting specifically `pickleFormat`
- w.Header().Set("Content-Type", "application/json")
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
return renderResultsJSON(w, series.Values)
}
diff --git a/src/query/api/v1/handler/graphite/render_test.go b/src/query/api/v1/handler/graphite/render_test.go
index 1da2d22b4d..9a1de559cc 100644
--- a/src/query/api/v1/handler/graphite/render_test.go
+++ b/src/query/api/v1/handler/graphite/render_test.go
@@ -28,7 +28,6 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/query/api/v1/options"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/graphite/graphite"
@@ -36,6 +35,7 @@ import (
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/mock"
"github.com/m3db/m3/src/query/ts"
+ "github.com/m3db/m3/src/x/headers"
xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
@@ -335,7 +335,7 @@ func TestParseQueryResultsMultiTargetWithLimits(t *testing.T) {
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
- actual := recorder.Header().Get(handleroptions.LimitHeader)
+ actual := recorder.Header().Get(headers.LimitHeader)
assert.Equal(t, tt.header, actual)
})
}
diff --git a/src/query/api/v1/handler/influxdb/write.go b/src/query/api/v1/handler/influxdb/write.go
index 95aa614941..e69178cfb5 100644
--- a/src/query/api/v1/handler/influxdb/write.go
+++ b/src/query/api/v1/handler/influxdb/write.go
@@ -36,10 +36,10 @@ import (
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/query/util/logging"
+ imodels "github.com/influxdata/influxdb/models"
xerrors "github.com/m3db/m3/src/x/errors"
xhttp "github.com/m3db/m3/src/x/net/http"
xtime "github.com/m3db/m3/src/x/time"
- imodels "github.com/influxdata/influxdb/models"
"go.uber.org/zap"
)
@@ -51,6 +51,12 @@ const (
InfluxWriteHTTPMethod = http.MethodPost
)
+var defaultValue = ingest.IterValue{
+ Tags: models.EmptyTags(),
+ Attributes: ts.DefaultSeriesAttributes(),
+ Metadata: ts.Metadata{},
+}
+
type ingestWriteHandler struct {
handlerOpts options.HandlerOptions
tagOpts models.TagOptions
@@ -71,6 +77,7 @@ type ingestIterator struct {
// internal
pointIndex int
err xerrors.MultiError
+ metadatas []ts.Metadata
// following entries are within current point, and initialized
// when we go to the first entry in the current point
@@ -218,7 +225,7 @@ func determineTimeUnit(t time.Time) xtime.Unit {
return xtime.Nanosecond
}
-func (ii *ingestIterator) Current() (models.Tags, ts.Datapoints, xtime.Unit, []byte) {
+func (ii *ingestIterator) Current() ingest.IterValue {
if ii.pointIndex < len(ii.points) && ii.nextFieldIndex > 0 && len(ii.fields) > (ii.nextFieldIndex-1) {
point := ii.points[ii.pointIndex]
field := ii.fields[ii.nextFieldIndex-1]
@@ -226,10 +233,18 @@ func (ii *ingestIterator) Current() (models.Tags, ts.Datapoints, xtime.Unit, []b
t := point.Time()
- return tags, []ts.Datapoint{ts.Datapoint{Timestamp: t,
- Value: field.value}}, determineTimeUnit(t), nil
+ value := ingest.IterValue{
+ Tags: tags,
+ Datapoints: []ts.Datapoint{ts.Datapoint{Timestamp: t, Value: field.value}},
+ Attributes: ts.DefaultSeriesAttributes(),
+ Unit: determineTimeUnit(t),
+ }
+ if ii.pointIndex < len(ii.metadatas) {
+ value.Metadata = ii.metadatas[ii.pointIndex]
+ }
+ return value
}
- return models.EmptyTags(), nil, 0, nil
+ return defaultValue
}
func (ii *ingestIterator) Reset() error {
@@ -243,6 +258,23 @@ func (ii *ingestIterator) Error() error {
return ii.err.FinalError()
}
+func (ii *ingestIterator) SetCurrentMetadata(metadata ts.Metadata) {
+ if len(ii.metadatas) == 0 {
+ ii.metadatas = make([]ts.Metadata, len(ii.points))
+ }
+ if ii.pointIndex < len(ii.points) {
+ ii.metadatas[ii.pointIndex] = metadata
+ }
+}
+
+func (ii *ingestIterator) CurrentMetadata() ts.Metadata {
+ if len(ii.metadatas) == 0 || ii.pointIndex >= len(ii.metadatas) {
+ return ts.Metadata{}
+ }
+ return ii.metadatas[ii.pointIndex]
+}
+
+// NewInfluxWriterHandler returns a new influx write handler.
func NewInfluxWriterHandler(options options.HandlerOptions) http.Handler {
return &ingestWriteHandler{handlerOpts: options,
tagOpts: options.TagOptions(),
diff --git a/src/query/api/v1/handler/influxdb/write_test.go b/src/query/api/v1/handler/influxdb/write_test.go
index d3aa2c3bd1..49f9ad5fef 100644
--- a/src/query/api/v1/handler/influxdb/write_test.go
+++ b/src/query/api/v1/handler/influxdb/write_test.go
@@ -25,8 +25,8 @@ import (
"testing"
"time"
- xtime "github.com/m3db/m3/src/x/time"
imodels "github.com/influxdata/influxdb/models"
+ xtime "github.com/m3db/m3/src/x/time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -35,10 +35,10 @@ import (
// they are easiest for a human to handle
func (self *ingestIterator) pop(t *testing.T) string {
if self.Next() {
- tags, dp, _, _ := self.Current()
- assert.Equal(t, 1, len(dp))
+ value := self.Current()
+ assert.Equal(t, 1, len(value.Datapoints))
- return fmt.Sprintf("%s %v %s", tags.String(), dp[0].Value, dp[0].Timestamp)
+ return fmt.Sprintf("%s %v %s", value.Tags.String(), value.Datapoints[0].Value, value.Datapoints[0].Timestamp)
}
return ""
}
@@ -108,14 +108,14 @@ func TestIngestIteratorIssue2125(t *testing.T) {
require.NoError(t, iter.Error())
assert.True(t, iter.Next())
- t1, _, _, _ := iter.Current()
+ value1 := iter.Current()
assert.True(t, iter.Next())
- t2, _, _, _ := iter.Current()
+ value2 := iter.Current()
require.NoError(t, iter.Error())
- assert.Equal(t, t1.String(), "__name__: measure_k1, lab: foo")
- assert.Equal(t, t2.String(), "__name__: measure_k2, lab: foo")
+ assert.Equal(t, value1.Tags.String(), "__name__: measure_k1, lab: foo")
+ assert.Equal(t, value2.Tags.String(), "__name__: measure_k2, lab: foo")
}
func TestDetermineTimeUnit(t *testing.T) {
diff --git a/src/query/api/v1/handler/json/write.go b/src/query/api/v1/handler/json/write.go
index f08af844c6..abc1e1a3a8 100644
--- a/src/query/api/v1/handler/json/write.go
+++ b/src/query/api/v1/handler/json/write.go
@@ -30,6 +30,7 @@ import (
"github.com/m3db/m3/src/query/api/v1/options"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/query/util"
"github.com/m3db/m3/src/query/util/logging"
@@ -50,6 +51,7 @@ const (
// WriteJSONHandler represents a handler for the write json endpoint
type WriteJSONHandler struct {
+ opts options.HandlerOptions
store storage.Storage
instrumentOpts instrument.Options
}
@@ -57,6 +59,7 @@ type WriteJSONHandler struct {
// NewWriteJSONHandler returns a new instance of handler.
func NewWriteJSONHandler(opts options.HandlerOptions) http.Handler {
return &WriteJSONHandler{
+ opts: opts,
store: opts.Storage(),
instrumentOpts: opts.InstrumentOpts(),
}
@@ -79,7 +82,7 @@ func (h *WriteJSONHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
- writeQuery, err := newStorageWriteQuery(req)
+ writeQuery, err := h.newWriteQuery(req)
if err != nil {
logger := logging.WithContext(r.Context(), h.instrumentOpts)
logger.Error("parsing error",
@@ -97,18 +100,18 @@ func (h *WriteJSONHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
-func newStorageWriteQuery(req *WriteQuery) (*storage.WriteQuery, error) {
+func (h *WriteJSONHandler) newWriteQuery(req *WriteQuery) (*storage.WriteQuery, error) {
parsedTime, err := util.ParseTimeString(req.Timestamp)
if err != nil {
return nil, err
}
- tags := models.NewTags(len(req.Tags), nil)
+ tags := models.NewTags(len(req.Tags), h.opts.TagOptions())
for n, v := range req.Tags {
tags = tags.AddTag(models.Tag{Name: []byte(n), Value: []byte(v)})
}
- return &storage.WriteQuery{
+ return storage.NewWriteQuery(storage.WriteQueryOptions{
Tags: tags,
Datapoints: ts.Datapoints{
{
@@ -118,10 +121,10 @@ func newStorageWriteQuery(req *WriteQuery) (*storage.WriteQuery, error) {
},
Unit: xtime.Millisecond,
Annotation: nil,
- Attributes: storage.Attributes{
- MetricsType: storage.UnaggregatedMetricsType,
+ Attributes: storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
},
- }, nil
+ })
}
func parseRequest(r *http.Request) (*WriteQuery, *xhttp.ParseError) {
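
The handler now builds its write query through storage.NewWriteQuery(storage.WriteQueryOptions{...}) rather than filling in the struct literal directly, so validation lives in one constructor. A rough sketch of the options-constructor pattern, using hypothetical types:

```go
package main

import (
	"errors"
	"fmt"
)

// writeQueryOptions is a hypothetical stand-in for storage.WriteQueryOptions.
type writeQueryOptions struct {
	Tags       map[string]string
	Datapoints []float64
}

type writeQuery struct{ opts writeQueryOptions }

// newWriteQuery validates in one place so no caller can construct an invalid
// query by filling in a struct literal directly.
func newWriteQuery(opts writeQueryOptions) (*writeQuery, error) {
	if len(opts.Datapoints) == 0 {
		return nil, errors.New("write query requires at least one datapoint")
	}
	return &writeQuery{opts: opts}, nil
}

func main() {
	if _, err := newWriteQuery(writeQueryOptions{}); err != nil {
		fmt.Println("rejected:", err)
	}
	q, err := newWriteQuery(writeQueryOptions{
		Tags:       map[string]string{"host": "a"},
		Datapoints: []float64{42},
	})
	fmt.Println(q != nil, err)
}
```
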
diff --git a/src/query/api/v1/handler/json/write_test.go b/src/query/api/v1/handler/json/write_test.go
index 28e86a9a4e..10295e9224 100644
--- a/src/query/api/v1/handler/json/write_test.go
+++ b/src/query/api/v1/handler/json/write_test.go
@@ -22,7 +22,6 @@ package json
import (
"bytes"
- "context"
"fmt"
"io/ioutil"
"net/http"
@@ -31,6 +30,7 @@ import (
"testing"
"github.com/m3db/m3/src/query/api/v1/options"
+ "github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/test/m3"
"github.com/golang/mock/gomock"
@@ -79,22 +79,20 @@ func TestJSONWrite(t *testing.T) {
session.EXPECT().IteratorPools().
Return(nil, nil).AnyTimes()
- opts := options.EmptyHandlerOptions().SetStorage(storage)
- jsonWrite := NewWriteJSONHandler(opts).(*WriteJSONHandler)
+ opts := options.EmptyHandlerOptions().
+ SetTagOptions(models.NewTagOptions()).
+ SetStorage(storage)
+ handler := NewWriteJSONHandler(opts).(*WriteJSONHandler)
jsonReq := generateJSONWriteRequest()
req, err := http.NewRequest(JSONWriteHTTPMethod, WriteJSONURL,
strings.NewReader(jsonReq))
require.NoError(t, err)
- r, rErr := parseRequest(req)
- require.Nil(t, rErr, "unable to parse request")
-
- writeQuery, err := newStorageWriteQuery(r)
- require.NoError(t, err)
+ resp := httptest.NewRecorder()
+ handler.ServeHTTP(resp, req)
- writeErr := jsonWrite.store.Write(context.TODO(), writeQuery)
- require.NoError(t, writeErr)
+ require.Equal(t, http.StatusOK, resp.Code)
}
func TestJSONWriteError(t *testing.T) {
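
The rewritten test drives the handler end to end through ServeHTTP and asserts only on the recorded response, instead of reaching into the now-unexported query helper. A self-contained sketch of that style using just the standard library:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	// A stand-in handler; the test pattern is what matters: build a request,
	// record the response, assert only on observable HTTP behavior.
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, err := io.ReadAll(r.Body)
		if err != nil || len(body) == 0 {
			http.Error(w, "empty body", http.StatusBadRequest)
			return
		}
		w.WriteHeader(http.StatusOK)
	})

	req := httptest.NewRequest("POST", "/api/v1/json/write",
		strings.NewReader(`{"tags":{"host":"a"},"timestamp":"2020-01-01T00:00:00Z","value":42}`))
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	fmt.Println(rec.Code == http.StatusOK)
}
```
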
diff --git a/src/query/api/v1/handler/namespace/add.go b/src/query/api/v1/handler/namespace/add.go
index 649c853901..43e0bd0d6c 100644
--- a/src/query/api/v1/handler/namespace/add.go
+++ b/src/query/api/v1/handler/namespace/add.go
@@ -28,7 +28,6 @@ import (
"path"
clusterclient "github.com/m3db/m3/src/cluster/client"
- "github.com/m3db/m3/src/cluster/kv"
nsproto "github.com/m3db/m3/src/dbnode/generated/proto/namespace"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/query/api/v1/handler"
@@ -133,11 +132,7 @@ func (h *AddHandler) Add(
return emptyReg, fmt.Errorf("unable to get metadata: %v", err)
}
- kvOpts := kv.NewOverrideOptions().
- SetEnvironment(opts.ServiceEnvironment).
- SetZone(opts.ServiceZone)
-
- store, err := h.client.Store(kvOpts)
+ store, err := h.client.Store(opts.KVOverrideOptions())
if err != nil {
return emptyReg, err
}
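
The add, get, delete, and schema handlers now all call opts.KVOverrideOptions() instead of assembling kv.NewOverrideOptions() inline. A sketch of that consolidation, with hypothetical stand-ins for the m3 option types:

```go
package main

import "fmt"

// Hypothetical stand-ins for handleroptions.ServiceOptions and
// kv.OverrideOptions; only the shape of the refactor is shown.
type serviceOptions struct {
	ServiceEnvironment string
	ServiceZone        string
}

type kvOverrideOptions struct {
	Environment string
	Zone        string
}

// KVOverrideOptions centralizes the translation so every handler scopes its
// KV store the same way instead of repeating the builder chain.
func (s serviceOptions) KVOverrideOptions() kvOverrideOptions {
	return kvOverrideOptions{
		Environment: s.ServiceEnvironment,
		Zone:        s.ServiceZone,
	}
}

func main() {
	opts := serviceOptions{ServiceEnvironment: "test_env", ServiceZone: "embedded"}
	fmt.Printf("%+v\n", opts.KVOverrideOptions())
}
```
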
diff --git a/src/query/api/v1/handler/namespace/add_test.go b/src/query/api/v1/handler/namespace/add_test.go
index c361ec49a8..3655083719 100644
--- a/src/query/api/v1/handler/namespace/add_test.go
+++ b/src/query/api/v1/handler/namespace/add_test.go
@@ -30,6 +30,8 @@ import (
"github.com/m3db/m3/src/cluster/kv"
nsproto "github.com/m3db/m3/src/dbnode/generated/proto/namespace"
"github.com/m3db/m3/src/x/instrument"
+ xjson "github.com/m3db/m3/src/x/json"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@@ -73,14 +75,13 @@ func TestNamespaceAddHandler(t *testing.T) {
// Error case where required fields are not set
w := httptest.NewRecorder()
- jsonInput := `
- {
- "name": "testNamespace",
- "options": {}
- }
- `
+ jsonInput := xjson.Map{
+ "name": "testNamespace",
+ "options": xjson.Map{},
+ }
- req := httptest.NewRequest("POST", "/namespace", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/namespace",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
addHandler.ServeHTTP(svcDefaults, w, req)
@@ -105,7 +106,43 @@ func TestNamespaceAddHandler(t *testing.T) {
resp = w.Result()
body, _ = ioutil.ReadAll(resp.Body)
assert.Equal(t, http.StatusOK, resp.StatusCode)
- assert.Equal(t, "{\"registry\":{\"namespaces\":{\"testNamespace\":{\"bootstrapEnabled\":true,\"flushEnabled\":true,\"writesToCommitLog\":true,\"cleanupEnabled\":true,\"repairEnabled\":true,\"retentionOptions\":{\"retentionPeriodNanos\":\"172800000000000\",\"blockSizeNanos\":\"7200000000000\",\"bufferFutureNanos\":\"600000000000\",\"bufferPastNanos\":\"600000000000\",\"blockDataExpiry\":true,\"blockDataExpiryAfterNotAccessPeriodNanos\":\"300000000000\",\"futureRetentionPeriodNanos\":\"0\"},\"snapshotEnabled\":true,\"indexOptions\":{\"enabled\":true,\"blockSizeNanos\":\"7200000000000\"},\"schemaOptions\":null,\"coldWritesEnabled\":false}}}}", string(body))
+
+ expected := xtest.MustPrettyJSONMap(t,
+ xjson.Map{
+ "registry": xjson.Map{
+ "namespaces": xjson.Map{
+ "testNamespace": xjson.Map{
+ "bootstrapEnabled": true,
+ "flushEnabled": true,
+ "writesToCommitLog": true,
+ "cleanupEnabled": true,
+ "repairEnabled": true,
+ "retentionOptions": xjson.Map{
+ "retentionPeriodNanos": "172800000000000",
+ "blockSizeNanos": "7200000000000",
+ "bufferFutureNanos": "600000000000",
+ "bufferPastNanos": "600000000000",
+ "blockDataExpiry": true,
+ "blockDataExpiryAfterNotAccessPeriodNanos": "300000000000",
+ "futureRetentionPeriodNanos": "0",
+ },
+ "snapshotEnabled": true,
+ "indexOptions": xjson.Map{
+ "enabled": true,
+ "blockSizeNanos": "7200000000000",
+ },
+ "runtimeOptions": nil,
+ "schemaOptions": nil,
+ "coldWritesEnabled": false,
+ },
+ },
+ },
+ })
+
+ actual := xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual,
+ xtest.Diff(expected, actual))
}
func TestNamespaceAddHandler_Conflict(t *testing.T) {
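
Comparing pretty-printed JSON maps instead of one giant string literal makes failures readable and diffable. A minimal standalone version of the helper idea behind xtest.MustPrettyJSONMap/MustPrettyJSONString, assuming only encoding/json:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// prettyJSON re-marshals raw JSON with stable indentation (and, for maps,
// sorted keys), so expected and actual bodies compare as readable multi-line
// strings.
func prettyJSON(raw []byte) (string, error) {
	var v interface{}
	if err := json.Unmarshal(raw, &v); err != nil {
		return "", err
	}
	out, err := json.MarshalIndent(v, "", "  ")
	if err != nil {
		return "", err
	}
	return string(out), nil
}

func main() {
	expected, _ := json.MarshalIndent(map[string]interface{}{
		"name":    "testNamespace",
		"options": map[string]interface{}{},
	}, "", "  ")

	body := []byte(`{"options":{},"name":"testNamespace"}`) // key order differs
	actual, err := prettyJSON(body)
	fmt.Println(err == nil, actual == string(expected))
}
```
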
diff --git a/src/query/api/v1/handler/namespace/common.go b/src/query/api/v1/handler/namespace/common.go
index a2a51c794c..388eb09334 100644
--- a/src/query/api/v1/handler/namespace/common.go
+++ b/src/query/api/v1/handler/namespace/common.go
@@ -21,6 +21,7 @@
package namespace
import (
+ "errors"
"fmt"
"net/http"
"path"
@@ -58,6 +59,8 @@ var (
M3DBServiceNamespacePathName = path.Join(ServicesPathName, M3DBServiceName, NamespacePathName)
// M3DBServiceSchemaPathName is the M3DB service schema API path.
M3DBServiceSchemaPathName = path.Join(ServicesPathName, M3DBServiceName, SchemaPathName)
+
+	errNamespaceNotFound = errors.New("unable to find a namespace with the specified name")
)
// Handler represents a generic handler for namespace endpoints.
@@ -120,7 +123,8 @@ func RegisterRoutes(
}
// Get M3DB namespaces.
- getHandler := wrapped(NewGetHandler(client, instrumentOpts))
+ getHandler := wrapped(
+ applyMiddleware(NewGetHandler(client, instrumentOpts).ServeHTTP, defaults))
r.HandleFunc(DeprecatedM3DBGetURL, getHandler.ServeHTTP).Methods(GetHTTPMethod)
r.HandleFunc(M3DBGetURL, getHandler.ServeHTTP).Methods(GetHTTPMethod)
@@ -130,8 +134,14 @@ func RegisterRoutes(
r.HandleFunc(DeprecatedM3DBAddURL, addHandler.ServeHTTP).Methods(AddHTTPMethod)
r.HandleFunc(M3DBAddURL, addHandler.ServeHTTP).Methods(AddHTTPMethod)
+ // Update M3DB namespaces.
+ updateHandler := wrapped(
+ applyMiddleware(NewUpdateHandler(client, instrumentOpts).ServeHTTP, defaults))
+ r.HandleFunc(M3DBUpdateURL, updateHandler.ServeHTTP).Methods(UpdateHTTPMethod)
+
// Delete M3DB namespaces.
- deleteHandler := wrapped(NewDeleteHandler(client, instrumentOpts))
+ deleteHandler := wrapped(
+ applyMiddleware(NewDeleteHandler(client, instrumentOpts).ServeHTTP, defaults))
r.HandleFunc(DeprecatedM3DBDeleteURL, deleteHandler.ServeHTTP).Methods(DeleteHTTPMethod)
r.HandleFunc(M3DBDeleteURL, deleteHandler.ServeHTTP).Methods(DeleteHTTPMethod)
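
RegisterRoutes now wraps the get, update, and delete handlers with applyMiddleware so each receives per-service defaults. The adapter is a closure over those defaults; a sketch with hypothetical types (the real handleroptions signatures differ):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// serviceDefaults is a hypothetical stand-in for
// handleroptions.ServiceNameAndDefaults.
type serviceDefaults struct{ Name string }

type serviceHandlerFunc func(svc serviceDefaults, w http.ResponseWriter, r *http.Request)

// applyDefaults closes over the per-service defaults, turning the
// three-argument handler into a plain http.Handler a router can register.
func applyDefaults(fn serviceHandlerFunc, svc serviceDefaults) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fn(svc, w, r)
	})
}

func main() {
	h := applyDefaults(func(svc serviceDefaults, w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "service=%s", svc.Name)
	}, serviceDefaults{Name: "m3db"})

	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest("GET", "/api/v1/services/m3db/namespace", nil))
	fmt.Println(rec.Body.String())
}
```
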
diff --git a/src/query/api/v1/handler/namespace/common_test.go b/src/query/api/v1/handler/namespace/common_test.go
index ef5c0c6d97..b4649bf8d0 100644
--- a/src/query/api/v1/handler/namespace/common_test.go
+++ b/src/query/api/v1/handler/namespace/common_test.go
@@ -22,6 +22,7 @@ package namespace
import (
"errors"
+ "fmt"
"testing"
"github.com/m3db/m3/src/cluster/kv"
@@ -32,6 +33,38 @@ import (
"github.com/stretchr/testify/require"
)
+type storeOptionsMatcher struct {
+ zone string
+ namespace string
+ environment string
+}
+
+func (s storeOptionsMatcher) Matches(x interface{}) bool {
+ opts := x.(kv.OverrideOptions)
+ if s.zone != "" && s.zone != opts.Zone() {
+ return false
+ }
+ if s.namespace != "" && s.namespace != opts.Namespace() {
+ return false
+ }
+ if s.environment != "" && s.environment != opts.Environment() {
+ return false
+ }
+ return true
+}
+
+func (s storeOptionsMatcher) String() string {
+ return fmt.Sprintf("checks that zone=%s, namespace=%s, environment=%s", s.zone, s.namespace, s.environment)
+}
+
+func newStoreOptionsMatcher(zone, namespace, environment string) gomock.Matcher {
+ return storeOptionsMatcher{
+ zone: zone,
+ namespace: namespace,
+ environment: environment,
+ }
+}
+
func TestMetadata(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -56,7 +89,7 @@ func TestMetadata(t *testing.T) {
registry := nsproto.Registry{
Namespaces: map[string]*nsproto.NamespaceOptions{
- "metrics-ns1": &nsproto.NamespaceOptions{
+ "metrics-ns1": {
BootstrapEnabled: true,
FlushEnabled: true,
WritesToCommitLog: false,
@@ -71,7 +104,7 @@ func TestMetadata(t *testing.T) {
BlockDataExpiryAfterNotAccessPeriodNanos: 5000000000,
},
},
- "metrics-ns2": &nsproto.NamespaceOptions{
+ "metrics-ns2": {
BootstrapEnabled: true,
FlushEnabled: true,
WritesToCommitLog: true,
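
storeOptionsMatcher above implements gomock's Matcher interface so the tests can assert on just the fields of the kv.OverrideOptions passed to Store that they care about. A standalone sketch of the same partial-match idea (the interface below mirrors gomock.Matcher):

```go
package main

import "fmt"

// matcher mirrors the gomock.Matcher interface: Matches reports whether a
// call argument is acceptable, String describes the expectation on failure.
type matcher interface {
	Matches(x interface{}) bool
	String() string
}

type overrideOptions struct{ zone, env string }

// optionsMatcher only constrains fields that were set, so a single matcher
// type serves tests that care about zone, environment, or both.
type optionsMatcher struct{ zone, env string }

func (m optionsMatcher) Matches(x interface{}) bool {
	opts, ok := x.(overrideOptions)
	if !ok {
		return false
	}
	if m.zone != "" && m.zone != opts.zone {
		return false
	}
	if m.env != "" && m.env != opts.env {
		return false
	}
	return true
}

func (m optionsMatcher) String() string {
	return fmt.Sprintf("checks that zone=%q, environment=%q", m.zone, m.env)
}

func main() {
	var m matcher = optionsMatcher{env: "test_env"}
	fmt.Println(m.Matches(overrideOptions{zone: "embedded", env: "test_env"})) // true
	fmt.Println(m.Matches(overrideOptions{env: "prod"}))                       // false
}
```
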
diff --git a/src/query/api/v1/handler/namespace/delete.go b/src/query/api/v1/handler/namespace/delete.go
index 930f5fc25b..7b3fa9f93b 100644
--- a/src/query/api/v1/handler/namespace/delete.go
+++ b/src/query/api/v1/handler/namespace/delete.go
@@ -31,6 +31,7 @@ import (
clusterclient "github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/query/api/v1/handler"
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/query/util/logging"
"github.com/m3db/m3/src/x/instrument"
xhttp "github.com/m3db/m3/src/x/net/http"
@@ -60,8 +61,6 @@ var (
)
var (
- errNamespaceNotFound = errors.New("unable to find a namespace with specified name")
-
errEmptyID = errors.New("must specify namespace ID to delete")
)
@@ -79,7 +78,11 @@ func NewDeleteHandler(
}
}
-func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (h *DeleteHandler) ServeHTTP(
+ svc handleroptions.ServiceNameAndDefaults,
+ w http.ResponseWriter,
+ r *http.Request,
+) {
ctx := r.Context()
logger := logging.WithContext(ctx, h.instrumentOpts)
id := strings.TrimSpace(mux.Vars(r)[namespaceIDVar])
@@ -89,7 +92,8 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
- err := h.Delete(id)
+ opts := handleroptions.NewServiceOptions(svc, r.Header, nil)
+ err := h.Delete(id, opts)
if err != nil {
logger.Error("unable to delete namespace", zap.Error(err))
if err == errNamespaceNotFound {
@@ -108,8 +112,8 @@ func (h *DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
// Delete deletes a namespace.
-func (h *DeleteHandler) Delete(id string) error {
- store, err := h.client.KV()
+func (h *DeleteHandler) Delete(id string, opts handleroptions.ServiceOptions) error {
+ store, err := h.client.Store(opts.KVOverrideOptions())
if err != nil {
return err
}
diff --git a/src/query/api/v1/handler/namespace/delete_test.go b/src/query/api/v1/handler/namespace/delete_test.go
index 379084c6c1..08fd5bb1b0 100644
--- a/src/query/api/v1/handler/namespace/delete_test.go
+++ b/src/query/api/v1/handler/namespace/delete_test.go
@@ -41,6 +41,7 @@ func TestNamespaceDeleteHandlerNotFound(t *testing.T) {
defer ctrl.Finish()
mockClient, mockKV := setupNamespaceTest(t, ctrl)
+ mockClient.EXPECT().Store(gomock.Any()).Return(mockKV, nil).AnyTimes()
deleteHandler := NewDeleteHandler(mockClient, instrument.NewOptions())
w := httptest.NewRecorder()
@@ -50,7 +51,7 @@ func TestNamespaceDeleteHandlerNotFound(t *testing.T) {
require.NotNil(t, req)
mockKV.EXPECT().Get(M3DBNodeNamespacesKey).Return(nil, kv.ErrNotFound)
- deleteHandler.ServeHTTP(w, req)
+ deleteHandler.ServeHTTP(svcDefaults, w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
@@ -63,6 +64,7 @@ func TestNamespaceDeleteHandlerDeleteAll(t *testing.T) {
defer ctrl.Finish()
mockClient, mockKV := setupNamespaceTest(t, ctrl)
+ mockClient.EXPECT().Store(gomock.Any()).Return(mockKV, nil).AnyTimes()
deleteHandler := NewDeleteHandler(mockClient, instrument.NewOptions())
w := httptest.NewRecorder()
@@ -97,7 +99,7 @@ func TestNamespaceDeleteHandlerDeleteAll(t *testing.T) {
mockKV.EXPECT().Get(M3DBNodeNamespacesKey).Return(mockValue, nil)
mockKV.EXPECT().Delete(M3DBNodeNamespacesKey).Return(nil, nil)
- deleteHandler.ServeHTTP(w, req)
+ deleteHandler.ServeHTTP(svcDefaults, w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
@@ -110,6 +112,7 @@ func TestNamespaceDeleteHandler(t *testing.T) {
defer ctrl.Finish()
mockClient, mockKV := setupNamespaceTest(t, ctrl)
+ mockClient.EXPECT().Store(gomock.Any()).Return(mockKV, nil).AnyTimes()
deleteHandler := NewDeleteHandler(mockClient, instrument.NewOptions())
w := httptest.NewRecorder()
@@ -159,7 +162,7 @@ func TestNamespaceDeleteHandler(t *testing.T) {
mockKV.EXPECT().Get(M3DBNodeNamespacesKey).Return(mockValue, nil)
mockKV.EXPECT().CheckAndSet(M3DBNodeNamespacesKey, gomock.Any(), gomock.Any()).Return(1, nil)
- deleteHandler.ServeHTTP(w, req)
+ deleteHandler.ServeHTTP(svcDefaults, w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
diff --git a/src/query/api/v1/handler/namespace/get.go b/src/query/api/v1/handler/namespace/get.go
index 7c3b8d756f..071c3c701d 100644
--- a/src/query/api/v1/handler/namespace/get.go
+++ b/src/query/api/v1/handler/namespace/get.go
@@ -31,6 +31,7 @@ import (
"github.com/m3db/m3/src/cluster/kv"
nsproto "github.com/m3db/m3/src/dbnode/generated/proto/namespace"
"github.com/m3db/m3/src/query/api/v1/handler"
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/query/generated/proto/admin"
"github.com/m3db/m3/src/query/util/logging"
"github.com/m3db/m3/src/x/instrument"
@@ -71,10 +72,15 @@ func NewGetHandler(
}
}
-func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (h *GetHandler) ServeHTTP(
+ svc handleroptions.ServiceNameAndDefaults,
+ w http.ResponseWriter,
+ r *http.Request,
+) {
ctx := r.Context()
logger := logging.WithContext(ctx, h.instrumentOpts)
- nsRegistry, err := h.Get()
+ opts := handleroptions.NewServiceOptions(svc, r.Header, nil)
+ nsRegistry, err := h.Get(opts)
if err != nil {
logger.Error("unable to get namespace", zap.Error(err))
@@ -102,10 +108,10 @@ func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
// Get gets the namespaces.
-func (h *GetHandler) Get() (nsproto.Registry, error) {
+func (h *GetHandler) Get(opts handleroptions.ServiceOptions) (nsproto.Registry, error) {
var emptyReg = nsproto.Registry{}
- store, err := h.client.KV()
+ store, err := h.client.Store(opts.KVOverrideOptions())
if err != nil {
return emptyReg, err
}
diff --git a/src/query/api/v1/handler/namespace/get_test.go b/src/query/api/v1/handler/namespace/get_test.go
index c9e4a55a49..33890d1d31 100644
--- a/src/query/api/v1/handler/namespace/get_test.go
+++ b/src/query/api/v1/handler/namespace/get_test.go
@@ -29,7 +29,10 @@ import (
"github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/cluster/kv"
nsproto "github.com/m3db/m3/src/dbnode/generated/proto/namespace"
+ "github.com/m3db/m3/src/x/headers"
"github.com/m3db/m3/src/x/instrument"
+ xjson "github.com/m3db/m3/src/x/json"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@@ -54,6 +57,7 @@ func TestNamespaceGetHandler(t *testing.T) {
mockClient, mockKV := setupNamespaceTest(t, ctrl)
getHandler := NewGetHandler(mockClient, instrument.NewOptions())
+ mockClient.EXPECT().Store(gomock.Any()).Return(mockKV, nil)
// Test no namespace
w := httptest.NewRecorder()
@@ -61,8 +65,10 @@ func TestNamespaceGetHandler(t *testing.T) {
req := httptest.NewRequest("GET", "/namespace/get", nil)
require.NotNil(t, req)
+ matcher := newStoreOptionsMatcher("", "", "test_env")
+ mockClient.EXPECT().Store(matcher).Return(mockKV, nil)
mockKV.EXPECT().Get(M3DBNodeNamespacesKey).Return(nil, kv.ErrNotFound)
- getHandler.ServeHTTP(w, req)
+ getHandler.ServeHTTP(svcDefaults, w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
@@ -73,11 +79,12 @@ func TestNamespaceGetHandler(t *testing.T) {
w = httptest.NewRecorder()
req = httptest.NewRequest("GET", "/namespace/get", nil)
+ req.Header.Set(headers.HeaderClusterEnvironmentName, "test_env")
require.NotNil(t, req)
registry := nsproto.Registry{
Namespaces: map[string]*nsproto.NamespaceOptions{
- "test": &nsproto.NamespaceOptions{
+ "test": {
BootstrapEnabled: true,
FlushEnabled: true,
SnapshotEnabled: true,
@@ -100,12 +107,45 @@ func TestNamespaceGetHandler(t *testing.T) {
mockValue.EXPECT().Unmarshal(gomock.Any()).Return(nil).SetArg(0, registry)
mockKV.EXPECT().Get(M3DBNodeNamespacesKey).Return(mockValue, nil)
- getHandler.ServeHTTP(w, req)
+ getHandler.ServeHTTP(svcDefaults, w, req)
resp = w.Result()
body, _ = ioutil.ReadAll(resp.Body)
assert.Equal(t, http.StatusOK, resp.StatusCode)
- assert.Equal(t, "{\"registry\":{\"namespaces\":{\"test\":{\"bootstrapEnabled\":true,\"flushEnabled\":true,\"writesToCommitLog\":true,\"cleanupEnabled\":false,\"repairEnabled\":false,\"retentionOptions\":{\"retentionPeriodNanos\":\"172800000000000\",\"blockSizeNanos\":\"7200000000000\",\"bufferFutureNanos\":\"600000000000\",\"bufferPastNanos\":\"600000000000\",\"blockDataExpiry\":true,\"blockDataExpiryAfterNotAccessPeriodNanos\":\"3600000000000\",\"futureRetentionPeriodNanos\":\"0\"},\"snapshotEnabled\":true,\"indexOptions\":null,\"schemaOptions\":null,\"coldWritesEnabled\":false}}}}", string(body))
+
+ expected := xtest.MustPrettyJSONMap(t,
+ xjson.Map{
+ "registry": xjson.Map{
+ "namespaces": xjson.Map{
+ "test": xjson.Map{
+ "bootstrapEnabled": true,
+ "cleanupEnabled": false,
+ "coldWritesEnabled": false,
+ "flushEnabled": true,
+ "indexOptions": nil,
+ "repairEnabled": false,
+ "retentionOptions": xjson.Map{
+ "blockDataExpiry": true,
+ "blockDataExpiryAfterNotAccessPeriodNanos": "3600000000000",
+ "blockSizeNanos": "7200000000000",
+ "bufferFutureNanos": "600000000000",
+ "bufferPastNanos": "600000000000",
+ "futureRetentionPeriodNanos": "0",
+ "retentionPeriodNanos": "172800000000000",
+ },
+ "runtimeOptions": nil,
+ "schemaOptions": nil,
+ "snapshotEnabled": true,
+ "writesToCommitLog": true,
+ },
+ },
+ },
+ })
+
+ actual := xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual,
+ xtest.Diff(expected, actual))
}
func TestNamespaceGetHandlerWithDebug(t *testing.T) {
@@ -114,6 +154,7 @@ func TestNamespaceGetHandlerWithDebug(t *testing.T) {
mockClient, mockKV := setupNamespaceTest(t, ctrl)
getHandler := NewGetHandler(mockClient, instrument.NewOptions())
+ mockClient.EXPECT().Store(gomock.Any()).Return(mockKV, nil)
// Test namespace present
w := httptest.NewRecorder()
@@ -123,7 +164,7 @@ func TestNamespaceGetHandlerWithDebug(t *testing.T) {
registry := nsproto.Registry{
Namespaces: map[string]*nsproto.NamespaceOptions{
- "test": &nsproto.NamespaceOptions{
+ "test": {
BootstrapEnabled: true,
FlushEnabled: true,
SnapshotEnabled: true,
@@ -146,10 +187,43 @@ func TestNamespaceGetHandlerWithDebug(t *testing.T) {
mockValue.EXPECT().Unmarshal(gomock.Any()).Return(nil).SetArg(0, registry)
mockKV.EXPECT().Get(M3DBNodeNamespacesKey).Return(mockValue, nil)
- getHandler.ServeHTTP(w, req)
+ getHandler.ServeHTTP(svcDefaults, w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, http.StatusOK, resp.StatusCode)
- assert.Equal(t, "{\"registry\":{\"namespaces\":{\"test\":{\"bootstrapEnabled\":true,\"cleanupEnabled\":false,\"coldWritesEnabled\":false,\"flushEnabled\":true,\"indexOptions\":null,\"repairEnabled\":false,\"retentionOptions\":{\"blockDataExpiry\":true,\"blockDataExpiryAfterNotAccessPeriodDuration\":\"1h0m0s\",\"blockSizeDuration\":\"2h0m0s\",\"bufferFutureDuration\":\"10m0s\",\"bufferPastDuration\":\"10m0s\",\"futureRetentionPeriodDuration\":\"0s\",\"retentionPeriodDuration\":\"48h0m0s\"},\"schemaOptions\":null,\"snapshotEnabled\":true,\"writesToCommitLog\":true}}}}", string(body))
+
+ expected := xtest.MustPrettyJSONMap(t,
+ xjson.Map{
+ "registry": xjson.Map{
+ "namespaces": xjson.Map{
+ "test": xjson.Map{
+ "bootstrapEnabled": true,
+ "cleanupEnabled": false,
+ "coldWritesEnabled": false,
+ "flushEnabled": true,
+ "indexOptions": nil,
+ "repairEnabled": false,
+ "retentionOptions": xjson.Map{
+ "blockDataExpiry": true,
+ "blockDataExpiryAfterNotAccessPeriodDuration": "1h0m0s",
+ "blockSizeDuration": "2h0m0s",
+ "bufferFutureDuration": "10m0s",
+ "bufferPastDuration": "10m0s",
+ "futureRetentionPeriodDuration": "0s",
+ "retentionPeriodDuration": "48h0m0s",
+ },
+ "runtimeOptions": nil,
+ "schemaOptions": nil,
+ "snapshotEnabled": true,
+ "writesToCommitLog": true,
+ },
+ },
+ },
+ })
+
+ actual := xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual,
+ xtest.Diff(expected, actual))
}
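
The get test sets the cluster-environment header and, via the matcher, verifies the KV store is opened against that environment. A sketch of header-driven override resolution; the header constant here is a hypothetical stand-in for headers.HeaderClusterEnvironmentName:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// headerClusterEnvironment is a hypothetical stand-in for the
// headers.HeaderClusterEnvironmentName constant used by the test.
const headerClusterEnvironment = "Cluster-Environment-Name"

type overrideOptions struct{ Environment string }

// optionsFromRequest scopes the KV lookup to the environment named in the
// request header, falling back to the service default when absent.
func optionsFromRequest(r *http.Request, fallback string) overrideOptions {
	if env := r.Header.Get(headerClusterEnvironment); env != "" {
		return overrideOptions{Environment: env}
	}
	return overrideOptions{Environment: fallback}
}

func main() {
	req := httptest.NewRequest("GET", "/api/v1/namespace", nil)
	req.Header.Set(headerClusterEnvironment, "test_env")
	fmt.Printf("%+v\n", optionsFromRequest(req, "default_env"))
}
```
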
diff --git a/src/query/api/v1/handler/namespace/schema.go b/src/query/api/v1/handler/namespace/schema.go
index b2db9bf030..59c8ace92e 100644
--- a/src/query/api/v1/handler/namespace/schema.go
+++ b/src/query/api/v1/handler/namespace/schema.go
@@ -119,11 +119,7 @@ func (h *SchemaHandler) Add(
) (admin.NamespaceSchemaAddResponse, error) {
var emptyRep = admin.NamespaceSchemaAddResponse{}
- kvOpts := kv.NewOverrideOptions().
- SetEnvironment(opts.ServiceEnvironment).
- SetZone(opts.ServiceZone)
-
- store, err := h.client.Store(kvOpts)
+ store, err := h.client.Store(opts.KVOverrideOptions())
if err != nil {
return emptyRep, err
}
@@ -207,11 +203,7 @@ func (h *SchemaResetHandler) Reset(
return &emptyRep, fmt.Errorf("CAUTION! Reset schema will prevent proto-enabled namespace from loading, proceed if you know what you are doing, please retry with force set to true")
}
- kvOpts := kv.NewOverrideOptions().
- SetEnvironment(opts.ServiceEnvironment).
- SetZone(opts.ServiceZone)
-
- store, err := h.client.Store(kvOpts)
+ store, err := h.client.Store(opts.KVOverrideOptions())
if err != nil {
return &emptyRep, err
}
diff --git a/src/query/api/v1/handler/namespace/schema_test.go b/src/query/api/v1/handler/namespace/schema_test.go
index d40a2e465e..c95e7e2572 100644
--- a/src/query/api/v1/handler/namespace/schema_test.go
+++ b/src/query/api/v1/handler/namespace/schema_test.go
@@ -36,6 +36,7 @@ import (
"github.com/m3db/m3/src/dbnode/namespace/kvadmin"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/x/instrument"
+ xjson "github.com/m3db/m3/src/x/json"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@@ -141,13 +142,12 @@ func TestSchemaDeploy_KVKeyNotFound(t *testing.T) {
// Error case where required fields are not set
w := httptest.NewRecorder()
- jsonInput := `
- {
- "name": "testNamespace"
- }
- `
+ jsonInput := xjson.Map{
+ "name": "testNamespace",
+ }
- req := httptest.NewRequest("POST", "/schema", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/schema",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
mockKV.EXPECT().Get(M3DBNodeNamespacesKey).Return(nil, kv.ErrNotFound)
@@ -209,14 +209,13 @@ func TestSchemaDeploy_NamespaceNotFound(t *testing.T) {
schemaHandler := NewSchemaHandler(mockClient, instrument.NewOptions())
mockClient.EXPECT().Store(gomock.Any()).Return(mockKV, nil)
- jsonInput := `
- {
- "name": "no-such-namespace"
- }
- `
+ jsonInput := xjson.Map{
+ "name": "no-such-namespace",
+ }
	// Ensure adding to a non-existent namespace returns 404
- req := httptest.NewRequest("POST", "/namespace", strings.NewReader(jsonInput))
+ req := httptest.NewRequest("POST", "/namespace",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
registry := nsproto.Registry{
@@ -269,12 +268,12 @@ func TestSchemaReset(t *testing.T) {
w := httptest.NewRecorder()
- jsonInput := `
- {
- "name": "testNamespace"
- }
- `
- req := httptest.NewRequest("DELETE", "/schema", strings.NewReader(jsonInput))
+ jsonInput := xjson.Map{
+ "name": "testNamespace",
+ }
+
+ req := httptest.NewRequest("DELETE", "/schema",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
schemaHandler.ServeHTTP(svcDefaults, w, req)
@@ -284,7 +283,8 @@ func TestSchemaReset(t *testing.T) {
assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
w = httptest.NewRecorder()
- req = httptest.NewRequest("DELETE", "/schema", strings.NewReader(jsonInput))
+ req = httptest.NewRequest("DELETE", "/schema",
+ xjson.MustNewTestReader(t, jsonInput))
require.NotNil(t, req)
req.Header.Add("Force", "true")
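
The schema tests likewise swap raw JSON string literals for xjson.Map values rendered through MustNewTestReader. A minimal sketch of what such a test-reader helper can look like, assuming nothing beyond the standard library:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
)

// jsonMap mirrors the xjson.Map idea: request bodies are Go values, so tests
// get compiler-checked structure instead of hand-maintained string literals.
type jsonMap map[string]interface{}

func newTestReader(m jsonMap) (io.Reader, error) {
	data, err := json.Marshal(m)
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(data), nil
}

func main() {
	r, err := newTestReader(jsonMap{"name": "testNamespace", "options": jsonMap{}})
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(r)
	fmt.Println(string(body)) // {"name":"testNamespace","options":{}}
}
```
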
diff --git a/src/query/api/v1/handler/namespace/update.go b/src/query/api/v1/handler/namespace/update.go
new file mode 100644
index 0000000000..ed4417d08e
--- /dev/null
+++ b/src/query/api/v1/handler/namespace/update.go
@@ -0,0 +1,264 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package namespace
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net/http"
+ "path"
+ "reflect"
+
+ clusterclient "github.com/m3db/m3/src/cluster/client"
+ nsproto "github.com/m3db/m3/src/dbnode/generated/proto/namespace"
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/query/api/v1/handler"
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
+ "github.com/m3db/m3/src/query/generated/proto/admin"
+ "github.com/m3db/m3/src/query/util/logging"
+ "github.com/m3db/m3/src/x/instrument"
+ xhttp "github.com/m3db/m3/src/x/net/http"
+
+ "github.com/gogo/protobuf/jsonpb"
+ "go.uber.org/zap"
+)
+
+var (
+ // M3DBUpdateURL is the url for the M3DB namespace update handler.
+ M3DBUpdateURL = path.Join(handler.RoutePrefixV1, M3DBServiceNamespacePathName)
+
+ // UpdateHTTPMethod is the HTTP method used with this resource.
+ UpdateHTTPMethod = http.MethodPut
+
+ fieldNameRetentionOptions = "RetentionOptions"
+	fieldNameRetentionPeriod = "RetentionPeriodNanos"
+ fieldNameRuntimeOptions = "RuntimeOptions"
+
+ errEmptyNamespaceName = errors.New("must specify namespace name")
+ errEmptyNamespaceOptions = errors.New("update options cannot be empty")
+ errNamespaceFieldImmutable = errors.New("namespace option field is immutable")
+
+ allowedUpdateOptionsFields = map[string]struct{}{
+		fieldNameRetentionOptions: {},
+		fieldNameRuntimeOptions:   {},
+ }
+)
+
+// UpdateHandler is the handler for namespace updates.
+type UpdateHandler Handler
+
+// NewUpdateHandler returns a new instance of UpdateHandler.
+func NewUpdateHandler(
+ client clusterclient.Client,
+ instrumentOpts instrument.Options,
+) *UpdateHandler {
+ return &UpdateHandler{
+ client: client,
+ instrumentOpts: instrumentOpts,
+ }
+}
+
+func (h *UpdateHandler) ServeHTTP(
+ svc handleroptions.ServiceNameAndDefaults,
+ w http.ResponseWriter,
+ r *http.Request,
+) {
+ ctx := r.Context()
+ logger := logging.WithContext(ctx, h.instrumentOpts)
+
+ md, rErr := h.parseRequest(r)
+ if rErr != nil {
+ logger.Warn("unable to parse request", zap.Error(rErr))
+ xhttp.Error(w, rErr.Inner(), rErr.Code())
+ return
+ }
+
+ opts := handleroptions.NewServiceOptions(svc, r.Header, nil)
+ nsRegistry, parseErr, err := h.Update(md, opts)
+ if parseErr != nil {
+ logger.Warn("update namespace bad request", zap.Error(parseErr))
+ xhttp.Error(w, parseErr.Inner(), parseErr.Code())
+ return
+ }
+ if err != nil {
+ logger.Error("unable to update namespace", zap.Error(err))
+ xhttp.Error(w, err, http.StatusInternalServerError)
+ return
+ }
+
+ resp := &admin.NamespaceGetResponse{
+ Registry: &nsRegistry,
+ }
+
+ xhttp.WriteProtoMsgJSONResponse(w, resp, logger)
+}
+
+func (h *UpdateHandler) parseRequest(r *http.Request) (*admin.NamespaceUpdateRequest, *xhttp.ParseError) {
+ defer r.Body.Close()
+ rBody, err := xhttp.DurationToNanosBytes(r.Body)
+ if err != nil {
+ return nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+
+ updateReq := new(admin.NamespaceUpdateRequest)
+ if err := jsonpb.Unmarshal(bytes.NewReader(rBody), updateReq); err != nil {
+ return nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+
+ if err := validateUpdateRequest(updateReq); err != nil {
+ err := fmt.Errorf("unable to validate update request: %w", err)
+ return nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+
+ return updateReq, nil
+}
+
+// Ensure that only fields we allow to be updated (e.g. retention period) are
+// non-zero. Uses reflection so that any field later added to the namespace
+// options is rejected by default instead of slipping through unvalidated.
+func validateUpdateRequest(req *admin.NamespaceUpdateRequest) error {
+ if req.Name == "" {
+ return errEmptyNamespaceName
+ }
+
+ if req.Options == nil {
+ return errEmptyNamespaceOptions
+ }
+
+ optsVal := reflect.ValueOf(*req.Options)
+	allFieldsZero := true
+ for i := 0; i < optsVal.NumField(); i++ {
+ field := optsVal.Field(i)
+ fieldName := optsVal.Type().Field(i).Name
+ if field.IsZero() {
+ continue
+ }
+
+		allFieldsZero = false
+
+ _, ok := allowedUpdateOptionsFields[fieldName]
+ if !ok {
+ return fmt.Errorf("%s: %w", fieldName, errNamespaceFieldImmutable)
+ }
+ }
+
+	if allFieldsZero {
+ return errEmptyNamespaceOptions
+ }
+
+ if opts := req.Options.RetentionOptions; opts != nil {
+ optsVal := reflect.ValueOf(*opts)
+ for i := 0; i < optsVal.NumField(); i++ {
+ field := optsVal.Field(i)
+ fieldName := optsVal.Type().Field(i).Name
+			if !field.IsZero() && fieldName != fieldNameRetentionPeriod {
+ return fmt.Errorf("%s.%s: %w", fieldNameRetentionOptions, fieldName, errNamespaceFieldImmutable)
+ }
+ }
+ }
+
+ return nil
+}
+
+// Update updates a namespace.
+func (h *UpdateHandler) Update(
+ updateReq *admin.NamespaceUpdateRequest,
+ opts handleroptions.ServiceOptions,
+) (nsproto.Registry, *xhttp.ParseError, error) {
+ var emptyReg = nsproto.Registry{}
+
+ store, err := h.client.Store(opts.KVOverrideOptions())
+ if err != nil {
+ return emptyReg, nil, err
+ }
+
+ currentMetadata, version, err := Metadata(store)
+ if err != nil {
+ return emptyReg, nil, err
+ }
+
+ newMetadata := make(map[string]namespace.Metadata)
+ for _, ns := range currentMetadata {
+ newMetadata[ns.ID().String()] = ns
+ }
+
+ ns, ok := newMetadata[updateReq.Name]
+ if !ok {
+ parseErr := xhttp.NewParseError(
+			fmt.Errorf("namespace not found: name=%s", updateReq.Name),
+ http.StatusNotFound)
+ return emptyReg, parseErr, nil
+ }
+
+ // Replace targeted namespace with modified retention.
+ if newRetentionOpts := updateReq.Options.RetentionOptions; newRetentionOpts != nil {
+ if newNanos := newRetentionOpts.RetentionPeriodNanos; newNanos != 0 {
+ dur := namespace.FromNanos(newNanos)
+ retentionOpts := ns.Options().RetentionOptions().
+ SetRetentionPeriod(dur)
+ opts := ns.Options().
+ SetRetentionOptions(retentionOpts)
+ ns, err = namespace.NewMetadata(ns.ID(), opts)
+ if err != nil {
+ return emptyReg, nil, fmt.Errorf("error constructing new metadata: %w", err)
+ }
+ }
+ }
+
+ // Update runtime options.
+ if newRuntimeOpts := updateReq.Options.RuntimeOptions; newRuntimeOpts != nil {
+ runtimeOpts := ns.Options().RuntimeOptions()
+ if v := newRuntimeOpts.WriteIndexingPerCPUConcurrency; v != nil {
+ runtimeOpts = runtimeOpts.SetWriteIndexingPerCPUConcurrency(&v.Value)
+ }
+ if v := newRuntimeOpts.FlushIndexingPerCPUConcurrency; v != nil {
+ runtimeOpts = runtimeOpts.SetFlushIndexingPerCPUConcurrency(&v.Value)
+ }
+ opts := ns.Options().
+ SetRuntimeOptions(runtimeOpts)
+ ns, err = namespace.NewMetadata(ns.ID(), opts)
+ if err != nil {
+ return emptyReg, nil, fmt.Errorf("error constructing new metadata: %w", err)
+ }
+ }
+
+ // Update the namespace in case an update occurred.
+ newMetadata[updateReq.Name] = ns
+
+ // Set the new slice and update.
+ newMDs := make([]namespace.Metadata, 0, len(newMetadata))
+ for _, elem := range newMetadata {
+ newMDs = append(newMDs, elem)
+ }
+ nsMap, err := namespace.NewMap(newMDs)
+ if err != nil {
+ return emptyReg, nil, err
+ }
+
+ protoRegistry := namespace.ToProto(nsMap)
+ _, err = store.CheckAndSet(M3DBNodeNamespacesKey, version, protoRegistry)
+ if err != nil {
+ return emptyReg, nil, fmt.Errorf("failed to update namespace: %w", err)
+ }
+
+ return *protoRegistry, nil, nil
+}
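
The reflection walk in validateUpdateRequest is the core of the new handler: any non-zero options field outside the whitelist fails the request, so fields added to the proto later are immutable by default. A distilled, runnable sketch against a hypothetical options struct:

```go
package main

import (
	"errors"
	"fmt"
	"reflect"
)

// updateOptions is a hypothetical stand-in for the proto options message;
// only RetentionPeriod is meant to be mutable here.
type updateOptions struct {
	RetentionPeriod  int64
	BlockSize        int64
	BootstrapEnabled bool
}

var (
	errImmutable = errors.New("field is immutable")
	errEmpty     = errors.New("update options cannot be empty")
)

// validate walks the struct via reflection: any non-zero field outside the
// whitelist fails, so a field added later is immutable by default.
func validate(o updateOptions, mutable map[string]struct{}) error {
	v := reflect.ValueOf(o)
	allFieldsZero := true
	for i := 0; i < v.NumField(); i++ {
		if v.Field(i).IsZero() {
			continue
		}
		allFieldsZero = false
		name := v.Type().Field(i).Name
		if _, ok := mutable[name]; !ok {
			return fmt.Errorf("%s: %w", name, errImmutable)
		}
	}
	if allFieldsZero {
		return errEmpty
	}
	return nil
}

func main() {
	mutable := map[string]struct{}{"RetentionPeriod": {}}
	fmt.Println(validate(updateOptions{RetentionPeriod: 1}, mutable)) // <nil>
	fmt.Println(validate(updateOptions{BlockSize: 1}, mutable))       // BlockSize: field is immutable
	fmt.Println(validate(updateOptions{}, mutable))                   // update options cannot be empty
}
```
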
diff --git a/src/query/api/v1/handler/namespace/update_test.go b/src/query/api/v1/handler/namespace/update_test.go
new file mode 100644
index 0000000000..3ffd063f59
--- /dev/null
+++ b/src/query/api/v1/handler/namespace/update_test.go
@@ -0,0 +1,319 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package namespace
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+
+ "github.com/m3db/m3/src/cluster/kv"
+ nsproto "github.com/m3db/m3/src/dbnode/generated/proto/namespace"
+ "github.com/m3db/m3/src/query/generated/proto/admin"
+ "github.com/m3db/m3/src/x/instrument"
+ xjson "github.com/m3db/m3/src/x/json"
+ xtest "github.com/m3db/m3/src/x/test"
+
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ testUpdateJSON = `
+{
+ "name": "testNamespace",
+ "options": {
+ "retentionOptions": {
+ "retentionPeriodNanos": 345600000000000
+ }
+ }
+}
+`
+
+ testUpdateJSONNop = `
+{
+ "name": "testNamespace",
+ "options": {
+ "retentionOptions": {}
+ }
+}
+`
+)
+
+func TestNamespaceUpdateHandler(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ mockClient, mockKV := setupNamespaceTest(t, ctrl)
+ updateHandler := NewUpdateHandler(mockClient, instrument.NewOptions())
+ mockClient.EXPECT().Store(gomock.Any()).Return(mockKV, nil).Times(2)
+
+ // Error case where required fields are not set
+ w := httptest.NewRecorder()
+
+ jsonInput := xjson.Map{
+ "name": "testNamespace",
+ "options": xjson.Map{},
+ }
+
+ req := httptest.NewRequest("POST", "/namespace",
+ xjson.MustNewTestReader(t, jsonInput))
+ require.NotNil(t, req)
+
+ updateHandler.ServeHTTP(svcDefaults, w, req)
+
+ resp := w.Result()
+ body, err := ioutil.ReadAll(resp.Body)
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
+ assert.Equal(t, "{\"error\":\"unable to validate update request: update options cannot be empty\"}\n", string(body))
+
+	// Test the good case. Note: there is no way to tell the difference between
+	// a boolean explicitly set to false and one that was never set by the user.
+ w = httptest.NewRecorder()
+
+ req = httptest.NewRequest("PUT", "/namespace", strings.NewReader(testUpdateJSON))
+ require.NotNil(t, req)
+
+ registry := nsproto.Registry{
+ Namespaces: map[string]*nsproto.NamespaceOptions{
+ "testNamespace": {
+ BootstrapEnabled: true,
+ FlushEnabled: true,
+ SnapshotEnabled: true,
+ WritesToCommitLog: true,
+ CleanupEnabled: false,
+ RepairEnabled: false,
+ RetentionOptions: &nsproto.RetentionOptions{
+ RetentionPeriodNanos: 172800000000000,
+ BlockSizeNanos: 7200000000000,
+ BufferFutureNanos: 600000000000,
+ BufferPastNanos: 600000000000,
+ BlockDataExpiry: true,
+ BlockDataExpiryAfterNotAccessPeriodNanos: 3600000000000,
+ },
+ },
+ },
+ }
+
+ mockValue := kv.NewMockValue(ctrl)
+ mockValue.EXPECT().Unmarshal(gomock.Any()).Return(nil).SetArg(0, registry)
+ mockValue.EXPECT().Version().Return(0)
+ mockKV.EXPECT().Get(M3DBNodeNamespacesKey).Return(mockValue, nil)
+
+ mockKV.EXPECT().CheckAndSet(M3DBNodeNamespacesKey, gomock.Any(), gomock.Not(nil)).Return(1, nil)
+ updateHandler.ServeHTTP(svcDefaults, w, req)
+
+ resp = w.Result()
+ body, _ = ioutil.ReadAll(resp.Body)
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+
+ expected := xtest.MustPrettyJSONMap(t,
+ xjson.Map{
+ "registry": xjson.Map{
+ "namespaces": xjson.Map{
+ "testNamespace": xjson.Map{
+ "bootstrapEnabled": true,
+ "flushEnabled": true,
+ "writesToCommitLog": true,
+ "cleanupEnabled": false,
+ "repairEnabled": false,
+ "retentionOptions": xjson.Map{
+ "retentionPeriodNanos": "345600000000000",
+ "blockSizeNanos": "7200000000000",
+ "bufferFutureNanos": "600000000000",
+ "bufferPastNanos": "600000000000",
+ "blockDataExpiry": true,
+ "blockDataExpiryAfterNotAccessPeriodNanos": "3600000000000",
+ "futureRetentionPeriodNanos": "0",
+ },
+ "snapshotEnabled": true,
+ "indexOptions": xjson.Map{
+ "enabled": false,
+ "blockSizeNanos": "7200000000000",
+ },
+ "runtimeOptions": nil,
+ "schemaOptions": nil,
+ "coldWritesEnabled": false,
+ },
+ },
+ },
+ })
+
+ actual := xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual,
+ xtest.Diff(expected, actual))
+
+	// Ensure a no-op update (no mutable fields changed) leaves the existing
+	// namespaces unchanged.
+ w = httptest.NewRecorder()
+ req = httptest.NewRequest("PUT", "/namespace", strings.NewReader(testUpdateJSONNop))
+ require.NotNil(t, req)
+
+ mockValue = kv.NewMockValue(ctrl)
+ mockValue.EXPECT().Unmarshal(gomock.Any()).Return(nil).SetArg(0, registry)
+ mockValue.EXPECT().Version().Return(0)
+ mockKV.EXPECT().Get(M3DBNodeNamespacesKey).Return(mockValue, nil)
+
+ mockKV.EXPECT().CheckAndSet(M3DBNodeNamespacesKey, gomock.Any(), gomock.Not(nil)).Return(1, nil)
+ updateHandler.ServeHTTP(svcDefaults, w, req)
+
+ resp = w.Result()
+ body, _ = ioutil.ReadAll(resp.Body)
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+
+ expected = xtest.MustPrettyJSONMap(t,
+ xjson.Map{
+ "registry": xjson.Map{
+ "namespaces": xjson.Map{
+ "testNamespace": xjson.Map{
+ "bootstrapEnabled": true,
+ "flushEnabled": true,
+ "writesToCommitLog": true,
+ "cleanupEnabled": false,
+ "repairEnabled": false,
+ "retentionOptions": xjson.Map{
+ "retentionPeriodNanos": "172800000000000",
+ "blockSizeNanos": "7200000000000",
+ "bufferFutureNanos": "600000000000",
+ "bufferPastNanos": "600000000000",
+ "blockDataExpiry": true,
+ "blockDataExpiryAfterNotAccessPeriodNanos": "3600000000000",
+ "futureRetentionPeriodNanos": "0",
+ },
+ "snapshotEnabled": true,
+ "indexOptions": xjson.Map{
+ "enabled": false,
+ "blockSizeNanos": "7200000000000",
+ },
+ "runtimeOptions": nil,
+ "schemaOptions": nil,
+ "coldWritesEnabled": false,
+ },
+ },
+ },
+ })
+
+ actual = xtest.MustPrettyJSONString(t, string(body))
+
+ assert.Equal(t, expected, actual,
+ xtest.Diff(expected, actual))
+}
+
+func TestValidateUpdateRequest(t *testing.T) {
+ var (
+ reqEmptyName = &admin.NamespaceUpdateRequest{
+ Options: &nsproto.NamespaceOptions{
+ BootstrapEnabled: true,
+ },
+ }
+
+ reqEmptyOptions = &admin.NamespaceUpdateRequest{
+ Name: "foo",
+ }
+
+ reqNoNonZeroFields = &admin.NamespaceUpdateRequest{
+ Name: "foo",
+ Options: &nsproto.NamespaceOptions{},
+ }
+
+ reqNonZeroBootstrap = &admin.NamespaceUpdateRequest{
+ Name: "foo",
+ Options: &nsproto.NamespaceOptions{
+ RetentionOptions: &nsproto.RetentionOptions{
+ BlockSizeNanos: 1,
+ },
+ BootstrapEnabled: true,
+ },
+ }
+
+ reqNonZeroBlockSize = &admin.NamespaceUpdateRequest{
+ Name: "foo",
+ Options: &nsproto.NamespaceOptions{
+ RetentionOptions: &nsproto.RetentionOptions{
+ BlockSizeNanos: 1,
+ },
+ },
+ }
+
+ reqValid = &admin.NamespaceUpdateRequest{
+ Name: "foo",
+ Options: &nsproto.NamespaceOptions{
+ RetentionOptions: &nsproto.RetentionOptions{
+ RetentionPeriodNanos: 1,
+ },
+ },
+ }
+ )
+
+ for _, test := range []struct {
+ name string
+ request *admin.NamespaceUpdateRequest
+ expErr error
+ }{
+ {
+ name: "emptyName",
+ request: reqEmptyName,
+ expErr: errEmptyNamespaceName,
+ },
+ {
+ name: "emptyOptions",
+ request: reqEmptyOptions,
+ expErr: errEmptyNamespaceOptions,
+ },
+ {
+ name: "emptyNoNonZeroFields",
+ request: reqNoNonZeroFields,
+ expErr: errEmptyNamespaceOptions,
+ },
+ {
+ name: "nonZeroBootstrapField",
+ request: reqNonZeroBootstrap,
+ expErr: errNamespaceFieldImmutable,
+ },
+ {
+ name: "nonZeroBlockSize",
+ request: reqNonZeroBlockSize,
+ expErr: errNamespaceFieldImmutable,
+ },
+ {
+ name: "valid",
+ request: reqValid,
+ expErr: nil,
+ },
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ err := validateUpdateRequest(test.request)
+			if test.expErr != nil {
+				require.Error(t, err)
+				assert.True(t, errors.Is(err, test.expErr),
+					fmt.Sprintf("expected=%s, actual=%s", test.expErr, err))
+				return
+			}
+
+			assert.NoError(t, err)
+ })
+ }
+}
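
TestValidateUpdateRequest depends on sentinel errors being wrapped with %w so errors.Is still matches after context is added. A compact illustration of that pairing:

```go
package main

import (
	"errors"
	"fmt"
)

var errEmptyName = errors.New("must specify namespace name")

func check(name string) error {
	if name == "" {
		// Wrapping with %w keeps the sentinel matchable by errors.Is while
		// still adding call-site context to the message.
		return fmt.Errorf("unable to validate update request: %w", errEmptyName)
	}
	return nil
}

func main() {
	for _, tc := range []struct {
		name   string
		input  string
		expErr error
	}{
		{name: "emptyName", input: "", expErr: errEmptyName},
		{name: "valid", input: "testNamespace", expErr: nil},
	} {
		err := check(tc.input)
		fmt.Printf("%s: matched=%v\n", tc.name, errors.Is(err, tc.expErr))
	}
}
```
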
diff --git a/src/query/api/v1/handler/openapi/openapi.go b/src/query/api/v1/handler/openapi/openapi.go
index af31b0adf0..5175194fc1 100644
--- a/src/query/api/v1/handler/openapi/openapi.go
+++ b/src/query/api/v1/handler/openapi/openapi.go
@@ -74,6 +74,7 @@ func (h *DocHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeHTMLUTF8)
w.Write(doc)
}
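
The one-line openapi fix works because response headers are committed by the first Write call; setting Content-Type afterwards would be a silent no-op. A quick demonstration:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Headers are committed by the first Write (or WriteHeader) call, so the
	// content type must be set before the body goes out.
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		w.Write([]byte("<html><body>docs</body></html>"))
	})

	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest("GET", "/api/v1/openapi", nil))
	fmt.Println(rec.Header().Get("Content-Type"))
}
```
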
diff --git a/src/query/api/v1/handler/placement/add_test.go b/src/query/api/v1/handler/placement/add_test.go
index c7dd54ac75..423d0afc81 100644
--- a/src/query/api/v1/handler/placement/add_test.go
+++ b/src/query/api/v1/handler/placement/add_test.go
@@ -282,7 +282,7 @@ func TestPlacementAddHandler_SafeOK(t *testing.T) {
switch serviceName {
case handleroptions.M3CoordinatorServiceName:
- require.Equal(t, `{"placement":{"instances":{"host1":{"id":"host1","isolationGroup":"rack1","zone":"test","weight":1,"endpoint":"http://host1:1234","shards":[],"shardSetId":0,"hostname":"host1","port":1234}},"replicaFactor":1,"numShards":0,"isSharded":false,"cutoverTime":"0","isMirrored":false,"maxShardSetId":0},"version":1}`, string(body))
+ require.Equal(t, `{"placement":{"instances":{"host1":{"id":"host1","isolationGroup":"rack1","zone":"test","weight":1,"endpoint":"http://host1:1234","shards":[],"shardSetId":0,"hostname":"host1","port":1234,"metadata":{"debugPort":0}}},"replicaFactor":1,"numShards":0,"isSharded":false,"cutoverTime":"0","isMirrored":false,"maxShardSetId":0},"version":1}`, string(body))
case handleroptions.M3AggregatorServiceName:
require.Equal(t, `{"placement":{"instances":{},"replicaFactor":1,"numShards":0,"isSharded":true,"cutoverTime":"0","isMirrored":true,"maxShardSetId":0},"version":1}`, string(body))
default:
diff --git a/src/query/api/v1/handler/placement/common.go b/src/query/api/v1/handler/placement/common.go
index df13ce2d85..62740ea455 100644
--- a/src/query/api/v1/handler/placement/common.go
+++ b/src/query/api/v1/handler/placement/common.go
@@ -210,22 +210,10 @@ func ConvertInstancesProto(instancesProto []*placementpb.Instance) ([]placement.
res := make([]placement.Instance, 0, len(instancesProto))
for _, instanceProto := range instancesProto {
- shards, err := shard.NewShardsFromProto(instanceProto.Shards)
+ instance, err := placement.NewInstanceFromProto(instanceProto)
if err != nil {
return nil, err
}
-
- instance := placement.NewInstance().
- SetEndpoint(instanceProto.Endpoint).
- SetHostname(instanceProto.Hostname).
- SetID(instanceProto.Id).
- SetPort(instanceProto.Port).
- SetIsolationGroup(instanceProto.IsolationGroup).
- SetShards(shards).
- SetShardSetID(instanceProto.ShardSetId).
- SetWeight(instanceProto.Weight).
- SetZone(instanceProto.Zone)
-
res = append(res, instance)
}
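
ConvertInstancesProto now delegates to placement.NewInstanceFromProto, which is why the new instance metadata needed no changes here. A sketch of the single-conversion-site pattern, with hypothetical proto and domain types:

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the placement proto and domain types.
type instanceProto struct {
	ID        string
	Endpoint  string
	Port      uint32
	DebugPort uint32
}

type instance struct {
	id        string
	endpoint  string
	port      uint32
	debugPort uint32
}

// newInstanceFromProto keeps the proto-to-domain mapping in one place: when a
// field is added to the proto (like the instance metadata in this change),
// only this constructor needs updating, not every conversion loop.
func newInstanceFromProto(p *instanceProto) (instance, error) {
	if p == nil || p.ID == "" {
		return instance{}, errors.New("instance proto missing id")
	}
	return instance{id: p.ID, endpoint: p.Endpoint, port: p.Port, debugPort: p.DebugPort}, nil
}

func main() {
	inst, err := newInstanceFromProto(&instanceProto{
		ID: "host1", Endpoint: "http://host1:1234", Port: 1234, DebugPort: 4231,
	})
	fmt.Printf("%+v %v\n", inst, err)
}
```
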
diff --git a/src/query/api/v1/handler/placement/common_test.go b/src/query/api/v1/handler/placement/common_test.go
index c0b8eaad70..fc43feef55 100644
--- a/src/query/api/v1/handler/placement/common_test.go
+++ b/src/query/api/v1/handler/placement/common_test.go
@@ -57,9 +57,9 @@ func TestPlacementService(t *testing.T) {
ServiceName: serviceName,
}
- placementService, algo, err := ServiceWithAlgo(
- mockClient, handleroptions.
- NewServiceOptions(svcDefaults, nil, nil), time.Time{}, nil)
+ placementService, algo, err := ServiceWithAlgo(mockClient,
+ handleroptions.NewServiceOptions(svcDefaults, nil, nil),
+ time.Time{}, nil)
assert.NoError(t, err)
assert.NotNil(t, placementService)
assert.NotNil(t, algo)
@@ -67,19 +67,20 @@ func TestPlacementService(t *testing.T) {
// Test Services returns error
mockClient.EXPECT().Services(gomock.Not(nil)).
Return(nil, errors.New("dummy service error"))
- placementService, err = Service(
- mockClient, handleroptions.
- NewServiceOptions(svcDefaults, nil, nil), time.Time{}, nil)
+ placementService, err = Service(mockClient,
+ handleroptions.NewServiceOptions(svcDefaults, nil, nil),
+ time.Time{}, nil)
assert.Nil(t, placementService)
assert.EqualError(t, err, "dummy service error")
// Test PlacementService returns error
mockClient.EXPECT().Services(gomock.Not(nil)).Return(mockServices, nil)
- mockServices.EXPECT().PlacementService(gomock.Not(nil), gomock.Not(nil)).
+ mockServices.EXPECT().
+ PlacementService(gomock.Not(nil), gomock.Not(nil)).
Return(nil, errors.New("dummy placement error"))
- placementService, err = Service(
- mockClient, handleroptions.
- NewServiceOptions(svcDefaults, nil, nil), time.Time{}, nil)
+ placementService, err = Service(mockClient,
+ handleroptions.NewServiceOptions(svcDefaults, nil, nil),
+ time.Time{}, nil)
assert.Nil(t, placementService)
assert.EqualError(t, err, "dummy placement error")
})
@@ -147,11 +148,14 @@ func TestConvertInstancesProto(t *testing.T) {
Endpoint: "i1:1234",
Hostname: "i1",
Port: 1234,
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 4231,
+ },
},
})
require.NoError(t, err)
require.Equal(t, 1, len(instances))
- require.Equal(t, "Instance[ID=i1, IsolationGroup=r1, Zone=, Weight=1, Endpoint=i1:1234, Hostname=i1, Port=1234, ShardSetID=0, Shards=[Initializing=[], Available=[], Leaving=[]]]", instances[0].String())
+ require.Equal(t, "Instance[ID=i1, IsolationGroup=r1, Zone=, Weight=1, Endpoint=i1:1234, Hostname=i1, Port=1234, ShardSetID=0, Shards=[Initializing=[], Available=[], Leaving=[]], Metadata={DebugPort:4231}]", instances[0].String())
instances, err = ConvertInstancesProto([]*placementpb.Instance{
&placementpb.Instance{
@@ -174,6 +178,9 @@ func TestConvertInstancesProto(t *testing.T) {
SourceId: "s1",
},
},
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 1,
+ },
},
&placementpb.Instance{
Id: "i2",
@@ -195,6 +202,9 @@ func TestConvertInstancesProto(t *testing.T) {
SourceId: "s2",
},
},
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 2,
+ },
},
&placementpb.Instance{
Id: "i3",
@@ -213,13 +223,16 @@ func TestConvertInstancesProto(t *testing.T) {
CutoffNanos: 3,
},
},
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 3,
+ },
},
})
require.NoError(t, err)
require.Equal(t, 3, len(instances))
- require.Equal(t, "Instance[ID=i1, IsolationGroup=r1, Zone=, Weight=1, Endpoint=i1:1234, Hostname=i1, Port=1234, ShardSetID=1, Shards=[Initializing=[], Available=[1 2], Leaving=[]]]", instances[0].String())
- require.Equal(t, "Instance[ID=i2, IsolationGroup=r1, Zone=, Weight=1, Endpoint=i2:1234, Hostname=i2, Port=1234, ShardSetID=1, Shards=[Initializing=[], Available=[1], Leaving=[]]]", instances[1].String())
- require.Equal(t, "Instance[ID=i3, IsolationGroup=r2, Zone=, Weight=2, Endpoint=i3:1234, Hostname=i3, Port=1234, ShardSetID=2, Shards=[Initializing=[1], Available=[], Leaving=[]]]", instances[2].String())
+ require.Equal(t, "Instance[ID=i1, IsolationGroup=r1, Zone=, Weight=1, Endpoint=i1:1234, Hostname=i1, Port=1234, ShardSetID=1, Shards=[Initializing=[], Available=[1 2], Leaving=[]], Metadata={DebugPort:1}]", instances[0].String())
+ require.Equal(t, "Instance[ID=i2, IsolationGroup=r1, Zone=, Weight=1, Endpoint=i2:1234, Hostname=i2, Port=1234, ShardSetID=1, Shards=[Initializing=[], Available=[1], Leaving=[]], Metadata={DebugPort:2}]", instances[1].String())
+ require.Equal(t, "Instance[ID=i3, IsolationGroup=r2, Zone=, Weight=2, Endpoint=i3:1234, Hostname=i3, Port=1234, ShardSetID=2, Shards=[Initializing=[1], Available=[], Leaving=[]], Metadata={DebugPort:3}]", instances[2].String())
_, err = ConvertInstancesProto([]*placementpb.Instance{
&placementpb.Instance{
diff --git a/src/query/api/v1/handler/placement/delete_test.go b/src/query/api/v1/handler/placement/delete_test.go
index 1b216403a3..ec03ab3d74 100644
--- a/src/query/api/v1/handler/placement/delete_test.go
+++ b/src/query/api/v1/handler/placement/delete_test.go
@@ -365,8 +365,8 @@ func testDeleteHandlerSafe(t *testing.T, serviceName string) {
case handleroptions.M3CoordinatorServiceName:
require.Equal(t, `{"placement":{"instances":{},"replicaFactor":0,"numShards":0,"isSharded":false,"cutoverTime":"0","isMirrored":false,"maxShardSetId":0},"version":0}`, string(body))
case handleroptions.M3AggregatorServiceName:
- require.Equal(t, `{"placement":{"instances":{"host1":{"id":"host1","isolationGroup":"a","zone":"","weight":10,"endpoint":"","shards":[{"id":0,"state":"LEAVING","sourceId":"","cutoverNanos":"0","cutoffNanos":"300000000000"}],"shardSetId":0,"hostname":"","port":0},"host2":{"id":"host2","isolationGroup":"b","zone":"","weight":10,"endpoint":"","shards":[{"id":0,"state":"INITIALIZING","sourceId":"host1","cutoverNanos":"300000000000","cutoffNanos":"0"},{"id":1,"state":"AVAILABLE","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":1,"hostname":"","port":0}},"replicaFactor":1,"numShards":0,"isSharded":true,"cutoverTime":"0","isMirrored":true,"maxShardSetId":2},"version":2}`, string(body))
+ require.Equal(t, `{"placement":{"instances":{"host1":{"id":"host1","isolationGroup":"a","zone":"","weight":10,"endpoint":"","shards":[{"id":0,"state":"LEAVING","sourceId":"","cutoverNanos":"0","cutoffNanos":"300000000000"}],"shardSetId":0,"hostname":"","port":0,"metadata":{"debugPort":0}},"host2":{"id":"host2","isolationGroup":"b","zone":"","weight":10,"endpoint":"","shards":[{"id":0,"state":"INITIALIZING","sourceId":"host1","cutoverNanos":"300000000000","cutoffNanos":"0"},{"id":1,"state":"AVAILABLE","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":1,"hostname":"","port":0,"metadata":{"debugPort":0}}},"replicaFactor":1,"numShards":0,"isSharded":true,"cutoverTime":"0","isMirrored":true,"maxShardSetId":2},"version":2}`, string(body))
default:
- require.Equal(t, `{"placement":{"instances":{"host1":{"id":"host1","isolationGroup":"a","zone":"","weight":10,"endpoint":"","shards":[{"id":0,"state":"LEAVING","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0},"host2":{"id":"host2","isolationGroup":"b","zone":"","weight":10,"endpoint":"","shards":[{"id":0,"state":"AVAILABLE","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"},{"id":1,"state":"AVAILABLE","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0},"host3":{"id":"host3","isolationGroup":"c","zone":"","weight":10,"endpoint":"","shards":[{"id":0,"state":"INITIALIZING","sourceId":"host1","cutoverNanos":"0","cutoffNanos":"0"},{"id":1,"state":"AVAILABLE","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0}},"replicaFactor":2,"numShards":0,"isSharded":true,"cutoverTime":"0","isMirrored":false,"maxShardSetId":2},"version":2}`, string(body))
+ require.Equal(t, `{"placement":{"instances":{"host1":{"id":"host1","isolationGroup":"a","zone":"","weight":10,"endpoint":"","shards":[{"id":0,"state":"LEAVING","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0,"metadata":{"debugPort":0}},"host2":{"id":"host2","isolationGroup":"b","zone":"","weight":10,"endpoint":"","shards":[{"id":0,"state":"AVAILABLE","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"},{"id":1,"state":"AVAILABLE","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0,"metadata":{"debugPort":0}},"host3":{"id":"host3","isolationGroup":"c","zone":"","weight":10,"endpoint":"","shards":[{"id":0,"state":"INITIALIZING","sourceId":"host1","cutoverNanos":"0","cutoffNanos":"0"},{"id":1,"state":"AVAILABLE","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0,"metadata":{"debugPort":0}}},"replicaFactor":2,"numShards":0,"isSharded":true,"cutoverTime":"0","isMirrored":false,"maxShardSetId":2},"version":2}`, string(body))
}
}
diff --git a/src/query/api/v1/handler/placement/get_test.go b/src/query/api/v1/handler/placement/get_test.go
index ccf924c00c..6262a380fc 100644
--- a/src/query/api/v1/handler/placement/get_test.go
+++ b/src/query/api/v1/handler/placement/get_test.go
@@ -108,6 +108,9 @@ func TestPlacementGetHandler(t *testing.T) {
Endpoint: "http://host1:1234",
Hostname: "host1",
Port: 1234,
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 1,
+ },
},
"host2": &placementpb.Instance{
Id: "host2",
@@ -117,11 +120,14 @@ func TestPlacementGetHandler(t *testing.T) {
Endpoint: "http://host2:1234",
Hostname: "host2",
Port: 1234,
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 2,
+ },
},
},
}
- const placementJSON = `{"placement":{"instances":{"host1":{"id":"host1","isolationGroup":"rack1","zone":"test","weight":1,"endpoint":"http://host1:1234","shards":[],"shardSetId":0,"hostname":"host1","port":1234},"host2":{"id":"host2","isolationGroup":"rack1","zone":"test","weight":1,"endpoint":"http://host2:1234","shards":[],"shardSetId":0,"hostname":"host2","port":1234}},"replicaFactor":0,"numShards":0,"isSharded":false,"cutoverTime":"0","isMirrored":false,"maxShardSetId":0},"version":%d}`
+ const placementJSON = `{"placement":{"instances":{"host1":{"id":"host1","isolationGroup":"rack1","zone":"test","weight":1,"endpoint":"http://host1:1234","shards":[],"shardSetId":0,"hostname":"host1","port":1234,"metadata":{"debugPort":1}},"host2":{"id":"host2","isolationGroup":"rack1","zone":"test","weight":1,"endpoint":"http://host2:1234","shards":[],"shardSetId":0,"hostname":"host2","port":1234,"metadata":{"debugPort":2}}},"replicaFactor":0,"numShards":0,"isSharded":false,"cutoverTime":"0","isMirrored":false,"maxShardSetId":0},"version":%d}`
placementObj, err := placement.NewPlacementFromProto(placementProto)
require.NoError(t, err)
diff --git a/src/query/api/v1/handler/placement/init_test.go b/src/query/api/v1/handler/placement/init_test.go
index 1c6c63b25e..3bec2ce906 100644
--- a/src/query/api/v1/handler/placement/init_test.go
+++ b/src/query/api/v1/handler/placement/init_test.go
@@ -51,6 +51,9 @@ var (
Endpoint: "http://host1:1234",
Hostname: "host1",
Port: 1234,
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 0,
+ },
},
"host2": &placementpb.Instance{
Id: "host2",
@@ -60,6 +63,9 @@ var (
Endpoint: "http://host2:1234",
Hostname: "host2",
Port: 1234,
+ Metadata: &placementpb.InstanceMetadata{
+ DebugPort: 0,
+ },
},
},
}
@@ -82,9 +88,9 @@ func TestPlacementInitHandler(t *testing.T) {
req *http.Request
)
if serviceName == handleroptions.M3AggregatorServiceName {
- req = httptest.NewRequest(InitHTTPMethod, M3DBInitURL, strings.NewReader(`{"instances": [{"id": "host1","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host1:1234","hostname": "host1","port": 1234},{"id": "host2","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host2:1234","hostname": "host2","port": 1234}],"num_shards": 16,"replication_factor": 1}`))
+ req = httptest.NewRequest(InitHTTPMethod, M3DBInitURL, strings.NewReader(`{"instances": [{"id": "host1","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host1:1234","hostname": "host1","port": 1234, "metadata": {"debugPort":0}},{"id": "host2","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host2:1234","hostname": "host2","port": 1234, "metadata": {"debugPort":0}}],"num_shards": 16,"replication_factor": 1}`))
} else {
- req = httptest.NewRequest(InitHTTPMethod, M3DBInitURL, strings.NewReader(`{"instances": [{"id": "host1","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host1:1234","hostname": "host1","port": 1234},{"id": "host2","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host2:1234","hostname": "host2","port": 1234}],"num_shards": 16,"replication_factor": 1}`))
+ req = httptest.NewRequest(InitHTTPMethod, M3DBInitURL, strings.NewReader(`{"instances": [{"id": "host1","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host1:1234","hostname": "host1","port": 1234, "metadata": {"debugPort":0}},{"id": "host2","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host2:1234","hostname": "host2","port": 1234, "metadata": {"debugPort":0}}],"num_shards": 16,"replication_factor": 1}`))
}
require.NotNil(t, req)
@@ -102,14 +108,14 @@ func TestPlacementInitHandler(t *testing.T) {
body, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
- assert.Equal(t, `{"placement":{"instances":{"host1":{"id":"host1","isolationGroup":"rack1","zone":"test","weight":1,"endpoint":"http://host1:1234","shards":[],"shardSetId":0,"hostname":"host1","port":1234},"host2":{"id":"host2","isolationGroup":"rack1","zone":"test","weight":1,"endpoint":"http://host2:1234","shards":[],"shardSetId":0,"hostname":"host2","port":1234}},"replicaFactor":0,"numShards":0,"isSharded":false,"cutoverTime":"0","isMirrored":false,"maxShardSetId":0},"version":0}`, string(body))
+ assert.Equal(t, `{"placement":{"instances":{"host1":{"id":"host1","isolationGroup":"rack1","zone":"test","weight":1,"endpoint":"http://host1:1234","shards":[],"shardSetId":0,"hostname":"host1","port":1234,"metadata":{"debugPort":0}},"host2":{"id":"host2","isolationGroup":"rack1","zone":"test","weight":1,"endpoint":"http://host2:1234","shards":[],"shardSetId":0,"hostname":"host2","port":1234,"metadata":{"debugPort":0}}},"replicaFactor":0,"numShards":0,"isSharded":false,"cutoverTime":"0","isMirrored":false,"maxShardSetId":0},"version":0}`, string(body))
// Test error response
w = httptest.NewRecorder()
if serviceName == handleroptions.M3AggregatorServiceName {
- req = httptest.NewRequest(InitHTTPMethod, M3DBInitURL, strings.NewReader(`{"instances": [{"id": "host1","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "host1:1234","hostname": "host1","port": 1234},{"id": "host2","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host2:1234","hostname": "host2","port": 1234}],"num_shards": 64,"replication_factor": 2}`))
+ req = httptest.NewRequest(InitHTTPMethod, M3DBInitURL, strings.NewReader(`{"instances": [{"id": "host1","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "host1:1234","hostname": "host1","port": 1234, "metadata": {"debugPort":0}},{"id": "host2","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host2:1234","hostname": "host2","port": 1234, "metadata": {"debugPort":0}}],"num_shards": 64,"replication_factor": 2}`))
} else {
- req = httptest.NewRequest(InitHTTPMethod, M3DBInitURL, strings.NewReader(`{"instances": [{"id": "host1","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "host1:1234","hostname": "host1","port": 1234},{"id": "host2","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host2:1234","hostname": "host2","port": 1234}],"num_shards": 64,"replication_factor": 2}`))
+ req = httptest.NewRequest(InitHTTPMethod, M3DBInitURL, strings.NewReader(`{"instances": [{"id": "host1","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "host1:1234","hostname": "host1","port": 1234, "metadata": {"debugPort":0}},{"id": "host2","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host2:1234","hostname": "host2","port": 1234, "metadata": {"debugPort":0}}],"num_shards": 64,"replication_factor": 2}`))
}
require.NotNil(t, req)
@@ -134,9 +140,9 @@ func TestPlacementInitHandler(t *testing.T) {
// Test error response
w = httptest.NewRecorder()
if serviceName == handleroptions.M3AggregatorServiceName {
- req = httptest.NewRequest(InitHTTPMethod, M3DBInitURL, strings.NewReader(`{"instances": [{"id": "host1","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "host1:1234","hostname": "host1","port": 1234},{"id": "host2","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host2:1234","hostname": "host2","port": 1234}],"num_shards": 64,"replication_factor": 2}`))
+ req = httptest.NewRequest(InitHTTPMethod, M3DBInitURL, strings.NewReader(`{"instances": [{"id": "host1","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "host1:1234","hostname": "host1","port": 1234, "metadata": {"debugPort":0}},{"id": "host2","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host2:1234","hostname": "host2","port": 1234, "metadata": {"debugPort":0}}],"num_shards": 64,"replication_factor": 2}`))
} else {
- req = httptest.NewRequest(InitHTTPMethod, M3DBInitURL, strings.NewReader(`{"instances": [{"id": "host1","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "host1:1234","hostname": "host1","port": 1234},{"id": "host2","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host2:1234","hostname": "host2","port": 1234}],"num_shards": 64,"replication_factor": 2}`))
+ req = httptest.NewRequest(InitHTTPMethod, M3DBInitURL, strings.NewReader(`{"instances": [{"id": "host1","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "host1:1234","hostname": "host1","port": 1234, "metadata": {"debugPort":0}},{"id": "host2","isolation_group": "rack1","zone": "test","weight": 1,"endpoint": "http://host2:1234","hostname": "host2","port": 1234, "metadata": {"debugPort":0}}],"num_shards": 64,"replication_factor": 2}`))
}
require.NotNil(t, req)
diff --git a/src/query/api/v1/handler/placement/replace.go b/src/query/api/v1/handler/placement/replace.go
index ea02466e3c..a37e105c3f 100644
--- a/src/query/api/v1/handler/placement/replace.go
+++ b/src/query/api/v1/handler/placement/replace.go
@@ -51,12 +51,12 @@ var (
// M3AggReplaceURL is the url for the m3aggregator replace handler (method
// POST).
M3AggReplaceURL = path.Join(handler.RoutePrefixV1,
- handleroptions.M3AggregatorServiceName, replacePathName)
+ M3AggServicePlacementPathName, replacePathName)
// M3CoordinatorReplaceURL is the url for the m3coordinator replace handler
// (method POST).
M3CoordinatorReplaceURL = path.Join(handler.RoutePrefixV1,
- handleroptions.M3CoordinatorServiceName, replacePathName)
+ M3CoordinatorServicePlacementPathName, replacePathName)
)
// ReplaceHandler is the type for placement replaces.
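
With this change the replace route nests under the per-service placement path rather than the bare service name. A quick sketch of the resulting URL, assuming RoutePrefixV1 is "/api/v1" and M3AggServicePlacementPathName joins "services/m3aggregator/placement" — both values are assumptions based on the constant names, not confirmed by this diff:

	package main

	import (
		"fmt"
		"path"
	)

	func main() {
		const routePrefixV1 = "/api/v1" // assumed value of handler.RoutePrefixV1
		// assumed value of M3AggServicePlacementPathName
		aggPlacementPath := path.Join("services", "m3aggregator", "placement")
		fmt.Println(path.Join(routePrefixV1, aggPlacementPath, "replace"))
		// /api/v1/services/m3aggregator/placement/replace
	}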
diff --git a/src/query/api/v1/handler/placement/replace_test.go b/src/query/api/v1/handler/placement/replace_test.go
index 2b5ccf4d98..80f80549f5 100644
--- a/src/query/api/v1/handler/placement/replace_test.go
+++ b/src/query/api/v1/handler/placement/replace_test.go
@@ -258,13 +258,13 @@ func testPlacementReplaceHandlerSafeOk(t *testing.T, serviceName string) {
switch serviceName {
case handleroptions.M3CoordinatorServiceName:
- exp := `{"placement":{"instances":{"B":{"id":"B","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[],"shardSetId":0,"hostname":"","port":0},"C":{"id":"C","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[],"shardSetId":0,"hostname":"","port":0}},"replicaFactor":0,"numShards":0,"isSharded":false,"cutoverTime":"0","isMirrored":false,"maxShardSetId":0},"version":2}`
+ exp := `{"placement":{"instances":{"B":{"id":"B","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[],"shardSetId":0,"hostname":"","port":0,"metadata":{"debugPort":0}},"C":{"id":"C","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[],"shardSetId":0,"hostname":"","port":0,"metadata":{"debugPort":0}}},"replicaFactor":0,"numShards":0,"isSharded":false,"cutoverTime":"0","isMirrored":false,"maxShardSetId":0},"version":2}`
assert.Equal(t, exp, string(body))
case handleroptions.M3DBServiceName:
- exp := `{"placement":{"instances":{"A":{"id":"A","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[{"id":1,"state":"LEAVING","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0},"B":{"id":"B","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[{"id":1,"state":"AVAILABLE","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0},"C":{"id":"C","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[{"id":1,"state":"INITIALIZING","sourceId":"A","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0}},"replicaFactor":0,"numShards":0,"isSharded":true,"cutoverTime":"0","isMirrored":false,"maxShardSetId":0},"version":2}`
+ exp := `{"placement":{"instances":{"A":{"id":"A","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[{"id":1,"state":"LEAVING","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0,"metadata":{"debugPort":0}},"B":{"id":"B","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[{"id":1,"state":"AVAILABLE","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0,"metadata":{"debugPort":0}},"C":{"id":"C","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[{"id":1,"state":"INITIALIZING","sourceId":"A","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0,"metadata":{"debugPort":0}}},"replicaFactor":0,"numShards":0,"isSharded":true,"cutoverTime":"0","isMirrored":false,"maxShardSetId":0},"version":2}`
assert.Equal(t, exp, string(body))
case handleroptions.M3AggregatorServiceName:
- exp := `{"placement":{"instances":{"A":{"id":"A","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[{"id":1,"state":"LEAVING","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0},"B":{"id":"B","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[{"id":1,"state":"AVAILABLE","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0},"C":{"id":"C","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[{"id":1,"state":"INITIALIZING","sourceId":"A","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0}},"replicaFactor":0,"numShards":0,"isSharded":true,"cutoverTime":"0","isMirrored":true,"maxShardSetId":0},"version":2}`
+ exp := `{"placement":{"instances":{"A":{"id":"A","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[{"id":1,"state":"LEAVING","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0,"metadata":{"debugPort":0}},"B":{"id":"B","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[{"id":1,"state":"AVAILABLE","sourceId":"","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0,"metadata":{"debugPort":0}},"C":{"id":"C","isolationGroup":"r1","zone":"z1","weight":1,"endpoint":"","shards":[{"id":1,"state":"INITIALIZING","sourceId":"A","cutoverNanos":"0","cutoffNanos":"0"}],"shardSetId":0,"hostname":"","port":0,"metadata":{"debugPort":0}}},"replicaFactor":0,"numShards":0,"isSharded":true,"cutoverTime":"0","isMirrored":true,"maxShardSetId":0},"version":2}`
assert.Equal(t, exp, string(body))
default:
t.Errorf("unknown service name %s", serviceName)
diff --git a/src/query/api/v1/handler/placement/set.go b/src/query/api/v1/handler/placement/set.go
index 19801e72b2..03342efb02 100644
--- a/src/query/api/v1/handler/placement/set.go
+++ b/src/query/api/v1/handler/placement/set.go
@@ -52,12 +52,12 @@ var (
	// M3AggSetURL is the url for the m3aggregator set handler (method
// POST).
M3AggSetURL = path.Join(handler.RoutePrefixV1,
- handleroptions.M3AggregatorServiceName, setPathName)
+ M3AggServicePlacementPathName, setPathName)
	// M3CoordinatorSetURL is the url for the m3coordinator set handler
// (method POST).
M3CoordinatorSetURL = path.Join(handler.RoutePrefixV1,
- handleroptions.M3CoordinatorServiceName, setPathName)
+ M3CoordinatorServicePlacementPathName, setPathName)
)
// SetHandler is the type for placement sets.
@@ -78,7 +78,8 @@ func (h *SetHandler) ServeHTTP(
req, pErr := h.parseRequest(r)
if pErr != nil {
- xhttp.Error(w, pErr.Inner(), pErr.Code())
+ logger.Error("unable to parse request", zap.Error(pErr))
+ xhttp.Error(w, pErr, http.StatusBadRequest)
return
}
@@ -143,12 +144,12 @@ func (h *SetHandler) ServeHTTP(
xhttp.WriteProtoMsgJSONResponse(w, resp, logger)
}
-func (h *SetHandler) parseRequest(r *http.Request) (*admin.PlacementSetRequest, *xhttp.ParseError) {
+func (h *SetHandler) parseRequest(r *http.Request) (*admin.PlacementSetRequest, error) {
defer r.Body.Close()
req := &admin.PlacementSetRequest{}
if err := jsonpb.Unmarshal(r.Body, req); err != nil {
- return nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ return nil, err
}
return req, nil
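
parseRequest now returns a plain error, and the handler — rather than the parser — chooses the status code and logs the failure. A minimal sketch of that pattern, with a generic error writer standing in for xhttp.Error:

	package main

	import (
		"errors"
		"net/http"

		"go.uber.org/zap"
	)

	func parse(r *http.Request) (string, error) {
		q := r.FormValue("query")
		if q == "" {
			return "", errors.New("missing query")
		}
		return q, nil
	}

	func serve(w http.ResponseWriter, r *http.Request, logger *zap.Logger) {
		q, err := parse(r)
		if err != nil {
			// The caller decides the HTTP status; the parser just reports.
			logger.Error("unable to parse request", zap.Error(err))
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		_ = q // handle the parsed request...
	}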
diff --git a/src/query/api/v1/handler/placement/set_test.go b/src/query/api/v1/handler/placement/set_test.go
index a08187bdb0..047a862175 100644
--- a/src/query/api/v1/handler/placement/set_test.go
+++ b/src/query/api/v1/handler/placement/set_test.go
@@ -144,7 +144,10 @@ func TestPlacementSetHandler(t *testing.T) {
DryRun: !setTestPlacementReqProto.Confirm,
})
require.NoError(t, err)
- assert.Equal(t, expectedBody, body,
- xtest.Diff(xtest.MustPrettyJSON(t, expectedBody), xtest.MustPrettyJSON(t, body)))
+
+ expected := xtest.MustPrettyJSONString(t, expectedBody)
+ actual := xtest.MustPrettyJSONString(t, body)
+
+ assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
})
}
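
Pretty-printing both payloads before asserting makes the failure output a line-by-line diff instead of one long JSON string. A generic stand-in for xtest.MustPrettyJSONString (an internal helper), sketched with encoding/json only:

	package main

	import (
		"bytes"
		"encoding/json"
		"fmt"
	)

	// prettyJSON re-indents a compact JSON document so equality failures
	// produce a readable, multi-line diff.
	func prettyJSON(raw []byte) (string, error) {
		var buf bytes.Buffer
		if err := json.Indent(&buf, raw, "", "  "); err != nil {
			return "", err
		}
		return buf.String(), nil
	}

	func main() {
		out, err := prettyJSON([]byte(`{"a":1,"b":[2,3]}`))
		if err != nil {
			panic(err)
		}
		fmt.Println(out)
	}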
diff --git a/src/query/api/v1/handler/prom/common.go b/src/query/api/v1/handler/prom/common.go
new file mode 100644
index 0000000000..69f8e64a40
--- /dev/null
+++ b/src/query/api/v1/handler/prom/common.go
@@ -0,0 +1,125 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package prom
+
+import (
+ "math"
+ "net/http"
+ "time"
+
+ xhttp "github.com/m3db/m3/src/x/net/http"
+
+ jsoniter "github.com/json-iterator/go"
+ promql "github.com/prometheus/prometheus/promql/parser"
+ promstorage "github.com/prometheus/prometheus/storage"
+)
+
+// All of this is taken from prometheus to ensure we have consistent return/error
+// formats with prometheus.
+// https://github.com/prometheus/prometheus/blob/43acd0e2e93f9f70c49b2267efa0124f1e759e86/web/api/v1/api.go#L1097
+
+const (
+ queryParam = "query"
+ startParam = "start"
+ endParam = "end"
+ stepParam = "step"
+ timeoutParam = "timeout"
+)
+
+var (
+ minTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC()
+ maxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC()
+
+ minTimeFormatted = minTime.Format(time.RFC3339Nano)
+ maxTimeFormatted = maxTime.Format(time.RFC3339Nano)
+)
+
+type status string
+
+const (
+ statusSuccess status = "success"
+ statusError status = "error"
+)
+
+type errorType string
+
+const (
+ errorNone errorType = ""
+ errorTimeout errorType = "timeout"
+ errorCanceled errorType = "canceled"
+ errorExec errorType = "execution"
+ errorBadData errorType = "bad_data"
+ errorInternal errorType = "internal"
+ errorUnavailable errorType = "unavailable"
+ errorNotFound errorType = "not_found"
+)
+
+type queryData struct {
+ ResultType promql.ValueType `json:"resultType"`
+ Result promql.Value `json:"result"`
+}
+
+type response struct {
+ Status status `json:"status"`
+ Data interface{} `json:"data,omitempty"`
+ ErrorType errorType `json:"errorType,omitempty"`
+ Error string `json:"error,omitempty"`
+ Warnings []string `json:"warnings,omitempty"`
+}
+
+func respond(w http.ResponseWriter, data interface{}, warnings promstorage.Warnings) {
+ statusMessage := statusSuccess
+ var warningStrings []string
+ for _, warning := range warnings {
+ warningStrings = append(warningStrings, warning.Error())
+ }
+ json := jsoniter.ConfigCompatibleWithStandardLibrary
+ b, err := json.Marshal(&response{
+ Status: statusMessage,
+ Data: data,
+ Warnings: warningStrings,
+ })
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
+ w.WriteHeader(http.StatusOK)
+ w.Write(b)
+}
+
+func respondError(w http.ResponseWriter, err error, code int) {
+ json := jsoniter.ConfigCompatibleWithStandardLibrary
+ b, err := json.Marshal(&response{
+ Status: statusError,
+ Error: err.Error(),
+ })
+
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
+ w.WriteHeader(code)
+ w.Write(b)
+}
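
An in-package sketch of what respond writes (it is unexported, so this would live beside it, with errors, fmt, and net/http/httptest imported); the envelope follows the response struct above, and omitempty drops the data field when it is nil:

	func exampleRespond() {
		rec := httptest.NewRecorder()
		respond(rec, nil, promstorage.Warnings{errors.New("partial results")})
		// Body: {"status":"success","warnings":["partial results"]}
		fmt.Println(rec.Body.String())
	}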
diff --git a/src/query/api/v1/handler/prom/mocks.go b/src/query/api/v1/handler/prom/mocks.go
new file mode 100644
index 0000000000..7e6e7da830
--- /dev/null
+++ b/src/query/api/v1/handler/prom/mocks.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package prom
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/go-kit/kit/log"
+ kitlogzap "github.com/go-kit/kit/log/zap"
+ "github.com/prometheus/prometheus/pkg/labels"
+ "github.com/prometheus/prometheus/promql"
+ promstorage "github.com/prometheus/prometheus/storage"
+ "go.uber.org/zap/zapcore"
+)
+
+type mockQuerier struct {
+ mockOptions
+}
+
+type mockSeriesSet struct {
+ mockOptions
+ promstorage.SeriesSet
+}
+
+func (m *mockSeriesSet) Next() bool { return false }
+func (m *mockSeriesSet) At() promstorage.Series { return nil }
+func (m *mockSeriesSet) Err() error { return nil }
+
+func (q *mockQuerier) Select(
+ sortSeries bool,
+ hints *promstorage.SelectHints,
+ labelMatchers ...*labels.Matcher,
+) (promstorage.SeriesSet, promstorage.Warnings, error) {
+ if q.mockOptions.selectFn != nil {
+ return q.mockOptions.selectFn(sortSeries, hints, labelMatchers...)
+ }
+ return &mockSeriesSet{mockOptions: q.mockOptions}, nil, nil
+}
+
+func (*mockQuerier) LabelValues(name string) ([]string, promstorage.Warnings, error) {
+ return nil, nil, errors.New("not implemented")
+}
+
+func (*mockQuerier) LabelNames() ([]string, promstorage.Warnings, error) {
+ return nil, nil, errors.New("not implemented")
+}
+
+func (*mockQuerier) Close() error {
+ return nil
+}
+
+type mockOptions struct {
+ selectFn func(
+ sortSeries bool,
+ hints *promstorage.SelectHints,
+ labelMatchers ...*labels.Matcher,
+ ) (promstorage.SeriesSet, promstorage.Warnings, error)
+}
+
+type mockQueryable struct {
+ mockOptions
+}
+
+func (q *mockQueryable) Querier(_ context.Context, _, _ int64) (promstorage.Querier, error) {
+ return &mockQuerier{mockOptions: q.mockOptions}, nil
+}
+
+func newMockPromQLEngine() *promql.Engine {
+ var (
+ instrumentOpts = instrument.NewOptions()
+ kitLogger = kitlogzap.NewZapSugarLogger(instrumentOpts.Logger(), zapcore.InfoLevel)
+ opts = promql.EngineOpts{
+ Logger: log.With(kitLogger, "component", "query engine"),
+ MaxSamples: 100,
+ Timeout: 1 * time.Minute,
+ }
+ )
+ return promql.NewEngine(opts)
+}
diff --git a/src/query/api/v1/handler/prom/prom.go b/src/query/api/v1/handler/prom/prom.go
new file mode 100644
index 0000000000..001f1a82da
--- /dev/null
+++ b/src/query/api/v1/handler/prom/prom.go
@@ -0,0 +1,62 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package prom
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/m3db/m3/src/query/api/v1/options"
+ "github.com/m3db/m3/src/query/storage/prometheus"
+
+ "github.com/prometheus/prometheus/promql"
+)
+
+// NB: since Prometheus engine is not brought up in the usual fashion,
+// default subquery evaluation interval is unset, causing div by 0 errors.
+func init() {
+ promql.SetDefaultEvaluationInterval(time.Minute)
+}
+
+// Options defines options for PromQL handler.
+type Options struct {
+ PromQLEngine *promql.Engine
+}
+
+// NewReadHandler creates a handler to handle PromQL requests.
+func NewReadHandler(opts Options, hOpts options.HandlerOptions) http.Handler {
+ queryable := prometheus.NewPrometheusQueryable(
+ prometheus.PrometheusOptions{
+ Storage: hOpts.Storage(),
+ InstrumentOptions: hOpts.InstrumentOpts(),
+ })
+ return newReadHandler(opts, hOpts, queryable)
+}
+
+// NewReadInstantHandler creates a handler to handle instant PromQL requests.
+func NewReadInstantHandler(opts Options, hOpts options.HandlerOptions) http.Handler {
+ queryable := prometheus.NewPrometheusQueryable(
+ prometheus.PrometheusOptions{
+ Storage: hOpts.Storage(),
+ InstrumentOptions: hOpts.InstrumentOpts(),
+ })
+ return newReadInstantHandler(opts, hOpts, queryable)
+}
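
A sketch of how these constructors might be registered, assuming hypothetical route paths and an engine plus handler options built elsewhere — the real routes live in the router setup, not in this file:

	// In package prom; the mux paths here are illustrative only.
	func registerPromHandlers(
		mux *http.ServeMux,
		engine *promql.Engine,
		hOpts options.HandlerOptions,
	) {
		opts := Options{PromQLEngine: engine}
		mux.Handle("/api/v1/query_range", NewReadHandler(opts, hOpts))
		mux.Handle("/api/v1/query", NewReadInstantHandler(opts, hOpts))
	}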
diff --git a/src/query/api/v1/handler/prom/prom_test.go b/src/query/api/v1/handler/prom/prom_test.go
new file mode 100644
index 0000000000..2def5115e5
--- /dev/null
+++ b/src/query/api/v1/handler/prom/prom_test.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package prom
+
+import (
+ "testing"
+
+ "github.com/prometheus/prometheus/promql"
+ "gotest.tools/assert"
+)
+
+func TestIntervalSet(t *testing.T) {
+ intervalMillis := promql.GetDefaultEvaluationInterval()
+ assert.Equal(t, int64(60*1000), intervalMillis)
+}
diff --git a/src/query/api/v1/handler/prom/read.go b/src/query/api/v1/handler/prom/read.go
new file mode 100644
index 0000000000..6525262846
--- /dev/null
+++ b/src/query/api/v1/handler/prom/read.go
@@ -0,0 +1,127 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package prom provides custom handlers that support the prometheus
+// query endpoints.
+package prom
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/native"
+ "github.com/m3db/m3/src/query/api/v1/options"
+ "github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/storage/prometheus"
+
+ "github.com/prometheus/prometheus/promql"
+ promstorage "github.com/prometheus/prometheus/storage"
+ "github.com/uber-go/tally"
+ "go.uber.org/zap"
+)
+
+type readHandler struct {
+ engine *promql.Engine
+ queryable promstorage.Queryable
+ hOpts options.HandlerOptions
+ scope tally.Scope
+ logger *zap.Logger
+}
+
+type readRequest struct {
+ query string
+ start, end time.Time
+ step, timeout time.Duration
+}
+
+func newReadHandler(
+ opts Options,
+ hOpts options.HandlerOptions,
+ queryable promstorage.Queryable,
+) http.Handler {
+ scope := hOpts.InstrumentOpts().MetricsScope().Tagged(
+ map[string]string{"handler": "prometheus-read"},
+ )
+ return &readHandler{
+ engine: opts.PromQLEngine,
+ queryable: queryable,
+ hOpts: hOpts,
+ scope: scope,
+ logger: hOpts.InstrumentOpts().Logger(),
+ }
+}
+
+func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ fetchOptions, fetchErr := h.hOpts.FetchOptionsBuilder().NewFetchOptions(r)
+ if fetchErr != nil {
+ respondError(w, fetchErr, http.StatusBadRequest)
+ return
+ }
+
+ request, perr := native.ParseRequest(ctx, r, false, h.hOpts)
+ if perr != nil {
+ respondError(w, perr, http.StatusBadRequest)
+ return
+ }
+
+ // NB (@shreyas): We put the FetchOptions in context so it can be
+ // retrieved in the queryable object as there is no other way to pass
+ // that through.
+ var resultMetadata block.ResultMetadata
+ ctx = context.WithValue(ctx, prometheus.FetchOptionsContextKey, fetchOptions)
+ ctx = context.WithValue(ctx, prometheus.BlockResultMetadataKey, &resultMetadata)
+
+ if request.Params.Timeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, request.Params.Timeout)
+ defer cancel()
+ }
+
+ qry, err := h.engine.NewRangeQuery(
+ h.queryable,
+ request.Params.Query,
+ request.Params.Start,
+ request.Params.End,
+ request.Params.Step)
+ if err != nil {
+ h.logger.Error("error creating range query", zap.Error(err), zap.String("query", request.Params.Query))
+ respondError(w, err, http.StatusInternalServerError)
+ return
+ }
+ defer qry.Close()
+
+ res := qry.Exec(ctx)
+ if res.Err != nil {
+ h.logger.Error("error executing range query", zap.Error(res.Err), zap.String("query", request.Params.Query))
+ respondError(w, res.Err, http.StatusInternalServerError)
+ return
+ }
+
+ handleroptions.AddWarningHeaders(w, resultMetadata)
+
+ respond(w, &queryData{
+ Result: res.Value,
+ ResultType: res.Value.Type(),
+ }, res.Warnings)
+}
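
On the storage side, the queryable can recover what the handler stashed in the context. A sketch of that retrieval, assuming the keys are exported by the storage/prometheus package as used above, and that the stored values are *storage.FetchOptions and *block.ResultMetadata (the metadata type is visible in this handler; the fetch options type is inferred):

	package prometheus

	import (
		"context"

		"github.com/m3db/m3/src/query/block"
		"github.com/m3db/m3/src/query/storage"
	)

	// fetchOptionsFromContext recovers the fetch options stashed by the
	// read handlers; ok is false if the handler did not set them.
	func fetchOptionsFromContext(ctx context.Context) (*storage.FetchOptions, bool) {
		opts, ok := ctx.Value(FetchOptionsContextKey).(*storage.FetchOptions)
		return opts, ok
	}

	func resultMetadataFromContext(ctx context.Context) (*block.ResultMetadata, bool) {
		md, ok := ctx.Value(BlockResultMetadataKey).(*block.ResultMetadata)
		return md, ok
	}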
diff --git a/src/query/api/v1/handler/prom/read_instant.go b/src/query/api/v1/handler/prom/read_instant.go
new file mode 100644
index 0000000000..53682dce19
--- /dev/null
+++ b/src/query/api/v1/handler/prom/read_instant.go
@@ -0,0 +1,124 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package prom
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
+ "github.com/m3db/m3/src/query/api/v1/options"
+ "github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/storage/prometheus"
+ "github.com/m3db/m3/src/query/util"
+
+ "github.com/prometheus/prometheus/promql"
+ promstorage "github.com/prometheus/prometheus/storage"
+ "github.com/uber-go/tally"
+ "go.uber.org/zap"
+)
+
+type readInstantHandler struct {
+ queryable promstorage.Queryable
+ engine *promql.Engine
+ hOpts options.HandlerOptions
+ scope tally.Scope
+ logger *zap.Logger
+}
+
+func newReadInstantHandler(
+ opts Options,
+ hOpts options.HandlerOptions,
+ queryable promstorage.Queryable,
+) http.Handler {
+ scope := hOpts.InstrumentOpts().MetricsScope().Tagged(
+ map[string]string{"handler": "prometheus-read-instantaneous"},
+ )
+ return &readInstantHandler{
+ engine: opts.PromQLEngine,
+ queryable: queryable,
+ hOpts: hOpts,
+ scope: scope,
+ logger: hOpts.InstrumentOpts().Logger(),
+ }
+}
+
+func (h *readInstantHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ ts, err := util.ParseTimeStringWithDefault(r.FormValue("time"), time.Now())
+ if err != nil {
+ respondError(w, err, http.StatusBadRequest)
+ return
+ }
+
+ fetchOptions, fetchErr := h.hOpts.FetchOptionsBuilder().NewFetchOptions(r)
+ if fetchErr != nil {
+ respondError(w, fetchErr, http.StatusBadRequest)
+ return
+ }
+
+ ctx := r.Context()
+ // NB (@shreyas): We put the FetchOptions in context so it can be
+ // retrieved in the queryable object as there is no other way to pass
+ // that through.
+ var resultMetadata block.ResultMetadata
+ ctx = context.WithValue(ctx, prometheus.FetchOptionsContextKey, fetchOptions)
+ ctx = context.WithValue(ctx, prometheus.BlockResultMetadataKey, &resultMetadata)
+
+ if t := r.FormValue("timeout"); t != "" {
+ timeout, err := util.ParseDurationString(t)
+ if err != nil {
+ err = fmt.Errorf("invalid parameter 'timeout': %v", err)
+ respondError(w, err, http.StatusBadRequest)
+ return
+ }
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, timeout)
+ defer cancel()
+ }
+
+ query := r.FormValue("query")
+ qry, err := h.engine.NewInstantQuery(
+ h.queryable,
+ query,
+ ts)
+ if err != nil {
+ h.logger.Error("error creating instant query", zap.Error(err), zap.String("query", query))
+ respondError(w, err, http.StatusInternalServerError)
+ return
+ }
+ defer qry.Close()
+
+ res := qry.Exec(ctx)
+ if res.Err != nil {
+ h.logger.Error("error executing instant query", zap.Error(res.Err), zap.String("query", query))
+ respondError(w, res.Err, http.StatusInternalServerError)
+ return
+ }
+
+ handleroptions.AddWarningHeaders(w, resultMetadata)
+
+ respond(w, &queryData{
+ Result: res.Value,
+ ResultType: res.Value.Type(),
+ }, res.Warnings)
+}
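
The instant handler reads everything from form values. A sketch of exercising it with httptest, assuming the usual net/http, net/http/httptest, net/url, and time imports; the path is hypothetical since routing is configured elsewhere:

	func exampleInstantRequest(h http.Handler) string {
		vals := url.Values{}
		vals.Set("query", "up")
		vals.Set("time", time.Now().Format(time.RFC3339))
		vals.Set("timeout", "30s")

		req := httptest.NewRequest("GET", "/api/v1/query?"+vals.Encode(), nil)
		rec := httptest.NewRecorder()
		h.ServeHTTP(rec, req)
		return rec.Body.String()
	}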
diff --git a/src/query/api/v1/handler/prom/read_test.go b/src/query/api/v1/handler/prom/read_test.go
new file mode 100644
index 0000000000..6176271d23
--- /dev/null
+++ b/src/query/api/v1/handler/prom/read_test.go
@@ -0,0 +1,261 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package prom
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus"
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/native"
+ "github.com/m3db/m3/src/query/api/v1/options"
+ "github.com/m3db/m3/src/query/executor"
+ "github.com/m3db/m3/src/x/instrument"
+ "github.com/prometheus/prometheus/pkg/labels"
+ promstorage "github.com/prometheus/prometheus/storage"
+ "github.com/stretchr/testify/require"
+)
+
+const promQuery = `http_requests_total{job="prometheus",group="canary"}`
+
+var testPromQLEngine = newMockPromQLEngine()
+
+type testHandlers struct {
+ queryable *mockQueryable
+ readHandler http.Handler
+ readInstantHandler http.Handler
+}
+
+func setupTest(t *testing.T) testHandlers {
+ opts := Options{
+ PromQLEngine: testPromQLEngine,
+ }
+ timeoutOpts := &prometheus.TimeoutOpts{
+ FetchTimeout: 15 * time.Second,
+ }
+
+ fetchOptsBuilderCfg := handleroptions.FetchOptionsBuilderOptions{}
+ fetchOptsBuilder := handleroptions.NewFetchOptionsBuilder(fetchOptsBuilderCfg)
+ instrumentOpts := instrument.NewOptions()
+ engineOpts := executor.NewEngineOptions().
+ SetLookbackDuration(time.Minute).
+ SetGlobalEnforcer(nil).
+ SetInstrumentOptions(instrumentOpts)
+ engine := executor.NewEngine(engineOpts)
+ hOpts := options.EmptyHandlerOptions().
+ SetFetchOptionsBuilder(fetchOptsBuilder).
+ SetEngine(engine).
+ SetTimeoutOpts(timeoutOpts)
+ queryable := &mockQueryable{}
+ readHandler := newReadHandler(opts, hOpts, queryable)
+ readInstantHandler := newReadInstantHandler(opts, hOpts, queryable)
+ return testHandlers{
+ queryable: queryable,
+ readHandler: readHandler,
+ readInstantHandler: readInstantHandler,
+ }
+}
+
+func defaultParams() url.Values {
+ vals := url.Values{}
+ now := time.Now()
+ vals.Add(queryParam, promQuery)
+ vals.Add(startParam, now.Format(time.RFC3339))
+	vals.Add(endParam, now.Add(time.Hour).Format(time.RFC3339))
+	vals.Add(handleroptions.StepParam, (10 * time.Second).String())
+ return vals
+}
+
+func defaultParamsWithoutQuery() url.Values {
+ vals := url.Values{}
+ now := time.Now()
+ vals.Add(startParam, now.Format(time.RFC3339))
+	vals.Add(endParam, now.Add(time.Hour).Format(time.RFC3339))
+	vals.Add(handleroptions.StepParam, (10 * time.Second).String())
+ return vals
+}
+
+func TestPromReadHandler(t *testing.T) {
+ setup := setupTest(t)
+
+ req, _ := http.NewRequest("GET", native.PromReadURL, nil)
+ req.URL.RawQuery = defaultParams().Encode()
+
+ recorder := httptest.NewRecorder()
+ setup.readHandler.ServeHTTP(recorder, req)
+
+ var resp response
+ require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &resp))
+ require.Equal(t, statusSuccess, resp.Status)
+}
+
+func TestPromReadHandlerInvalidQuery(t *testing.T) {
+ setup := setupTest(t)
+
+ req, _ := http.NewRequest("GET", native.PromReadURL, nil)
+ req.URL.RawQuery = defaultParamsWithoutQuery().Encode()
+
+ recorder := httptest.NewRecorder()
+ setup.readHandler.ServeHTTP(recorder, req)
+
+ var resp response
+ require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &resp))
+ require.Equal(t, statusError, resp.Status)
+}
+
+func TestPromReadInstantHandler(t *testing.T) {
+ setup := setupTest(t)
+
+ req, _ := http.NewRequest("GET", native.PromReadInstantURL, nil)
+ req.URL.RawQuery = defaultParams().Encode()
+
+ recorder := httptest.NewRecorder()
+ setup.readInstantHandler.ServeHTTP(recorder, req)
+
+ var resp response
+ require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &resp))
+ require.Equal(t, statusSuccess, resp.Status)
+}
+
+func TestPromReadInstantHandlerInvalidQuery(t *testing.T) {
+ setup := setupTest(t)
+
+ req, _ := http.NewRequest("GET", native.PromReadInstantURL, nil)
+ req.URL.RawQuery = defaultParamsWithoutQuery().Encode()
+
+ recorder := httptest.NewRecorder()
+ setup.readInstantHandler.ServeHTTP(recorder, req)
+
+ var resp response
+ require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &resp))
+ require.Equal(t, statusError, resp.Status)
+}
+
+func TestPromReadInstantHandlerParseMinTime(t *testing.T) {
+ setup := setupTest(t)
+
+ var (
+ query *promstorage.SelectHints
+ selects int
+ )
+ setup.queryable.selectFn = func(
+ sortSeries bool,
+ hints *promstorage.SelectHints,
+ labelMatchers ...*labels.Matcher,
+ ) (promstorage.SeriesSet, promstorage.Warnings, error) {
+ selects++
+ query = hints
+ return &mockSeriesSet{}, nil, nil
+ }
+
+ req, _ := http.NewRequest("GET", native.PromReadInstantURL, nil)
+ params := defaultParams()
+ params.Set("time", minTimeFormatted)
+ req.URL.RawQuery = params.Encode()
+
+ var resp response
+ recorder := httptest.NewRecorder()
+
+ setup.readInstantHandler.ServeHTTP(recorder, req)
+
+ require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &resp))
+ require.Equal(t, statusSuccess, resp.Status)
+
+ require.Equal(t, 1, selects)
+
+ fudge := 5 * time.Minute // Need to account for lookback
+ expected := time.Unix(0, 0)
+ actual := millisTime(query.Start)
+ require.True(t, abs(expected.Sub(actual)) <= fudge,
+ fmt.Sprintf("expected=%v, actual=%v, fudge=%v, delta=%v",
+ expected, actual, fudge, expected.Sub(actual)))
+
+ fudge = 5 * time.Minute // Need to account for lookback
+ expected = time.Unix(0, 0)
+	actual = millisTime(query.End)
+ require.True(t, abs(expected.Sub(actual)) <= fudge,
+ fmt.Sprintf("expected=%v, actual=%v, fudge=%v, delta=%v",
+ expected, actual, fudge, expected.Sub(actual)))
+}
+
+func TestPromReadInstantHandlerParseMaxTime(t *testing.T) {
+ setup := setupTest(t)
+
+ var (
+ query *promstorage.SelectHints
+ selects int
+ )
+ setup.queryable.selectFn = func(
+ sortSeries bool,
+ hints *promstorage.SelectHints,
+ labelMatchers ...*labels.Matcher,
+ ) (promstorage.SeriesSet, promstorage.Warnings, error) {
+ selects++
+ query = hints
+ return &mockSeriesSet{}, nil, nil
+ }
+
+ req, _ := http.NewRequest("GET", native.PromReadInstantURL, nil)
+ params := defaultParams()
+ params.Set("time", maxTimeFormatted)
+ req.URL.RawQuery = params.Encode()
+
+ var resp response
+ recorder := httptest.NewRecorder()
+
+ setup.readInstantHandler.ServeHTTP(recorder, req)
+
+ require.NoError(t, json.Unmarshal(recorder.Body.Bytes(), &resp))
+ require.Equal(t, statusSuccess, resp.Status)
+
+ require.Equal(t, 1, selects)
+
+ fudge := 6 * time.Minute // Need to account for lookback + time.Now() skew
+ expected := time.Now()
+ actual := millisTime(query.Start)
+ require.True(t, abs(expected.Sub(actual)) <= fudge,
+ fmt.Sprintf("expected=%v, actual=%v, fudge=%v, delta=%v",
+ expected, actual, fudge, expected.Sub(actual)))
+
+ fudge = 6 * time.Minute // Need to account for lookback + time.Now() skew
+ expected = time.Now()
+	actual = millisTime(query.End)
+ require.True(t, abs(expected.Sub(actual)) <= fudge,
+ fmt.Sprintf("expected=%v, actual=%v, fudge=%v, delta=%v",
+ expected, actual, fudge, expected.Sub(actual)))
+}
+
+func abs(v time.Duration) time.Duration {
+ if v < 0 {
+ return v * -1
+ }
+ return v
+}
+
+func millisTime(timestampMilliseconds int64) time.Time {
+ return time.Unix(0, timestampMilliseconds*int64(time.Millisecond))
+}
diff --git a/src/query/api/v1/handler/prometheus/common.go b/src/query/api/v1/handler/prometheus/common.go
index f385cc01e3..5f01843ed2 100644
--- a/src/query/api/v1/handler/prometheus/common.go
+++ b/src/query/api/v1/handler/prometheus/common.go
@@ -25,24 +25,20 @@ import (
"fmt"
"io"
"io/ioutil"
- "math"
"net/http"
- "sort"
- "strconv"
- "strings"
"time"
"github.com/m3db/m3/src/query/errors"
"github.com/m3db/m3/src/query/models"
xpromql "github.com/m3db/m3/src/query/parser/promql"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/query/util"
"github.com/m3db/m3/src/query/util/json"
xhttp "github.com/m3db/m3/src/x/net/http"
"github.com/golang/snappy"
- "github.com/prometheus/prometheus/promql"
)
const (
@@ -110,7 +106,15 @@ func ParseRequestTimeout(
r *http.Request,
configFetchTimeout time.Duration,
) (time.Duration, error) {
- timeout := r.Header.Get("timeout")
+ var timeout string
+ if v := r.FormValue("timeout"); v != "" {
+ timeout = v
+ }
+ // Note: Header should take precedence.
+ if v := r.Header.Get("timeout"); v != "" {
+ timeout = v
+ }
+
if timeout == "" {
return configFetchTimeout, nil
}
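
A small in-package sketch of the precedence this gives — the form value is read first, then an explicit header overrides it (assuming fmt, net/http/httptest, and time imports):

	func exampleTimeoutPrecedence() {
		req := httptest.NewRequest("GET", "/query?timeout=10s", nil)
		req.Header.Set("timeout", "1s")
		timeout, _ := ParseRequestTimeout(req, 15*time.Second)
		// timeout == time.Second: the header wins over the form value.
		fmt.Println(timeout)
	}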
@@ -144,12 +148,14 @@ func ParseTagCompletionParamsToQueries(
r *http.Request,
) (TagCompletionQueries, *xhttp.ParseError) {
tagCompletionQueries := TagCompletionQueries{}
- start, err := parseTimeWithDefault(r, "start", time.Time{})
+ start, err := util.ParseTimeStringWithDefault(r.FormValue("start"),
+ time.Unix(0, 0))
if err != nil {
return tagCompletionQueries, xhttp.NewParseError(err, http.StatusBadRequest)
}
- end, err := parseTimeWithDefault(r, "end", time.Now())
+ end, err := util.ParseTimeStringWithDefault(r.FormValue("end"),
+ time.Now())
if err != nil {
return tagCompletionQueries, xhttp.NewParseError(err, http.StatusBadRequest)
}
@@ -213,21 +219,10 @@ func parseTagCompletionQueries(r *http.Request) ([]string, error) {
return queries, nil
}
-func parseTimeWithDefault(
- r *http.Request,
- key string,
- defaultTime time.Time,
-) (time.Time, error) {
- if t := r.FormValue(key); t != "" {
- return util.ParseTimeString(t)
- }
-
- return defaultTime, nil
-}
-
// ParseSeriesMatchQuery parses all params from the GET request.
func ParseSeriesMatchQuery(
r *http.Request,
+ parseOpts xpromql.ParseOptions,
tagOptions models.TagOptions,
) ([]*storage.FetchQuery, *xhttp.ParseError) {
r.ParseForm()
@@ -236,19 +231,22 @@ func ParseSeriesMatchQuery(
return nil, xhttp.NewParseError(errors.ErrInvalidMatchers, http.StatusBadRequest)
}
- start, err := parseTimeWithDefault(r, "start", time.Time{})
+ start, err := util.ParseTimeStringWithDefault(r.FormValue("start"),
+ time.Unix(0, 0))
if err != nil {
return nil, xhttp.NewParseError(err, http.StatusBadRequest)
}
- end, err := parseTimeWithDefault(r, "end", time.Now())
+ end, err := util.ParseTimeStringWithDefault(r.FormValue("end"),
+ time.Now())
if err != nil {
return nil, xhttp.NewParseError(err, http.StatusBadRequest)
}
queries := make([]*storage.FetchQuery, len(matcherValues))
+ fn := parseOpts.MetricSelectorFn()
for i, s := range matcherValues {
- promMatchers, err := promql.ParseMetricSelector(s)
+ promMatchers, err := fn(s)
if err != nil {
return nil, xhttp.NewParseError(err, http.StatusBadRequest)
}
@@ -271,7 +269,7 @@ func ParseSeriesMatchQuery(
func renderNameOnlyTagCompletionResultsJSON(
w io.Writer,
- results []storage.CompletedTag,
+ results []consolidators.CompletedTag,
) error {
jw := json.NewWriter(w)
jw.BeginArray()
@@ -287,7 +285,7 @@ func renderNameOnlyTagCompletionResultsJSON(
func renderDefaultTagCompletionResultsJSON(
w io.Writer,
- results []storage.CompletedTag,
+ results []consolidators.CompletedTag,
) error {
jw := json.NewWriter(w)
jw.BeginObject()
@@ -323,7 +321,7 @@ func renderDefaultTagCompletionResultsJSON(
// RenderListTagResultsJSON renders list tag results to json format.
func RenderListTagResultsJSON(
w io.Writer,
- result *storage.CompleteTagsResult,
+ result *consolidators.CompleteTagsResult,
) error {
if !result.CompleteNameOnly {
return errors.ErrWithNames
@@ -351,7 +349,7 @@ func RenderListTagResultsJSON(
// RenderTagCompletionResultsJSON renders tag completion results to json format.
func RenderTagCompletionResultsJSON(
- w io.Writer, result storage.CompleteTagsResult) error {
+ w io.Writer, result consolidators.CompleteTagsResult) error {
results := result.CompletedTags
if result.CompleteNameOnly {
return renderNameOnlyTagCompletionResultsJSON(w, results)
@@ -363,7 +361,7 @@ func RenderTagCompletionResultsJSON(
// RenderTagValuesResultsJSON renders tag values results to json format.
func RenderTagValuesResultsJSON(
w io.Writer,
- result *storage.CompleteTagsResult,
+ result *consolidators.CompleteTagsResult,
) error {
if result.CompleteNameOnly {
return errors.ErrNamesOnly
@@ -444,222 +442,6 @@ func RenderSeriesMatchResultsJSON(
return jw.Close()
}
-// Response represents Prometheus's query response.
-type Response struct {
- // Status is the response status.
- Status string `json:"status"`
- // Data is the response data.
- Data data `json:"data"`
-}
-
-type data struct {
- // ResultType is the result type for the response.
- ResultType string `json:"resultType"`
- // Result is the list of results for the response.
- Result results `json:"result"`
-}
-
-type results []Result
-
-// Len is the number of elements in the collection.
-func (r results) Len() int { return len(r) }
-
-// Less reports whether the element with
-// index i should sort before the element with index j.
-func (r results) Less(i, j int) bool {
- return r[i].id < r[j].id
-}
-
-// Swap swaps the elements with indexes i and j.
-func (r results) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
-
-// Sort sorts the results.
-func (r results) Sort() {
- for i, result := range r {
- r[i] = result.genID()
- }
-
- sort.Sort(r)
-}
-
-// Result is the result itself.
-type Result struct {
- // Metric is the tags for the result.
- Metric Tags `json:"metric"`
- // Values is the set of values for the result.
- Values Values `json:"values"`
- id string
-}
-
-// Tags is a simple representation of Prometheus tags.
-type Tags map[string]string
-
-// Values is a list of values for the Prometheus result.
-type Values []Value
-
-// Value is a single value for Prometheus result.
-type Value []interface{}
-
-func (r *Result) genID() Result {
- tags := make(sort.StringSlice, len(r.Metric))
- for k, v := range r.Metric {
- tags = append(tags, fmt.Sprintf("%s:%s,", k, v))
- }
-
- sort.Sort(tags)
- var sb strings.Builder
- // NB: this may clash but exact tag values are also checked, and this is a
- // validation endpoint so there's less concern over correctness.
- for _, t := range tags {
- sb.WriteString(t)
- }
-
- r.id = sb.String()
- return *r
-}
-
-// MatchInformation describes how well two responses match.
-type MatchInformation struct {
- // FullMatch indicates a full match.
- FullMatch bool
- // NoMatch indicates that the responses do not match sufficiently.
- NoMatch bool
-}
-
-// Matches compares two responses and determines how closely they match.
-func (p Response) Matches(other Response) (MatchInformation, error) {
- if p.Status != other.Status {
- err := fmt.Errorf("status %s does not match other status %s",
- p.Status, other.Status)
- return MatchInformation{
- NoMatch: true,
- }, err
- }
-
- return p.Data.matches(other.Data)
-}
-
-func (d data) matches(other data) (MatchInformation, error) {
- if d.ResultType != other.ResultType {
- err := fmt.Errorf("result type %s does not match other result type %s",
- d.ResultType, other.ResultType)
- return MatchInformation{
- NoMatch: true,
- }, err
- }
-
- return d.Result.matches(other.Result)
-}
-
-func (r results) matches(other results) (MatchInformation, error) {
- if len(r) != len(other) {
- err := fmt.Errorf("result length %d does not match other result length %d",
- len(r), len(other))
- return MatchInformation{
- NoMatch: true,
- }, err
- }
-
- r.Sort()
- other.Sort()
- for i, result := range r {
- if err := result.matches(other[i]); err != nil {
- return MatchInformation{
- NoMatch: true,
- }, err
- }
- }
-
- return MatchInformation{FullMatch: true}, nil
-}
-
-func (r Result) matches(other Result) error {
- // NB: tags should match by here so this is more of a sanity check.
- if err := r.Metric.matches(other.Metric); err != nil {
- return err
- }
-
- return r.Values.matches(other.Values)
-}
-
-func (t Tags) matches(other Tags) error {
- if len(t) != len(other) {
- return fmt.Errorf("tag length %d does not match other tag length %d",
- len(t), len(other))
- }
-
- for k, v := range t {
- if vv, ok := other[k]; ok {
- if v != vv {
- return fmt.Errorf("tag %s does not match other tag length %s", v, vv)
- }
- } else {
- return fmt.Errorf("tag %s not found in other tagset", v)
- }
- }
-
- return nil
-}
-
-func (v Values) matches(other Values) error {
- if len(v) != len(other) {
- return fmt.Errorf("values length %d does not match other values length %d",
- len(v), len(other))
- }
-
- for i, val := range v {
- if err := val.matches(other[i]); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (v Value) matches(other Value) error {
- if len(v) != 2 {
- return fmt.Errorf("value length %d must be 2", len(v))
- }
-
- if len(other) != 2 {
- return fmt.Errorf("other value length %d must be 2", len(other))
- }
-
- tsV := fmt.Sprint(v[0])
- tsOther := fmt.Sprint(v[0])
- if tsV != tsOther {
- return fmt.Errorf("ts %s does not match other ts %s", tsV, tsOther)
- }
-
- valV, err := strconv.ParseFloat(fmt.Sprint(v[1]), 64)
- if err != nil {
- return err
- }
-
- valOther, err := strconv.ParseFloat(fmt.Sprint(other[1]), 64)
- if err != nil {
- return err
- }
-
- if math.Abs(valV-valOther) > tolerance {
- return fmt.Errorf("point %f does not match other point %f", valV, valOther)
- }
-
- for i, val := range v {
- otherVal := other[i]
- if val != otherVal {
- }
- }
-
- return nil
-}
-
-// PromDebug represents the input and output that are used in the debug endpoint.
-type PromDebug struct {
- Input Response `json:"input"`
- Results Response `json:"results"`
-}
-
// FilterSeriesByOptions removes series tags based on options.
func FilterSeriesByOptions(
series []*ts.Series,
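
ParseSeriesMatchQuery now takes its metric selector function from the parse options instead of calling the upstream parser directly. A sketch of what such a selector function does, assuming the prometheus promql/parser package (the removed import pointed at the pre-split promql package, which exposed the same ParseMetricSelector):

	package main

	import (
		"fmt"

		parser "github.com/prometheus/prometheus/promql/parser"
	)

	func main() {
		// The default selector presumably wraps the upstream parser, as the
		// removed code called promql.ParseMetricSelector directly.
		matchers, err := parser.ParseMetricSelector(`up{job="prometheus"}`)
		if err != nil {
			panic(err)
		}
		for _, m := range matchers {
			fmt.Println(m.Type, m.Name, m.Value)
		}
	}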
diff --git a/src/query/api/v1/handler/prometheus/common_test.go b/src/query/api/v1/handler/prometheus/common_test.go
index dc4ec68ff0..5f3f993ca1 100644
--- a/src/query/api/v1/handler/prometheus/common_test.go
+++ b/src/query/api/v1/handler/prometheus/common_test.go
@@ -23,46 +23,51 @@ package prometheus
import (
"bytes"
"fmt"
+ "mime/multipart"
"net/http"
+ "net/http/httptest"
+ "net/url"
"strings"
"testing"
"time"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/test"
+ xhttp "github.com/m3db/m3/src/x/net/http"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestPromCompressedReadSuccess(t *testing.T) {
- req, _ := http.NewRequest("POST", "dummy", test.GeneratePromReadBody(t))
+ req := httptest.NewRequest("POST", "/dummy", test.GeneratePromReadBody(t))
_, err := ParsePromCompressedRequest(req)
assert.NoError(t, err)
}
func TestPromCompressedReadNoBody(t *testing.T) {
- req, _ := http.NewRequest("POST", "dummy", nil)
+ req := httptest.NewRequest("POST", "/dummy", nil)
_, err := ParsePromCompressedRequest(req)
assert.Error(t, err)
assert.Equal(t, err.Code(), http.StatusBadRequest)
}
func TestPromCompressedReadEmptyBody(t *testing.T) {
- req, _ := http.NewRequest("POST", "dummy", bytes.NewReader([]byte{}))
+ req := httptest.NewRequest("POST", "/dummy", bytes.NewReader([]byte{}))
_, err := ParsePromCompressedRequest(req)
assert.Error(t, err)
assert.Equal(t, err.Code(), http.StatusBadRequest)
}
func TestPromCompressedReadInvalidEncoding(t *testing.T) {
- req, _ := http.NewRequest("POST", "dummy", bytes.NewReader([]byte{'a'}))
+ req := httptest.NewRequest("POST", "/dummy", bytes.NewReader([]byte{'a'}))
_, err := ParsePromCompressedRequest(req)
assert.Error(t, err)
assert.Equal(t, err.Code(), http.StatusBadRequest)
}
-func TestTimeoutParse(t *testing.T) {
- req, _ := http.NewRequest("POST", "dummy", nil)
+func TestTimeoutParseWithHeader(t *testing.T) {
+ req := httptest.NewRequest("POST", "/dummy", nil)
req.Header.Add("timeout", "1ms")
timeout, err := ParseRequestTimeout(req, time.Second)
@@ -79,6 +84,34 @@ func TestTimeoutParse(t *testing.T) {
assert.Error(t, err)
}
+func TestTimeoutParseWithPostRequestParam(t *testing.T) {
+ buff := bytes.NewBuffer(nil)
+ form := multipart.NewWriter(buff)
+ form.WriteField("timeout", "1ms")
+ require.NoError(t, form.Close())
+
+ req := httptest.NewRequest("POST", "/dummy", buff)
+ req.Header.Set(xhttp.HeaderContentType, form.FormDataContentType())
+
+ timeout, err := ParseRequestTimeout(req, time.Second)
+ assert.NoError(t, err)
+ assert.Equal(t, timeout, time.Millisecond)
+}
+
+func TestTimeoutParseWithGetRequestParam(t *testing.T) {
+ params := url.Values{}
+ params.Add("timeout", "1ms")
+
+ req := httptest.NewRequest("GET", "/dummy?"+params.Encode(), nil)
+
+ timeout, err := ParseRequestTimeout(req, time.Second)
+ assert.NoError(t, err)
+ assert.Equal(t, timeout, time.Millisecond)
+}
+
type writer struct {
value string
}
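A note on the precedence the three timeout tests above exercise: the header is consulted first, then the form or query parameter, then the supplied default (header-first ordering is an assumption by analogy with ParseLimit in fetch_options.go below). A minimal same-package sketch, with a hypothetical function name:

    // exampleParseRequestTimeout sketches the three ways a caller can set the
    // request timeout, from highest to lowest assumed precedence.
    func exampleParseRequestTimeout() {
    	// 1. Explicit "timeout" header.
    	req := httptest.NewRequest("POST", "/dummy", nil)
    	req.Header.Add("timeout", "1ms")
    	timeout, _ := ParseRequestTimeout(req, time.Second) // -> 1ms

    	// 2. No header: the "timeout" query (or form) parameter is consulted.
    	req = httptest.NewRequest("GET", "/dummy?timeout=5ms", nil)
    	timeout, _ = ParseRequestTimeout(req, time.Second) // -> 5ms

    	// 3. Neither present: the supplied default wins.
    	req = httptest.NewRequest("GET", "/dummy", nil)
    	timeout, _ = ParseRequestTimeout(req, time.Second) // -> 1s
    	_ = timeout
    }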
diff --git a/src/query/api/v1/handler/prometheus/handleroptions/fetch_options.go b/src/query/api/v1/handler/prometheus/handleroptions/fetch_options.go
index 8b830936a4..0c8db78124 100644
--- a/src/query/api/v1/handler/prometheus/handleroptions/fetch_options.go
+++ b/src/query/api/v1/handler/prometheus/handleroptions/fetch_options.go
@@ -32,6 +32,8 @@ import (
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/query/errors"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
+ "github.com/m3db/m3/src/x/headers"
xhttp "github.com/m3db/m3/src/x/net/http"
)
@@ -54,7 +56,16 @@ type FetchOptionsBuilder interface {
// FetchOptionsBuilderOptions provides options to use when creating a
// fetch options builder.
type FetchOptionsBuilderOptions struct {
- Limit int
+ Limits FetchOptionsBuilderLimitsOptions
+ RestrictByTag *storage.RestrictByTag
+}
+
+// FetchOptionsBuilderLimitsOptions provides limits options to use when
+// creating a fetch options builder.
+type FetchOptionsBuilderLimitsOptions struct {
+ SeriesLimit int
+ DocsLimit int
+ RequireExhaustive bool
}
type fetchOptionsBuilder struct {
@@ -69,45 +80,87 @@ func NewFetchOptionsBuilder(
}
// ParseLimit parses request limit from either header or query string.
-func ParseLimit(req *http.Request, defaultLimit int) (int, error) {
- if str := req.Header.Get(LimitMaxSeriesHeader); str != "" {
+func ParseLimit(req *http.Request, header, formValue string, defaultLimit int) (int, error) {
+ if str := req.Header.Get(header); str != "" {
n, err := strconv.Atoi(str)
if err != nil {
err = fmt.Errorf(
"could not parse limit: input=%s, err=%v", str, err)
return 0, err
}
-
return n, nil
}
- if str := req.URL.Query().Get("limit"); str != "" {
+ if str := req.FormValue(formValue); str != "" {
n, err := strconv.Atoi(str)
if err != nil {
err = fmt.Errorf(
"could not parse limit: input=%s, err=%v", str, err)
return 0, err
}
-
return n, nil
}
return defaultLimit, nil
}
+// ParseRequireExhaustive parses the require-exhaustive limit option from the
+// request header or form/query value.
+func ParseRequireExhaustive(req *http.Request, defaultValue bool) (bool, error) {
+ if str := req.Header.Get(headers.LimitRequireExhaustiveHeader); str != "" {
+ v, err := strconv.ParseBool(str)
+ if err != nil {
+ err = fmt.Errorf(
+ "could not parse requireExhaustive: input=%s, err=%v", str, err)
+ return false, err
+ }
+ return v, nil
+ }
+
+ if str := req.FormValue("requireExhaustive"); str != "" {
+ v, err := strconv.ParseBool(str)
+ if err != nil {
+ err = fmt.Errorf(
+ "could not parse requireExhaustive: input=%s, err=%v", str, err)
+ return false, err
+ }
+ return v, nil
+ }
+
+ return defaultValue, nil
+}
+
// NewFetchOptions parses an http request into fetch options.
func (b fetchOptionsBuilder) NewFetchOptions(
req *http.Request,
) (*storage.FetchOptions, *xhttp.ParseError) {
fetchOpts := storage.NewFetchOptions()
- limit, err := ParseLimit(req, b.opts.Limit)
+
+ seriesLimit, err := ParseLimit(req, headers.LimitMaxSeriesHeader,
+ "limit", b.opts.Limits.SeriesLimit)
+ if err != nil {
+ return nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+
+ fetchOpts.SeriesLimit = seriesLimit
+
+ docsLimit, err := ParseLimit(req, headers.LimitMaxDocsHeader,
+ "docsLimit", b.opts.Limits.DocsLimit)
if err != nil {
return nil, xhttp.NewParseError(err, http.StatusBadRequest)
}
- fetchOpts.Limit = limit
- if str := req.Header.Get(MetricsTypeHeader); str != "" {
- mt, err := storage.ParseMetricsType(str)
+ fetchOpts.DocsLimit = docsLimit
+
+ requireExhaustive, err := ParseRequireExhaustive(req, b.opts.Limits.RequireExhaustive)
+ if err != nil {
+ return nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+
+ fetchOpts.RequireExhaustive = requireExhaustive
+
+ if str := req.Header.Get(headers.MetricsTypeHeader); str != "" {
+ mt, err := storagemetadata.ParseMetricsType(str)
if err != nil {
err = fmt.Errorf(
"could not parse metrics type: input=%s, err=%v", str, err)
@@ -120,7 +173,7 @@ func (b fetchOptionsBuilder) NewFetchOptions(
fetchOpts.RestrictQueryOptions.RestrictByType.MetricsType = mt
}
- if str := req.Header.Get(MetricsStoragePolicyHeader); str != "" {
+ if str := req.Header.Get(headers.MetricsStoragePolicyHeader); str != "" {
sp, err := policy.ParseStoragePolicy(str)
if err != nil {
err = fmt.Errorf(
@@ -134,19 +187,24 @@ func (b fetchOptionsBuilder) NewFetchOptions(
fetchOpts.RestrictQueryOptions.RestrictByType.StoragePolicy = sp
}
- if str := req.Header.Get(RestrictByTagsJSONHeader); str != "" {
+ if str := req.Header.Get(headers.RestrictByTagsJSONHeader); str != "" {
+ // Allow header to override any default restrict by tags config.
var opts StringTagOptions
if err := json.Unmarshal([]byte(str), &opts); err != nil {
return nil, xhttp.NewParseError(err, http.StatusBadRequest)
}
- tagOpts, err := opts.toOptions()
+ tagOpts, err := opts.StorageOptions()
if err != nil {
return nil, xhttp.NewParseError(err, http.StatusBadRequest)
}
fetchOpts.RestrictQueryOptions = newOrExistingRestrictQueryOptions(fetchOpts)
fetchOpts.RestrictQueryOptions.RestrictByTag = tagOpts
+ } else if defaultTagOpts := b.opts.RestrictByTag; defaultTagOpts != nil {
+ // Apply defaults if not overridden by header.
+ fetchOpts.RestrictQueryOptions = newOrExistingRestrictQueryOptions(fetchOpts)
+ fetchOpts.RestrictQueryOptions.RestrictByTag = defaultTagOpts
}
if restrict := fetchOpts.RestrictQueryOptions; restrict != nil {
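Taken together, the limits refactor changes how callers construct the builder: the single Limit becomes a Limits struct with separate series and docs limits plus a require-exhaustive flag, each overridable per request by header or form value. A minimal sketch of the wiring; the handler name and limit values are hypothetical, while the builder API is as in this diff:

    package main

    import (
    	"net/http"

    	"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
    )

    // newQueryHandler wires the builder once at startup; each request may then
    // override the defaults via the corresponding x/headers constants or the
    // "limit", "docsLimit", and "requireExhaustive" form values parsed above.
    func newQueryHandler() http.Handler {
    	builder := handleroptions.NewFetchOptionsBuilder(handleroptions.FetchOptionsBuilderOptions{
    		Limits: handleroptions.FetchOptionsBuilderLimitsOptions{
    			SeriesLimit:       10000,
    			DocsLimit:         50000,
    			RequireExhaustive: true,
    		},
    	})
    	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		fetchOpts, rErr := builder.NewFetchOptions(r)
    		if rErr != nil {
    			http.Error(w, "invalid fetch options", rErr.Code())
    			return
    		}
    		_ = fetchOpts // SeriesLimit/DocsLimit/RequireExhaustive flow downstream
    	})
    }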
diff --git a/src/query/api/v1/handler/prometheus/handleroptions/fetch_options_test.go b/src/query/api/v1/handler/prometheus/handleroptions/fetch_options_test.go
index 34337f2266..427c78ad1a 100644
--- a/src/query/api/v1/handler/prometheus/handleroptions/fetch_options_test.go
+++ b/src/query/api/v1/handler/prometheus/handleroptions/fetch_options_test.go
@@ -26,12 +26,15 @@ import (
"net/http"
"net/http/httptest"
"net/url"
+ "regexp"
"testing"
"time"
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
+ "github.com/m3db/m3/src/x/headers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -43,14 +46,15 @@ func TestFetchOptionsBuilder(t *testing.T) {
}
tests := []struct {
- name string
- defaultLimit int
- headers map[string]string
- query string
- expectedLimit int
- expectedRestrict *storage.RestrictQueryOptions
- expectedLookback *expectedLookback
- expectedErr bool
+ name string
+ defaultLimit int
+ defaultRestrictByTag *storage.RestrictByTag
+ headers map[string]string
+ query string
+ expectedLimit int
+ expectedRestrict *storage.RestrictQueryOptions
+ expectedLookback *expectedLookback
+ expectedErr bool
}{
{
name: "default limit with no headers",
@@ -62,7 +66,7 @@ func TestFetchOptionsBuilder(t *testing.T) {
name: "limit with header",
defaultLimit: 42,
headers: map[string]string{
- LimitMaxSeriesHeader: "4242",
+ headers.LimitMaxSeriesHeader: "4242",
},
expectedLimit: 4242,
},
@@ -70,30 +74,30 @@ func TestFetchOptionsBuilder(t *testing.T) {
name: "bad header",
defaultLimit: 42,
headers: map[string]string{
- LimitMaxSeriesHeader: "not_a_number",
+ headers.LimitMaxSeriesHeader: "not_a_number",
},
expectedErr: true,
},
{
name: "unaggregated metrics type",
headers: map[string]string{
- MetricsTypeHeader: storage.UnaggregatedMetricsType.String(),
+ headers.MetricsTypeHeader: storagemetadata.UnaggregatedMetricsType.String(),
},
expectedRestrict: &storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.UnaggregatedMetricsType,
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
},
},
},
{
name: "aggregated metrics type",
headers: map[string]string{
- MetricsTypeHeader: storage.AggregatedMetricsType.String(),
- MetricsStoragePolicyHeader: "1m:14d",
+ headers.MetricsTypeHeader: storagemetadata.AggregatedMetricsType.String(),
+ headers.MetricsStoragePolicyHeader: "1m:14d",
},
expectedRestrict: &storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.AggregatedMetricsType,
+ MetricsType: storagemetadata.AggregatedMetricsType,
StoragePolicy: policy.MustParseStoragePolicy("1m:14d"),
},
},
@@ -101,22 +105,22 @@ func TestFetchOptionsBuilder(t *testing.T) {
{
name: "unaggregated metrics type with storage policy",
headers: map[string]string{
- MetricsTypeHeader: storage.UnaggregatedMetricsType.String(),
- MetricsStoragePolicyHeader: "1m:14d",
+ headers.MetricsTypeHeader: storagemetadata.UnaggregatedMetricsType.String(),
+ headers.MetricsStoragePolicyHeader: "1m:14d",
},
expectedErr: true,
},
{
name: "aggregated metrics type without storage policy",
headers: map[string]string{
- MetricsTypeHeader: storage.AggregatedMetricsType.String(),
+ headers.MetricsTypeHeader: storagemetadata.AggregatedMetricsType.String(),
},
expectedErr: true,
},
{
name: "unrecognized metrics type",
headers: map[string]string{
- MetricsTypeHeader: "foo",
+ headers.MetricsTypeHeader: "foo",
},
expectedErr: true,
},
@@ -149,12 +153,72 @@ func TestFetchOptionsBuilder(t *testing.T) {
query: "lookback=step&step=-1",
expectedErr: true,
},
+ {
+ name: "restrict by tags json header",
+ headers: map[string]string{
+ headers.RestrictByTagsJSONHeader: stripSpace(`{
+ "match":[{"name":"foo", "value":"bar", "type":"EQUAL"}],
+ "strip":["foo"]
+ }`),
+ },
+ expectedRestrict: &storage.RestrictQueryOptions{
+ RestrictByTag: &storage.RestrictByTag{
+ Restrict: models.Matchers{
+ mustMatcher("foo", "bar", models.MatchEqual),
+ },
+ Strip: toStrip("foo"),
+ },
+ },
+ },
+ {
+ name: "restrict by tags json defaults",
+ defaultRestrictByTag: &storage.RestrictByTag{
+ Restrict: models.Matchers{
+ mustMatcher("foo", "bar", models.MatchEqual),
+ },
+ Strip: toStrip("foo"),
+ },
+ expectedRestrict: &storage.RestrictQueryOptions{
+ RestrictByTag: &storage.RestrictByTag{
+ Restrict: models.Matchers{
+ mustMatcher("foo", "bar", models.MatchEqual),
+ },
+ Strip: toStrip("foo"),
+ },
+ },
+ },
+ {
+ name: "restrict by tags json default override by header",
+ defaultRestrictByTag: &storage.RestrictByTag{
+ Restrict: models.Matchers{
+ mustMatcher("foo", "bar", models.MatchEqual),
+ },
+ Strip: toStrip("foo"),
+ },
+ headers: map[string]string{
+ headers.RestrictByTagsJSONHeader: stripSpace(`{
+ "match":[{"name":"qux", "value":"qaz", "type":"EQUAL"}],
+ "strip":["qux"]
+ }`),
+ },
+ expectedRestrict: &storage.RestrictQueryOptions{
+ RestrictByTag: &storage.RestrictByTag{
+ Restrict: models.Matchers{
+ mustMatcher("qux", "qaz", models.MatchEqual),
+ },
+ Strip: toStrip("qux"),
+ },
+ },
+ },
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
builder := NewFetchOptionsBuilder(FetchOptionsBuilderOptions{
- Limit: test.defaultLimit,
+ Limits: FetchOptionsBuilderLimitsOptions{
+ SeriesLimit: test.defaultLimit,
+ },
+ RestrictByTag: test.defaultRestrictByTag,
})
url := "/foo"
@@ -170,7 +234,7 @@ func TestFetchOptionsBuilder(t *testing.T) {
if !test.expectedErr {
require.NoError(t, err)
- require.Equal(t, test.expectedLimit, opts.Limit)
+ require.Equal(t, test.expectedLimit, opts.SeriesLimit)
if test.expectedRestrict == nil {
require.Nil(t, opts.RestrictQueryOptions)
} else {
@@ -281,9 +345,9 @@ func TestFetchOptionsWithHeader(t *testing.T) {
}
headers := map[string]string{
- MetricsTypeHeader: storage.AggregatedMetricsType.String(),
- MetricsStoragePolicyHeader: "1m:14d",
- RestrictByTagsJSONHeader: `{
+ headers.MetricsTypeHeader: storagemetadata.AggregatedMetricsType.String(),
+ headers.MetricsStoragePolicyHeader: "1m:14d",
+ headers.RestrictByTagsJSONHeader: `{
"match":[
{"name":"a", "value":"b", "type":"EQUAL"},
{"name":"c", "value":"d", "type":"NOTEQUAL"},
@@ -296,7 +360,11 @@ func TestFetchOptionsWithHeader(t *testing.T) {
}`,
}
- builder := NewFetchOptionsBuilder(FetchOptionsBuilderOptions{Limit: 5})
+ builder := NewFetchOptionsBuilder(FetchOptionsBuilderOptions{
+ Limits: FetchOptionsBuilderLimitsOptions{
+ SeriesLimit: 5,
+ },
+ })
req := httptest.NewRequest("GET", "/", nil)
for k, v := range headers {
req.Header.Add(k, v)
@@ -307,7 +375,7 @@ func TestFetchOptionsWithHeader(t *testing.T) {
require.NotNil(t, opts.RestrictQueryOptions)
ex := &storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.AggregatedMetricsType,
+ MetricsType: storagemetadata.AggregatedMetricsType,
StoragePolicy: policy.MustParseStoragePolicy("1m:14d"),
},
RestrictByTag: &storage.RestrictByTag{
@@ -325,3 +393,7 @@ func TestFetchOptionsWithHeader(t *testing.T) {
require.Equal(t, ex, opts.RestrictQueryOptions)
}
+
+func stripSpace(str string) string {
+ return regexp.MustCompile(`\s+`).ReplaceAllString(str, "")
+}
diff --git a/src/query/api/v1/handler/prometheus/handleroptions/header_test.go b/src/query/api/v1/handler/prometheus/handleroptions/header_test.go
index a8829115e8..89d90a5778 100644
--- a/src/query/api/v1/handler/prometheus/handleroptions/header_test.go
+++ b/src/query/api/v1/handler/prometheus/handleroptions/header_test.go
@@ -26,6 +26,7 @@ import (
"testing"
"github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/x/headers"
"github.com/stretchr/testify/assert"
)
@@ -38,22 +39,22 @@ func TestAddWarningHeaders(t *testing.T) {
recorder = httptest.NewRecorder()
meta.Exhaustive = false
- ex := LimitHeaderSeriesLimitApplied
+ ex := headers.LimitHeaderSeriesLimitApplied
AddWarningHeaders(recorder, meta)
assert.Equal(t, 1, len(recorder.Header()))
- assert.Equal(t, ex, recorder.Header().Get(LimitHeader))
+ assert.Equal(t, ex, recorder.Header().Get(headers.LimitHeader))
recorder = httptest.NewRecorder()
meta.AddWarning("foo", "bar")
- ex = fmt.Sprintf("%s,%s_%s", LimitHeaderSeriesLimitApplied, "foo", "bar")
+ ex = fmt.Sprintf("%s,%s_%s", headers.LimitHeaderSeriesLimitApplied, "foo", "bar")
AddWarningHeaders(recorder, meta)
assert.Equal(t, 1, len(recorder.Header()))
- assert.Equal(t, ex, recorder.Header().Get(LimitHeader))
+ assert.Equal(t, ex, recorder.Header().Get(headers.LimitHeader))
recorder = httptest.NewRecorder()
meta.Exhaustive = true
ex = "foo_bar"
AddWarningHeaders(recorder, meta)
assert.Equal(t, 1, len(recorder.Header()))
- assert.Equal(t, ex, recorder.Header().Get(LimitHeader))
+ assert.Equal(t, ex, recorder.Header().Get(headers.LimitHeader))
}
diff --git a/src/query/api/v1/handler/prometheus/handleroptions/headers.go b/src/query/api/v1/handler/prometheus/handleroptions/headers.go
index 4a976fa064..0694db73ab 100644
--- a/src/query/api/v1/handler/prometheus/handleroptions/headers.go
+++ b/src/query/api/v1/handler/prometheus/handleroptions/headers.go
@@ -25,66 +25,7 @@ import (
"strings"
"github.com/m3db/m3/src/query/block"
-)
-
-const (
- // WarningsHeader is the M3 warnings header when to display a warning to a user.
- WarningsHeader = "M3-Warnings"
-
- // RetryHeader is the M3 retry header to display when it is safe to retry.
- RetryHeader = "M3-Retry"
-
- // ServedByHeader is the M3 query storage execution breakdown.
- ServedByHeader = "M3-Storage-By"
-
- // DeprecatedHeader is the M3 deprecated header.
- DeprecatedHeader = "M3-Deprecated"
-
- // MetricsTypeHeader sets the write or read metrics type to restrict
- // metrics to.
- // Valid values are "unaggregated" or "aggregated".
- MetricsTypeHeader = "M3-Metrics-Type"
-
- // MetricsStoragePolicyHeader specifies the resolution and retention of
- // metrics being written or read.
- // In the form of a storage policy string, e.g. "1m:14d".
- // Only required if the metrics type header does not specify unaggregated
- // metrics type.
- MetricsStoragePolicyHeader = "M3-Storage-Policy"
-
- // RestrictByTagsJSONHeader provides tag options to enforces on queries,
- // in JSON format. See `handler.stringTagOptions` for definitions.`
- RestrictByTagsJSONHeader = "M3-Restrict-By-Tags-JSON"
-
- // LimitMaxSeriesHeader is the M3 limit timeseries header that limits
- // the number of time series returned by each storage node.
- LimitMaxSeriesHeader = "M3-Limit-Max-Series"
-
- // UnaggregatedStoragePolicy specifies the unaggregated storage policy.
- UnaggregatedStoragePolicy = "unaggregated"
-
- // DefaultServiceEnvironment is the default service ID environment.
- DefaultServiceEnvironment = "default_env"
- // DefaultServiceZone is the default service ID zone.
- DefaultServiceZone = "embedded"
-
- // HeaderClusterEnvironmentName is the header used to specify the environment
- // name.
- HeaderClusterEnvironmentName = "Cluster-Environment-Name"
- // HeaderClusterZoneName is the header used to specify the zone name.
- HeaderClusterZoneName = "Cluster-Zone-Name"
- // HeaderDryRun is the header used to specify whether this should be a dry
- // run.
- HeaderDryRun = "Dry-Run"
- // HeaderForce is the header used to specify whether this should be a forced operation.
- HeaderForce = "Force"
-
- // LimitHeader is the header added when returned series are limited.
- LimitHeader = "M3-Results-Limited"
-
- // LimitHeaderSeriesLimitApplied is the header applied when fetch results are
- // maxed.
- LimitHeaderSeriesLimitApplied = "max_fetch_series_limit_applied"
+ "github.com/m3db/m3/src/x/headers"
)
// AddWarningHeaders adds any warning headers present in the result's metadata.
@@ -102,12 +43,12 @@ func AddWarningHeaders(w http.ResponseWriter, meta block.ResultMetadata) {
warnings := make([]string, 0, warns)
if !ex {
- warnings = append(warnings, LimitHeaderSeriesLimitApplied)
+ warnings = append(warnings, headers.LimitHeaderSeriesLimitApplied)
}
for _, warn := range meta.Warnings {
warnings = append(warnings, warn.Header())
}
- w.Header().Set(LimitHeader, strings.Join(warnings, ","))
+ w.Header().Set(headers.LimitHeader, strings.Join(warnings, ","))
}
diff --git a/src/query/api/v1/handler/prometheus/handleroptions/options.go b/src/query/api/v1/handler/prometheus/handleroptions/options.go
index b0f3e41c9a..03cca32005 100644
--- a/src/query/api/v1/handler/prometheus/handleroptions/options.go
+++ b/src/query/api/v1/handler/prometheus/handleroptions/options.go
@@ -22,6 +22,8 @@ package handleroptions
import (
"time"
+
+ "github.com/m3db/m3/src/x/retry"
)
// PromWriteHandlerForwardingOptions is the forwarding
@@ -30,6 +32,7 @@ type PromWriteHandlerForwardingOptions struct {
// MaxConcurrency is the max parallel forwarding and if zero will be unlimited.
MaxConcurrency int `yaml:"maxConcurrency"`
Timeout time.Duration `yaml:"timeout"`
+ Retry *retry.Configuration `yaml:"retry"`
Targets []PromWriteHandlerForwardTargetOptions `yaml:"targets"`
}
@@ -40,4 +43,6 @@ type PromWriteHandlerForwardTargetOptions struct {
URL string `yaml:"url"`
// Method defaults to POST if not set.
Method string `yaml:"method"`
+ // Headers to send along with requests to the target.
+ Headers map[string]string `yaml:"headers"`
}
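For context on the two new fields, a hedged sketch of a forwarding configuration constructed in Go. Values and the target URL are illustrative; the empty retry.Configuration is only a placeholder, and leaving Retry nil is assumed to disable retries:

    package main

    import (
    	"time"

    	"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
    	"github.com/m3db/m3/src/x/retry"
    )

    func exampleForwarding() handleroptions.PromWriteHandlerForwardingOptions {
    	return handleroptions.PromWriteHandlerForwardingOptions{
    		MaxConcurrency: 8, // zero means unlimited parallel forwards
    		Timeout:        10 * time.Second,
    		Retry:          &retry.Configuration{}, // retry policy for failed forwards
    		Targets: []handleroptions.PromWriteHandlerForwardTargetOptions{
    			{
    				URL:    "http://example-coordinator:7201/api/v1/prom/remote/write",
    				Method: "POST", // the default when unset
    				// New: static headers attached to every forwarded request.
    				Headers: map[string]string{
    					"M3-Metrics-Type": "unaggregated",
    				},
    			},
    		},
    	}
    }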
diff --git a/src/query/api/v1/handler/prometheus/handleroptions/service_options.go b/src/query/api/v1/handler/prometheus/handleroptions/service_options.go
index bed2671dad..bff792d87c 100644
--- a/src/query/api/v1/handler/prometheus/handleroptions/service_options.go
+++ b/src/query/api/v1/handler/prometheus/handleroptions/service_options.go
@@ -26,7 +26,9 @@ import (
"strings"
"time"
+ "github.com/m3db/m3/src/cluster/kv"
"github.com/m3db/m3/src/cluster/services"
+ "github.com/m3db/m3/src/x/headers"
)
const (
@@ -100,13 +102,13 @@ type ServiceNameAndDefaults struct {
// values.
func NewServiceOptions(
service ServiceNameAndDefaults,
- headers http.Header,
+ header http.Header,
m3AggOpts *M3AggServiceOptions,
) ServiceOptions {
opts := ServiceOptions{
ServiceName: service.ServiceName,
- ServiceEnvironment: DefaultServiceEnvironment,
- ServiceZone: DefaultServiceZone,
+ ServiceEnvironment: headers.DefaultServiceEnvironment,
+ ServiceZone: headers.DefaultServiceZone,
DryRun: false,
Force: false,
@@ -120,16 +122,16 @@ func NewServiceOptions(
opts = applyDefault(opts)
}
- if v := strings.TrimSpace(headers.Get(HeaderClusterEnvironmentName)); v != "" {
+ if v := strings.TrimSpace(header.Get(headers.HeaderClusterEnvironmentName)); v != "" {
opts.ServiceEnvironment = v
}
- if v := strings.TrimSpace(headers.Get(HeaderClusterZoneName)); v != "" {
+ if v := strings.TrimSpace(header.Get(headers.HeaderClusterZoneName)); v != "" {
opts.ServiceZone = v
}
- if v := strings.TrimSpace(headers.Get(HeaderDryRun)); v == "true" {
+ if v := strings.TrimSpace(header.Get(headers.HeaderDryRun)); v == "true" {
opts.DryRun = true
}
- if v := strings.TrimSpace(headers.Get(HeaderForce)); v == "true" {
+ if v := strings.TrimSpace(header.Get(headers.HeaderForce)); v == "true" {
opts.Force = true
}
@@ -170,3 +172,10 @@ func (opts *ServiceOptions) ServiceID() services.ServiceID {
SetEnvironment(opts.ServiceEnvironment).
SetZone(opts.ServiceZone)
}
+
+// KVOverrideOptions constructs KV overrides from the current service options.
+func (opts *ServiceOptions) KVOverrideOptions() kv.OverrideOptions {
+ return kv.NewOverrideOptions().
+ SetEnvironment(opts.ServiceEnvironment).
+ SetZone(opts.ServiceZone)
+}
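The new KVOverrideOptions helper pairs naturally with NewServiceOptions; a minimal sketch of deriving KV overrides from request headers (the function name is hypothetical, and passing nil aggregator options is an assumption for brevity):

    package main

    import (
    	"net/http"

    	"github.com/m3db/m3/src/cluster/kv"
    	"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
    )

    // kvOptsForRequest resolves the service environment/zone (the cluster
    // headers override the x/headers defaults) and converts them into KV
    // override options for a subsequent cluster KV write.
    func kvOptsForRequest(
    	service handleroptions.ServiceNameAndDefaults,
    	r *http.Request,
    ) kv.OverrideOptions {
    	svcOpts := handleroptions.NewServiceOptions(service, r.Header, nil)
    	return svcOpts.KVOverrideOptions()
    }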
diff --git a/src/query/api/v1/handler/prometheus/handleroptions/service_options_test.go b/src/query/api/v1/handler/prometheus/handleroptions/service_options_test.go
index 055cc1c02a..682563756e 100644
--- a/src/query/api/v1/handler/prometheus/handleroptions/service_options_test.go
+++ b/src/query/api/v1/handler/prometheus/handleroptions/service_options_test.go
@@ -25,6 +25,8 @@ import (
"testing"
"time"
+ "github.com/m3db/m3/src/x/headers"
+
"github.com/stretchr/testify/assert"
)
@@ -39,8 +41,8 @@ func TestNewServiceOptions(t *testing.T) {
service: "foo",
exp: ServiceOptions{
ServiceName: "foo",
- ServiceEnvironment: DefaultServiceEnvironment,
- ServiceZone: DefaultServiceZone,
+ ServiceEnvironment: headers.DefaultServiceEnvironment,
+ ServiceZone: headers.DefaultServiceZone,
M3Agg: &M3AggServiceOptions{
MaxAggregationWindowSize: time.Minute,
},
@@ -49,9 +51,9 @@ func TestNewServiceOptions(t *testing.T) {
{
service: "foo",
headers: map[string]string{
- HeaderClusterEnvironmentName: "bar",
- HeaderClusterZoneName: "baz",
- HeaderDryRun: "true",
+ headers.HeaderClusterEnvironmentName: "bar",
+ headers.HeaderClusterZoneName: "baz",
+ headers.HeaderDryRun: "true",
},
aggOpts: &M3AggServiceOptions{
MaxAggregationWindowSize: 2 * time.Minute,
diff --git a/src/query/api/v1/handler/prometheus/handleroptions/tag_options.go b/src/query/api/v1/handler/prometheus/handleroptions/tag_options.go
index 318dc5068d..0eeb8ae247 100644
--- a/src/query/api/v1/handler/prometheus/handleroptions/tag_options.go
+++ b/src/query/api/v1/handler/prometheus/handleroptions/tag_options.go
@@ -61,7 +61,14 @@ func (m StringMatch) toMatcher() (models.Matcher, error) {
return models.NewMatcher(t, []byte(m.Name), []byte(m.Value))
}
-func (o *StringTagOptions) toOptions() (*storage.RestrictByTag, error) {
+// Validate validates the string tag options.
+func (o *StringTagOptions) Validate() error {
+ _, err := o.StorageOptions()
+ return err
+}
+
+// StorageOptions returns the corresponding storage.RestrictByTag options.
+func (o *StringTagOptions) StorageOptions() (*storage.RestrictByTag, error) {
if len(o.Restrict) == 0 && len(o.Strip) == 0 {
return nil, nil
}
@@ -111,3 +118,96 @@ type StringTagOptions struct {
Restrict []StringMatch `json:"match"`
Strip []string `json:"strip"`
}
+
+// MapTagsOptions represents mutations to be applied to all timeseries in a
+// write request.
+type MapTagsOptions struct {
+ TagMappers []TagMapper `json:"tagMappers"`
+}
+
+// TagMapper represents one of a variety of tag mapping operations.
+type TagMapper struct {
+ Write WriteOp `json:"write,omitempty"`
+ Drop DropOp `json:"drop,omitempty"`
+ DropWithValue DropWithValueOp `json:"dropWithValue,omitempty"`
+ Replace ReplaceOp `json:"replace,omitempty"`
+}
+
+// Validate ensures the mapper is valid.
+func (t TagMapper) Validate() error {
+ numOps := 0
+ if !t.Write.IsEmpty() {
+ numOps++
+ }
+
+ if !t.Drop.IsEmpty() {
+ numOps++
+ }
+
+ if !t.DropWithValue.IsEmpty() {
+ numOps++
+ }
+
+ if !t.Replace.IsEmpty() {
+ numOps++
+ }
+
+ if numOps == 1 {
+ return nil
+ }
+
+ return fmt.Errorf("must specify one operation per tag mapper (got %d)", numOps)
+}
+
+// WriteOp with tag="foo" and value="bar" will unconditionally add
+// tag-value pair "foo":"bar" to all timeseries included in the write request.
+// Any timeseries with a non-empty "foo" tag will have its value for that tag
+// replaced.
+type WriteOp struct {
+ Tag string `json:"tag"`
+ Value string `json:"value"`
+}
+
+// IsEmpty returns true if the operation is empty.
+func (op WriteOp) IsEmpty() bool {
+ return op.Tag == "" && op.Value == ""
+}
+
+// DropOp with tag="foo" and an empty value will remove all tag-value pairs in
+// all timeseries in the write request where the tag was "foo".
+type DropOp struct {
+ Tag string `json:"tag"`
+}
+
+// IsEmpty returns true if the operation is empty.
+func (op DropOp) IsEmpty() bool {
+ return op.Tag == ""
+}
+
+// DropWithValueOp will remove all tag-value pairs in all timeseries in the
+// write request if and only if the tag AND value in the timeseries are equal to
+// those on the operation.
+type DropWithValueOp struct {
+ Tag string `json:"tag"`
+ Value string `json:"value"`
+}
+
+// IsEmpty returns true if the operation is empty.
+func (op DropWithValueOp) IsEmpty() bool {
+ return op.Tag == "" && op.Value == ""
+}
+
+// ReplaceOp with tag="foo", an empty old field, and a non-empty new field will
+// unconditionally replace the value of any tag-value pair of any timeseries in
+// the write request where the tag is "foo" with the value of new. If old is
+// non-empty, a value will only be replaced if the value was equal to old.
+type ReplaceOp struct {
+ Tag string `json:"tag"`
+ OldValue string `json:"old"`
+ NewValue string `json:"new"`
+}
+
+// IsEmpty returns true if the operation is empty.
+func (op ReplaceOp) IsEmpty() bool {
+ return op.Tag == "" && op.OldValue == "" && op.NewValue == ""
+}
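Because Validate requires exactly one operation per mapper, a request payload carries one key per mapper entry. A sketch of decoding and validating such a payload, following the struct tags above (payload values are illustrative):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
    )

    func main() {
    	payload := []byte(`{
    		"tagMappers": [
    			{"write": {"tag": "datacenter", "value": "dc1"}},
    			{"drop": {"tag": "debug"}},
    			{"dropWithValue": {"tag": "env", "value": "staging"}},
    			{"replace": {"tag": "service", "old": "legacy", "new": "current"}}
    		]
    	}`)

    	var opts handleroptions.MapTagsOptions
    	if err := json.Unmarshal(payload, &opts); err != nil {
    		panic(err)
    	}
    	for _, tm := range opts.TagMappers {
    		// Validate rejects mappers with zero or multiple operations set.
    		if err := tm.Validate(); err != nil {
    			fmt.Println("invalid mapper:", err)
    		}
    	}
    }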
diff --git a/src/query/api/v1/handler/prometheus/handleroptions/tag_options_test.go b/src/query/api/v1/handler/prometheus/handleroptions/tag_options_test.go
index 7b8d788c8c..e631999c18 100644
--- a/src/query/api/v1/handler/prometheus/handleroptions/tag_options_test.go
+++ b/src/query/api/v1/handler/prometheus/handleroptions/tag_options_test.go
@@ -27,6 +27,7 @@ import (
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -131,7 +132,14 @@ func TestParse(t *testing.T) {
err := json.Unmarshal([]byte(tt.json), &opts)
require.NoError(t, err)
- a, err := opts.toOptions()
+ validateErr := opts.Validate()
+ if tt.expectedError {
+ require.Error(t, validateErr)
+ } else {
+ require.NoError(t, validateErr)
+ }
+
+ a, err := opts.StorageOptions()
if tt.expectedError {
require.Error(t, err)
require.Nil(t, a)
@@ -141,3 +149,44 @@ func TestParse(t *testing.T) {
}
}
}
+
+func TestTagMapperValidate(t *testing.T) {
+ tm := TagMapper{}
+ assert.Error(t, tm.Validate())
+
+ tm.Write = WriteOp{Tag: "foo", Value: "bar"}
+ assert.NoError(t, tm.Validate())
+
+ tm.Drop = DropOp{Tag: "foo"}
+ assert.Error(t, tm.Validate())
+}
+
+func TestOpIsEmpty(t *testing.T) {
+ t.Run("Append", func(t *testing.T) {
+ op := WriteOp{}
+ assert.True(t, op.IsEmpty())
+ op.Tag = "foo"
+ assert.False(t, op.IsEmpty())
+ })
+
+ t.Run("Drop", func(t *testing.T) {
+ op := DropOp{}
+ assert.True(t, op.IsEmpty())
+ op.Tag = "foo"
+ assert.False(t, op.IsEmpty())
+ })
+
+ t.Run("DropWithValue", func(t *testing.T) {
+ op := DropWithValueOp{}
+ assert.True(t, op.IsEmpty())
+ op.Value = "foo"
+ assert.False(t, op.IsEmpty())
+ })
+
+ t.Run("Replace", func(t *testing.T) {
+ op := ReplaceOp{}
+ assert.True(t, op.IsEmpty())
+ op.Tag = "foo"
+ assert.False(t, op.IsEmpty())
+ })
+}
diff --git a/src/query/api/v1/handler/prometheus/native/common.go b/src/query/api/v1/handler/prometheus/native/common.go
index 5968972f9f..bae194745c 100644
--- a/src/query/api/v1/handler/prometheus/native/common.go
+++ b/src/query/api/v1/handler/prometheus/native/common.go
@@ -31,6 +31,7 @@ import (
"github.com/m3db/m3/src/query/api/v1/handler/prometheus"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
+ "github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/errors"
"github.com/m3db/m3/src/query/executor"
"github.com/m3db/m3/src/query/functions/utils"
@@ -40,6 +41,7 @@ import (
"github.com/m3db/m3/src/query/util"
"github.com/m3db/m3/src/query/util/json"
"github.com/m3db/m3/src/query/util/logging"
+ "github.com/m3db/m3/src/x/headers"
"github.com/m3db/m3/src/x/instrument"
xhttp "github.com/m3db/m3/src/x/net/http"
@@ -151,7 +153,7 @@ func parseParams(
params.IncludeEnd = !excludeEnd
}
- if strings.ToLower(r.Header.Get("X-M3-Render-Format")) == "m3ql" {
+ if strings.ToLower(r.Header.Get(headers.RenderFormat)) == "m3ql" {
params.FormatType = models.FormatM3QL
}
@@ -289,15 +291,27 @@ func filterNaNSeries(
return filtered
}
-func renderResultsJSON(
+// RenderResultsOptions is a set of options for rendering the result.
+type RenderResultsOptions struct {
+ KeepNaNs bool
+ Start time.Time
+ End time.Time
+}
+
+// RenderResultsJSON renders results in JSON for range queries.
+func RenderResultsJSON(
w io.Writer,
- series []*ts.Series,
- params models.RequestParams,
- keepNans bool,
-) {
+ result ReadResult,
+ opts RenderResultsOptions,
+) error {
+ var (
+ series = result.Series
+ warnings = result.Meta.WarningStrings()
+ )
+
// NB: if dropping NaNs, drop series with only NaNs from output entirely.
- if !keepNans {
- series = filterNaNSeries(series, params.Start, params.End)
+ if !opts.KeepNaNs {
+ series = filterNaNSeries(series, opts.Start, opts.End)
}
jw := json.NewWriter(w)
@@ -306,6 +320,16 @@ func renderResultsJSON(
jw.BeginObjectField("status")
jw.WriteString("success")
+ if len(warnings) > 0 {
+ jw.BeginObjectField("warnings")
+ jw.BeginArray()
+ for _, warn := range warnings {
+ jw.WriteString(warn)
+ }
+
+ jw.EndArray()
+ }
+
jw.BeginObjectField("data")
jw.BeginObject()
@@ -332,7 +356,7 @@ func renderResultsJSON(
dp := vals.DatapointAt(i)
// If keepNaNs is set to false and the value is NaN, drop it from the response.
- if !keepNans && math.IsNaN(dp.Value) {
+ if !opts.KeepNaNs && math.IsNaN(dp.Value) {
continue
}
@@ -340,7 +364,7 @@ func renderResultsJSON(
// would be at the result node but that would make it inefficient since
// we would need to create another block just for the sake of restricting
// the bounds.
- if dp.Timestamp.Before(params.Start) {
+ if dp.Timestamp.Before(opts.Start) {
continue
}
@@ -349,8 +373,8 @@ func renderResultsJSON(
jw.WriteString(utils.FormatFloat(dp.Value))
jw.EndArray()
}
- jw.EndArray()
+ jw.EndArray()
fixedStep, ok := s.Values().(ts.FixedResolutionMutableValues)
if ok {
jw.BeginObjectField("step_size_ms")
@@ -359,32 +383,69 @@ func renderResultsJSON(
jw.EndObject()
}
jw.EndArray()
-
jw.EndObject()
jw.EndObject()
- jw.Close()
+ return jw.Close()
}
+// renderResultsInstantaneousJSON renders results in JSON for instant queries.
func renderResultsInstantaneousJSON(
w io.Writer,
- series []*ts.Series,
+ result ReadResult,
+ keepNaNs bool,
) {
+ var (
+ series = result.Series
+ warnings = result.Meta.WarningStrings()
+ isScalar = result.BlockType == block.BlockScalar || result.BlockType == block.BlockTime
+ )
+
+ resultType := "vector"
+ if isScalar {
+ resultType = "scalar"
+ }
+
jw := json.NewWriter(w)
jw.BeginObject()
jw.BeginObjectField("status")
jw.WriteString("success")
+ if len(warnings) > 0 {
+ jw.BeginObjectField("warnings")
+ jw.BeginArray()
+ for _, warn := range warnings {
+ jw.WriteString(warn)
+ }
+
+ jw.EndArray()
+ }
+
jw.BeginObjectField("data")
jw.BeginObject()
jw.BeginObjectField("resultType")
- jw.WriteString("vector")
+ jw.WriteString(resultType)
jw.BeginObjectField("result")
jw.BeginArray()
for _, s := range series {
+ vals := s.Values()
+ length := s.Len()
+ dp := vals.DatapointAt(length - 1)
+
+ if isScalar {
+ jw.WriteInt(int(dp.Timestamp.Unix()))
+ jw.WriteString(utils.FormatFloat(dp.Value))
+ continue
+ }
+
+ // If keepNaNs is set to false and the value is NaN, drop it from the response.
+ if !keepNaNs && math.IsNaN(dp.Value) {
+ continue
+ }
+
jw.BeginObject()
jw.BeginObjectField("metric")
jw.BeginObject()
@@ -395,9 +456,6 @@ func renderResultsInstantaneousJSON(
jw.EndObject()
jw.BeginObjectField("value")
- vals := s.Values()
- length := s.Len()
- dp := vals.DatapointAt(length - 1)
jw.BeginArray()
jw.WriteInt(int(dp.Timestamp.Unix()))
jw.WriteString(utils.FormatFloat(dp.Value))
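Since RenderResultsJSON is now exported and returns an error, callers pass a ReadResult plus explicit render options instead of raw series and request params. A minimal same-package sketch of a caller (the function name is hypothetical; ReadResult's Series and Meta fields are as used in the tests below):

    // writeRangeResult renders a range-query result. With KeepNaNs false, NaN
    // datapoints and NaN-only series are dropped; warnings recorded on
    // result.Meta surface in the top-level "warnings" field.
    func writeRangeResult(
    	w http.ResponseWriter,
    	result ReadResult,
    	params models.RequestParams,
    	keepNaNs bool,
    ) error {
    	return RenderResultsJSON(w, result, RenderResultsOptions{
    		Start:    params.Start,
    		End:      params.End,
    		KeepNaNs: keepNaNs,
    	})
    }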
diff --git a/src/query/api/v1/handler/prometheus/native/common_test.go b/src/query/api/v1/handler/prometheus/native/common_test.go
index 52c23e9393..f22ff34bd8 100644
--- a/src/query/api/v1/handler/prometheus/native/common_test.go
+++ b/src/query/api/v1/handler/prometheus/native/common_test.go
@@ -32,12 +32,14 @@ import (
"github.com/m3db/m3/src/query/api/v1/handler/prometheus"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
+ "github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/executor"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/test"
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/x/instrument"
+ xjson "github.com/m3db/m3/src/x/json"
xhttp "github.com/m3db/m3/src/x/net/http"
xtest "github.com/m3db/m3/src/x/test"
@@ -90,7 +92,7 @@ func TestParamParsing(t *testing.T) {
func TestParamParsing_POST(t *testing.T) {
params := defaultParams().Encode()
req := httptest.NewRequest("POST", PromReadURL, strings.NewReader(params))
- req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+ req.Header.Add(xhttp.HeaderContentType, xhttp.ContentTypeFormURLEncoded)
r, err := testParseParams(req)
require.NoError(t, err, "unable to parse request")
@@ -169,86 +171,94 @@ func TestRenderResultsJSON(t *testing.T) {
series := []*ts.Series{
ts.NewSeries([]byte("foo"),
valsWithNaN, test.TagSliceToTags([]models.Tag{
- models.Tag{Name: []byte("bar"), Value: []byte("baz")},
- models.Tag{Name: []byte("qux"), Value: []byte("qaz")},
+ {Name: []byte("bar"), Value: []byte("baz")},
+ {Name: []byte("qux"), Value: []byte("qaz")},
})),
ts.NewSeries([]byte("bar"),
- ts.NewFixedStepValues(10*time.Second, 2, 2, start), test.TagSliceToTags([]models.Tag{
- models.Tag{Name: []byte("baz"), Value: []byte("bar")},
- models.Tag{Name: []byte("qaz"), Value: []byte("qux")},
+ ts.NewFixedStepValues(10*time.Second, 2, 2, start),
+ test.TagSliceToTags([]models.Tag{
+ {Name: []byte("baz"), Value: []byte("bar")},
+ {Name: []byte("qaz"), Value: []byte("qux")},
})),
ts.NewSeries([]byte("foobar"),
- ts.NewFixedStepValues(10*time.Second, 2, math.NaN(), start), test.TagSliceToTags([]models.Tag{
- models.Tag{Name: []byte("biz"), Value: []byte("baz")},
- models.Tag{Name: []byte("qux"), Value: []byte("qaz")},
+ ts.NewFixedStepValues(10*time.Second, 2, math.NaN(), start),
+ test.TagSliceToTags([]models.Tag{
+ {Name: []byte("biz"), Value: []byte("baz")},
+ {Name: []byte("qux"), Value: []byte("qaz")},
})),
}
- renderResultsJSON(buffer, series, params, true)
+ readResult := ReadResult{Series: series}
+ RenderResultsJSON(buffer, readResult, RenderResultsOptions{
+ Start: params.Start,
+ End: params.End,
+ KeepNaNs: true,
+ })
- expected := xtest.MustPrettyJSON(t, `
- {
+ expected := xtest.MustPrettyJSONMap(t, xjson.Map{
"status": "success",
- "data": {
+ "warnings": xjson.Array{
+ "m3db exceeded query limit: results not exhaustive",
+ },
+ "data": xjson.Map{
"resultType": "matrix",
- "result": [
- {
- "metric": {
+ "result": xjson.Array{
+ xjson.Map{
+ "metric": xjson.Map{
"bar": "baz",
- "qux": "qaz"
+ "qux": "qaz",
},
- "values": [
- [
+ "values": xjson.Array{
+ xjson.Array{
1535948880,
- "1"
- ],
- [
+ "1",
+ },
+ xjson.Array{
1535948890,
- "NaN"
- ]
- ],
- "step_size_ms": 10000
+ "NaN",
+ },
+ },
+ "step_size_ms": 10000,
},
- {
- "metric": {
+ xjson.Map{
+ "metric": xjson.Map{
"baz": "bar",
- "qaz": "qux"
+ "qaz": "qux",
},
- "values": [
- [
+ "values": xjson.Array{
+ xjson.Array{
1535948880,
- "2"
- ],
- [
+ "2",
+ },
+ xjson.Array{
1535948890,
- "2"
- ]
- ],
- "step_size_ms": 10000
+ "2",
+ },
+ },
+ "step_size_ms": 10000,
},
- {
- "metric": {
+ xjson.Map{
+ "metric": xjson.Map{
"biz": "baz",
- "qux": "qaz"
+ "qux": "qaz",
},
- "values": [
- [
+ "values": xjson.Array{
+ xjson.Array{
1535948880,
- "NaN"
- ],
- [
+ "NaN",
+ },
+ xjson.Array{
1535948890,
- "NaN"
- ]
- ],
- "step_size_ms": 10000
- }
- ]
- }
- }
- `)
+ "NaN",
+ },
+ },
+ "step_size_ms": 10000,
+ },
+ },
+ },
+ })
- actual := xtest.MustPrettyJSON(t, buffer.String())
+ actual := xtest.MustPrettyJSONString(t, buffer.String())
assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
}
@@ -268,117 +278,265 @@ func TestRenderResultsJSONWithDroppedNaNs(t *testing.T) {
series := []*ts.Series{
ts.NewSeries([]byte("foo"),
valsWithNaN, test.TagSliceToTags([]models.Tag{
- models.Tag{Name: []byte("bar"), Value: []byte("baz")},
- models.Tag{Name: []byte("qux"), Value: []byte("qaz")},
+ {Name: []byte("bar"), Value: []byte("baz")},
+ {Name: []byte("qux"), Value: []byte("qaz")},
})),
ts.NewSeries([]byte("bar"),
- ts.NewFixedStepValues(step, 2, 2, start), test.TagSliceToTags([]models.Tag{
- models.Tag{Name: []byte("baz"), Value: []byte("bar")},
- models.Tag{Name: []byte("qaz"), Value: []byte("qux")},
+ ts.NewFixedStepValues(step, 2, 2, start),
+ test.TagSliceToTags([]models.Tag{
+ {Name: []byte("baz"), Value: []byte("bar")},
+ {Name: []byte("qaz"), Value: []byte("qux")},
})),
ts.NewSeries([]byte("foobar"),
- ts.NewFixedStepValues(step, 2, math.NaN(), start), test.TagSliceToTags([]models.Tag{
- models.Tag{Name: []byte("biz"), Value: []byte("baz")},
- models.Tag{Name: []byte("qux"), Value: []byte("qaz")},
+ ts.NewFixedStepValues(step, 2, math.NaN(), start),
+ test.TagSliceToTags([]models.Tag{
+ {Name: []byte("biz"), Value: []byte("baz")},
+ {Name: []byte("qux"), Value: []byte("qaz")},
})),
}
- renderResultsJSON(buffer, series, params, false)
+ meta := block.NewResultMetadata()
+ meta.AddWarning("foo", "bar")
+ meta.AddWarning("baz", "qux")
+ readResult := ReadResult{
+ Series: series,
+ Meta: meta,
+ }
+
+ RenderResultsJSON(buffer, readResult, RenderResultsOptions{
+ Start: params.Start,
+ End: params.End,
+ KeepNaNs: false,
+ })
- expected := xtest.MustPrettyJSON(t, `
- {
+ expected := xtest.MustPrettyJSONMap(t, xjson.Map{
"status": "success",
- "data": {
+ "warnings": xjson.Array{
+ "foo_bar",
+ "baz_qux",
+ },
+ "data": xjson.Map{
"resultType": "matrix",
- "result": [
- {
- "metric": {
+ "result": xjson.Array{
+ xjson.Map{
+ "metric": xjson.Map{
"bar": "baz",
- "qux": "qaz"
+ "qux": "qaz",
},
- "values": [
- [
+ "values": xjson.Array{
+ xjson.Array{
1535948880,
- "1"
- ]
- ],
- "step_size_ms": 10000
+ "1",
+ },
+ },
+ "step_size_ms": 10000,
},
- {
- "metric": {
+ xjson.Map{
+ "metric": xjson.Map{
"baz": "bar",
- "qaz": "qux"
+ "qaz": "qux",
},
- "values": [
- [
+ "values": xjson.Array{
+ xjson.Array{
1535948880,
- "2"
- ],
- [
+ "2",
+ },
+ xjson.Array{
1535948890,
- "2"
- ]
- ],
- "step_size_ms": 10000
- }
- ]
- }
- }
- `)
+ "2",
+ },
+ },
+ "step_size_ms": 10000,
+ },
+ },
+ },
+ })
- actual := xtest.MustPrettyJSON(t, buffer.String())
+ actual := xtest.MustPrettyJSONString(t, buffer.String())
assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
}
-func TestRenderInstantaneousResultsJSON(t *testing.T) {
+func TestRenderInstantaneousResultsJSONVector(t *testing.T) {
start := time.Unix(1535948880, 0)
- buffer := bytes.NewBuffer(nil)
+
series := []*ts.Series{
ts.NewSeries([]byte("foo"),
- ts.NewFixedStepValues(10*time.Second, 1, 1, start), test.TagSliceToTags([]models.Tag{
- models.Tag{Name: []byte("bar"), Value: []byte("baz")},
- models.Tag{Name: []byte("qux"), Value: []byte("qaz")},
+ ts.NewFixedStepValues(10*time.Second, 1, 1, start),
+ test.TagSliceToTags([]models.Tag{
+ {Name: []byte("bar"), Value: []byte("baz")},
+ {Name: []byte("qux"), Value: []byte("qaz")},
+ })),
+ ts.NewSeries([]byte("nan"),
+ ts.NewFixedStepValues(10*time.Second, 1, math.NaN(), start),
+ test.TagSliceToTags([]models.Tag{
+ {Name: []byte("baz"), Value: []byte("bar")},
})),
ts.NewSeries([]byte("bar"),
- ts.NewFixedStepValues(10*time.Second, 1, 2, start), test.TagSliceToTags([]models.Tag{
- models.Tag{Name: []byte("baz"), Value: []byte("bar")},
- models.Tag{Name: []byte("qaz"), Value: []byte("qux")},
+ ts.NewFixedStepValues(10*time.Second, 1, 2, start),
+ test.TagSliceToTags([]models.Tag{
+ {Name: []byte("baz"), Value: []byte("bar")},
+ {Name: []byte("qaz"), Value: []byte("qux")},
})),
}
- renderResultsInstantaneousJSON(buffer, series)
+ readResult := ReadResult{
+ Series: series,
+ Meta: block.NewResultMetadata(),
+ }
+
+ foo := xjson.Map{
+ "metric": xjson.Map{
+ "bar": "baz",
+ "qux": "qaz",
+ },
+ "value": xjson.Array{
+ 1535948880,
+ "1",
+ },
+ }
+
+ bar := xjson.Map{
+ "metric": xjson.Map{
+ "baz": "bar",
+ "qaz": "qux",
+ },
+ "value": xjson.Array{
+ 1535948880,
+ "2",
+ },
+ }
+
+ nan := xjson.Map{
+ "metric": xjson.Map{
+ "baz": "bar",
+ },
+ "value": xjson.Array{
+ 1535948880,
+ "NaN",
+ },
+ }
- expected := xtest.MustPrettyJSON(t, `
- {
+ buffer := bytes.NewBuffer(nil)
+ renderResultsInstantaneousJSON(buffer, readResult, true)
+ expectedWithNaN := xtest.MustPrettyJSONMap(t, xjson.Map{
"status": "success",
- "data": {
+ "data": xjson.Map{
"resultType": "vector",
- "result": [
- {
- "metric": {
- "bar": "baz",
- "qux": "qaz"
- },
- "value": [
- 1535948880,
- "1"
- ]
- },
- {
- "metric": {
- "baz": "bar",
- "qaz": "qux"
- },
- "value": [
- 1535948880,
- "2"
- ]
- }
- ]
- }
+ "result": xjson.Array{foo, nan, bar},
+ },
+ })
+ actualWithNaN := xtest.MustPrettyJSONString(t, buffer.String())
+ assert.Equal(t, expectedWithNaN, actualWithNaN, xtest.Diff(expectedWithNaN, actualWithNaN))
+
+ buffer = bytes.NewBuffer(nil)
+ renderResultsInstantaneousJSON(buffer, readResult, false)
+ expectedWithoutNaN := xtest.MustPrettyJSONMap(t, xjson.Map{
+ "status": "success",
+ "data": xjson.Map{
+ "resultType": "vector",
+ "result": xjson.Array{foo, bar},
+ },
+ })
+ actualWithoutNaN := xtest.MustPrettyJSONString(t, buffer.String())
+ assert.Equal(t, expectedWithoutNaN, actualWithoutNaN, xtest.Diff(expectedWithoutNaN, actualWithoutNaN))
+}
+
+func TestRenderInstantaneousResultsNansOnlyJSON(t *testing.T) {
+ start := time.Unix(1535948880, 0)
+
+ series := []*ts.Series{
+ ts.NewSeries([]byte("nan"),
+ ts.NewFixedStepValues(10*time.Second, 1, math.NaN(), start),
+ test.TagSliceToTags([]models.Tag{
+ {Name: []byte("qux"), Value: []byte("qaz")},
+ })),
+ ts.NewSeries([]byte("nan"),
+ ts.NewFixedStepValues(10*time.Second, 1, math.NaN(), start),
+ test.TagSliceToTags([]models.Tag{
+ {Name: []byte("baz"), Value: []byte("bar")},
+ })),
}
- `)
- actual := xtest.MustPrettyJSON(t, buffer.String())
+
+ readResult := ReadResult{
+ Series: series,
+ Meta: block.NewResultMetadata(),
+ }
+
+ nan1 := xjson.Map{
+ "metric": xjson.Map{
+ "qux": "qaz",
+ },
+ "value": xjson.Array{
+ 1535948880,
+ "NaN",
+ },
+ }
+
+ nan2 := xjson.Map{
+ "metric": xjson.Map{
+ "baz": "bar",
+ },
+ "value": xjson.Array{
+ 1535948880,
+ "NaN",
+ },
+ }
+
+ buffer := bytes.NewBuffer(nil)
+ renderResultsInstantaneousJSON(buffer, readResult, true)
+ expectedWithNaN := xtest.MustPrettyJSONMap(t, xjson.Map{
+ "status": "success",
+ "data": xjson.Map{
+ "resultType": "vector",
+ "result": xjson.Array{nan1, nan2},
+ },
+ })
+ actualWithNaN := xtest.MustPrettyJSONString(t, buffer.String())
+ assert.Equal(t, expectedWithNaN, actualWithNaN, xtest.Diff(expectedWithNaN, actualWithNaN))
+
+ buffer = bytes.NewBuffer(nil)
+ renderResultsInstantaneousJSON(buffer, readResult, false)
+ expectedWithoutNaN := xtest.MustPrettyJSONMap(t, xjson.Map{
+ "status": "success",
+ "data": xjson.Map{
+ "resultType": "vector",
+ "result": xjson.Array{},
+ },
+ })
+ actualWithoutNaN := xtest.MustPrettyJSONString(t, buffer.String())
+ assert.Equal(t, expectedWithoutNaN, actualWithoutNaN, xtest.Diff(expectedWithoutNaN, actualWithoutNaN))
+}
+
+func TestRenderInstantaneousResultsJSONScalar(t *testing.T) {
+ start := time.Unix(1535948880, 0)
+
+ series := []*ts.Series{
+ ts.NewSeries(
+ []byte("foo"),
+ ts.NewFixedStepValues(10*time.Second, 1, 5, start),
+ test.TagSliceToTags([]models.Tag{})),
+ }
+
+ readResult := ReadResult{
+ Series: series,
+ Meta: block.NewResultMetadata(),
+ BlockType: block.BlockScalar,
+ }
+
+ buffer := bytes.NewBuffer(nil)
+ renderResultsInstantaneousJSON(buffer, readResult, false)
+ expected := xtest.MustPrettyJSONMap(t, xjson.Map{
+ "status": "success",
+ "data": xjson.Map{
+ "resultType": "scalar",
+ "result": xjson.Array{
+ 1535948880,
+ "5",
+ },
+ },
+ })
+
+ actual := xtest.MustPrettyJSONString(t, buffer.String())
assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
}
diff --git a/src/query/api/v1/handler/prometheus/native/complete_tags.go b/src/query/api/v1/handler/prometheus/native/complete_tags.go
index ed2ffd0c3f..85fe042b43 100644
--- a/src/query/api/v1/handler/prometheus/native/complete_tags.go
+++ b/src/query/api/v1/handler/prometheus/native/complete_tags.go
@@ -30,7 +30,9 @@ import (
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/query/api/v1/options"
"github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/util/logging"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/instrument"
@@ -66,7 +68,7 @@ func NewCompleteTagsHandler(opts options.HandlerOptions) http.Handler {
func (h *CompleteTagsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := context.WithValue(r.Context(), handler.HeaderKey, r.Header)
logger := logging.WithContext(ctx, h.instrumentOpts)
- w.Header().Set("Content-Type", "application/json")
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
tagCompletionQueries, rErr := prometheus.ParseTagCompletionParamsToQueries(r)
if rErr != nil {
@@ -86,8 +88,9 @@ func (h *CompleteTagsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
multiErr xerrors.MultiError
nameOnly = tagCompletionQueries.NameOnly
- resultBuilder = storage.NewCompleteTagsResultBuilder(nameOnly)
meta = block.NewResultMetadata()
+ resultBuilder = consolidators.NewCompleteTagsResultBuilder(
+ nameOnly, models.NewTagOptions())
)
for _, query := range tagCompletionQueries.Queries {
diff --git a/src/query/api/v1/handler/prometheus/native/complete_tags_test.go b/src/query/api/v1/handler/prometheus/native/complete_tags_test.go
index 12fa4b3da0..843bd86bff 100644
--- a/src/query/api/v1/handler/prometheus/native/complete_tags_test.go
+++ b/src/query/api/v1/handler/prometheus/native/complete_tags_test.go
@@ -30,6 +30,8 @@ import (
"github.com/m3db/m3/src/query/api/v1/options"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ "github.com/m3db/m3/src/x/headers"
xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
@@ -52,7 +54,7 @@ var tests = []struct {
{
"non-exhaustive",
block.ResultMetadata{Exhaustive: false},
- handleroptions.LimitHeaderSeriesLimitApplied,
+ headers.LimitHeaderSeriesLimitApplied,
},
{
"warnings",
@@ -75,9 +77,9 @@ func testCompleteTags(t *testing.T, meta block.ResultMetadata, header string) {
// setup storage and handler
store := storage.NewMockStorage(ctrl)
- storeResult := &storage.CompleteTagsResult{
+ storeResult := &consolidators.CompleteTagsResult{
CompleteNameOnly: false,
- CompletedTags: []storage.CompletedTag{
+ CompletedTags: []consolidators.CompletedTag{
{Name: b("bar"), Values: [][]byte{b("qux")}},
{Name: b("baz")},
{Name: b("foo")},
@@ -86,8 +88,8 @@ func testCompleteTags(t *testing.T, meta block.ResultMetadata, header string) {
Metadata: meta,
}
- fb := handleroptions.
- NewFetchOptionsBuilder(handleroptions.FetchOptionsBuilderOptions{})
+ fb := handleroptions.NewFetchOptionsBuilder(
+ handleroptions.FetchOptionsBuilderOptions{})
opts := options.EmptyHandlerOptions().
SetStorage(store).
SetFetchOptionsBuilder(fb)
@@ -109,7 +111,7 @@ func testCompleteTags(t *testing.T, meta block.ResultMetadata, header string) {
`{"key":"baz","values":[]},{"key":"foo","values":[]}]}`
require.Equal(t, ex, string(r))
- actual := w.Header().Get(handleroptions.LimitHeader)
+ actual := w.Header().Get(headers.LimitHeader)
assert.Equal(t, header, actual)
}
@@ -146,9 +148,9 @@ func TestMultiCompleteTags(t *testing.T) {
store := storage.NewMockStorage(ctrl)
fooMeta := block.NewResultMetadata()
fooMeta.Exhaustive = false
- fooResult := &storage.CompleteTagsResult{
+ fooResult := &consolidators.CompleteTagsResult{
CompleteNameOnly: false,
- CompletedTags: []storage.CompletedTag{
+ CompletedTags: []consolidators.CompletedTag{
{Name: b("bar"), Values: [][]byte{b("zulu"), b("quail")}},
{Name: b("foo"), Values: [][]byte{b("quail")}},
},
@@ -158,17 +160,17 @@ func TestMultiCompleteTags(t *testing.T) {
barMeta := block.NewResultMetadata()
barMeta.AddWarning("abc", "def")
- barResult := &storage.CompleteTagsResult{
+ barResult := &consolidators.CompleteTagsResult{
CompleteNameOnly: false,
- CompletedTags: []storage.CompletedTag{
+ CompletedTags: []consolidators.CompletedTag{
{Name: b("bar"), Values: [][]byte{b("qux")}},
},
Metadata: barMeta,
}
- fb := handleroptions.
- NewFetchOptionsBuilder(handleroptions.FetchOptionsBuilderOptions{})
+ fb := handleroptions.NewFetchOptionsBuilder(
+ handleroptions.FetchOptionsBuilderOptions{})
opts := options.EmptyHandlerOptions().
SetStorage(store).
SetFetchOptionsBuilder(fb)
@@ -194,6 +196,6 @@ func TestMultiCompleteTags(t *testing.T) {
`{"key":"foo","values":["quail"]}]}`
require.Equal(t, ex, string(r))
- actual := w.Header().Get(handleroptions.LimitHeader)
+ actual := w.Header().Get(headers.LimitHeader)
assert.Equal(t, "max_fetch_series_limit_applied,abc_def", actual)
}
diff --git a/src/query/api/v1/handler/prometheus/native/list_tags.go b/src/query/api/v1/handler/prometheus/native/list_tags.go
index e90c8b580f..be5e18af10 100644
--- a/src/query/api/v1/handler/prometheus/native/list_tags.go
+++ b/src/query/api/v1/handler/prometheus/native/list_tags.go
@@ -70,7 +70,7 @@ func NewListTagsHandler(opts options.HandlerOptions) http.Handler {
func (h *ListTagsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := context.WithValue(r.Context(), handler.HeaderKey, r.Header)
logger := logging.WithContext(ctx, h.instrumentOpts)
- w.Header().Set("Content-Type", "application/json")
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
query := &storage.CompleteTagsQuery{
CompleteNameOnly: true,
diff --git a/src/query/api/v1/handler/prometheus/native/list_tags_test.go b/src/query/api/v1/handler/prometheus/native/list_tags_test.go
index 7e49429745..8ef8a9c32f 100644
--- a/src/query/api/v1/handler/prometheus/native/list_tags_test.go
+++ b/src/query/api/v1/handler/prometheus/native/list_tags_test.go
@@ -34,6 +34,8 @@ import (
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ "github.com/m3db/m3/src/x/headers"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@@ -93,9 +95,9 @@ func testListTags(t *testing.T, meta block.ResultMetadata, header string) {
// setup storage and handler
store := storage.NewMockStorage(ctrl)
- storeResult := &storage.CompleteTagsResult{
+ storeResult := &consolidators.CompleteTagsResult{
CompleteNameOnly: true,
- CompletedTags: []storage.CompletedTag{
+ CompletedTags: []consolidators.CompletedTag{
{Name: b("bar")},
{Name: b("baz")},
{Name: b("foo")},
@@ -109,8 +111,8 @@ func testListTags(t *testing.T, meta block.ResultMetadata, header string) {
return now
}
- fb := handleroptions.
- NewFetchOptionsBuilder(handleroptions.FetchOptionsBuilderOptions{})
+ fb := handleroptions.NewFetchOptionsBuilder(
+ handleroptions.FetchOptionsBuilderOptions{})
opts := options.EmptyHandlerOptions().
SetStorage(store).
SetFetchOptionsBuilder(fb).
@@ -137,7 +139,7 @@ func testListTags(t *testing.T, meta block.ResultMetadata, header string) {
ex := `{"status":"success","data":["bar","baz","foo"]}`
require.Equal(t, ex, string(r))
- actual := w.Header().Get(handleroptions.LimitHeader)
+ actual := w.Header().Get(headers.LimitHeader)
assert.Equal(t, header, actual)
}
}
@@ -153,8 +155,8 @@ func TestListErrorTags(t *testing.T) {
return now
}
- fb := handleroptions.
- NewFetchOptionsBuilder(handleroptions.FetchOptionsBuilderOptions{})
+ fb := handleroptions.NewFetchOptionsBuilder(
+ handleroptions.FetchOptionsBuilderOptions{})
opts := options.EmptyHandlerOptions().
SetStorage(store).
SetFetchOptionsBuilder(fb).
diff --git a/src/query/api/v1/handler/prometheus/native/parse_query_test.go b/src/query/api/v1/handler/prometheus/native/parse_query_test.go
index 646ac9d48d..c6334c0d57 100644
--- a/src/query/api/v1/handler/prometheus/native/parse_query_test.go
+++ b/src/query/api/v1/handler/prometheus/native/parse_query_test.go
@@ -32,7 +32,7 @@ import (
"github.com/m3db/m3/src/x/instrument"
xtest "github.com/m3db/m3/src/x/test"
- pql "github.com/prometheus/prometheus/promql"
+ pql "github.com/prometheus/prometheus/promql/parser"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -128,8 +128,8 @@ func TestParse(t *testing.T) {
r, err := ioutil.ReadAll(body)
require.NoError(t, err)
- ex := xtest.MustPrettyJSON(t, tt.ex)
- actual := xtest.MustPrettyJSON(t, string(r))
+ ex := xtest.MustPrettyJSONString(t, tt.ex)
+ actual := xtest.MustPrettyJSONString(t, string(r))
require.Equal(t, ex, actual,
fmt.Sprintf("Run %d:\n%s", i, xtest.Diff(ex, actual)))
}
diff --git a/src/query/api/v1/handler/prometheus/native/parse_threshold_test.go b/src/query/api/v1/handler/prometheus/native/parse_threshold_test.go
index 519dd24c60..8266a09be2 100644
--- a/src/query/api/v1/handler/prometheus/native/parse_threshold_test.go
+++ b/src/query/api/v1/handler/prometheus/native/parse_threshold_test.go
@@ -31,7 +31,7 @@ import (
"github.com/m3db/m3/src/query/executor"
"github.com/m3db/m3/src/x/instrument"
xtest "github.com/m3db/m3/src/x/test"
- pql "github.com/prometheus/prometheus/promql"
+ pql "github.com/prometheus/prometheus/promql/parser"
"github.com/stretchr/testify/require"
)
@@ -200,8 +200,8 @@ func TestParseThreshold(t *testing.T) {
r, err := ioutil.ReadAll(body)
require.NoError(t, err)
- ex := xtest.MustPrettyJSON(t, tt.ex)
- actual := xtest.MustPrettyJSON(t, string(r))
+ ex := xtest.MustPrettyJSONString(t, tt.ex)
+ actual := xtest.MustPrettyJSONString(t, string(r))
require.Equal(t, ex, actual,
fmt.Sprintf("Run %d:\n%s", i, xtest.Diff(ex, actual)))
}
diff --git a/src/query/api/v1/handler/prometheus/native/read.go b/src/query/api/v1/handler/prometheus/native/read.go
index 202896a516..c5f8a958ba 100644
--- a/src/query/api/v1/handler/prometheus/native/read.go
+++ b/src/query/api/v1/handler/prometheus/native/read.go
@@ -22,223 +22,139 @@ package native
import (
"context"
- "fmt"
"net/http"
- "github.com/m3db/m3/src/cmd/services/m3query/config"
"github.com/m3db/m3/src/query/api/v1/handler"
- "github.com/m3db/m3/src/query/api/v1/handler/prometheus"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/query/api/v1/options"
- "github.com/m3db/m3/src/query/block"
- "github.com/m3db/m3/src/query/executor"
"github.com/m3db/m3/src/query/models"
- "github.com/m3db/m3/src/query/storage"
- "github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/query/util/logging"
- "github.com/m3db/m3/src/x/instrument"
xhttp "github.com/m3db/m3/src/x/net/http"
xopentracing "github.com/m3db/m3/src/x/opentracing"
opentracingext "github.com/opentracing/opentracing-go/ext"
opentracinglog "github.com/opentracing/opentracing-go/log"
- "github.com/uber-go/tally"
"go.uber.org/zap"
)
const (
// PromReadURL is the url for native prom read handler, this matches the
- // default URL for the query range endpoint found on a Prometheus server
+ // default URL for the query range endpoint found on a Prometheus server.
PromReadURL = handler.RoutePrefixV1 + "/query_range"
- // TODO: Move to config
- initialBlockAlloc = 10
+ // PromReadInstantURL is the URL for the native instantaneous prom read
+ // handler; it matches the default URL for the query endpoint found on
+ // a Prometheus server.
+ PromReadInstantURL = handler.RoutePrefixV1 + "/query"
)
var (
- // PromReadHTTPMethods are the HTTP methods for this handler.
+ // PromReadHTTPMethods are the HTTP methods for the read handler.
PromReadHTTPMethods = []string{
http.MethodGet,
http.MethodPost,
}
- emptySeriesList = []*ts.Series{}
- emptyReqParams = models.RequestParams{}
+ // PromReadInstantHTTPMethods are the HTTP methods for the instant handler.
+ PromReadInstantHTTPMethods = []string{
+ http.MethodGet,
+ http.MethodPost,
+ }
)
-// PromReadHandler represents a handler for prometheus read endpoint.
-type PromReadHandler struct {
- keepEmpty bool
- limitsCfg *config.LimitsConfiguration
- timeoutOps *prometheus.TimeoutOpts
- engine executor.Engine
- fetchOptionsBuilder handleroptions.FetchOptionsBuilder
- tagOpts models.TagOptions
- promReadMetrics promReadMetrics
- instrumentOpts instrument.Options
-}
-
-type promReadMetrics struct {
- fetchSuccess tally.Counter
- fetchErrorsServer tally.Counter
- fetchErrorsClient tally.Counter
- fetchTimerSuccess tally.Timer
- maxDatapoints tally.Gauge
+// promReadHandler represents a handler for the prometheus read endpoint.
+type promReadHandler struct {
+ instant bool
+ promReadMetrics promReadMetrics
+ opts options.HandlerOptions
}
-func newPromReadMetrics(scope tally.Scope) promReadMetrics {
- return promReadMetrics{
- fetchSuccess: scope.Counter("fetch.success"),
- fetchErrorsServer: scope.Tagged(map[string]string{"code": "5XX"}).
- Counter("fetch.errors"),
- fetchErrorsClient: scope.Tagged(map[string]string{"code": "4XX"}).
- Counter("fetch.errors"),
- fetchTimerSuccess: scope.Timer("fetch.success.latency"),
- maxDatapoints: scope.Gauge("max_datapoints"),
- }
-}
-
-// ReadResponse is the response that gets returned to the user
-type ReadResponse struct {
- Results []ts.Series `json:"results,omitempty"`
+// NewPromReadHandler returns a new prometheus-compatible read handler.
+func NewPromReadHandler(opts options.HandlerOptions) http.Handler {
+ return newHandler(opts, false)
}
-type blockWithMeta struct {
- block block.Block
- meta block.Metadata
+// NewPromReadInstantHandler returns a new prometheus-compatible instant read handler.
+func NewPromReadInstantHandler(opts options.HandlerOptions) http.Handler {
+ return newHandler(opts, true)
}
-// RespError wraps error and status code
-type RespError struct {
- Err error
- Code int
-}
+// newHandler returns a new read handler in either range or instant mode.
+func newHandler(opts options.HandlerOptions, instant bool) http.Handler {
+ name := "native-read"
+ if instant {
+ name = "native-instant-read"
+ }
-// NewPromReadHandler returns a new instance of handler.
-func NewPromReadHandler(opts options.HandlerOptions) *PromReadHandler {
taggedScope := opts.InstrumentOpts().MetricsScope().
- Tagged(map[string]string{"handler": "native-read"})
- limits := opts.Config().Limits
-
- h := &PromReadHandler{
- engine: opts.Engine(),
- fetchOptionsBuilder: opts.FetchOptionsBuilder(),
- tagOpts: opts.TagOptions(),
- limitsCfg: &limits,
- promReadMetrics: newPromReadMetrics(taggedScope),
- timeoutOps: opts.TimeoutOpts(),
- keepEmpty: opts.Config().ResultOptions.KeepNans,
- instrumentOpts: opts.InstrumentOpts(),
+ Tagged(map[string]string{"handler": name})
+ h := &promReadHandler{
+ promReadMetrics: newPromReadMetrics(taggedScope),
+ opts: opts,
+ instant: instant,
}
- pointCount := float64(limits.MaxComputedDatapoints())
- h.promReadMetrics.maxDatapoints.Update(pointCount)
+ maxDatapoints := opts.Config().Limits.MaxComputedDatapoints()
+ h.promReadMetrics.maxDatapoints.Update(float64(maxDatapoints))
return h
}
-func (h *PromReadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (h *promReadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
timer := h.promReadMetrics.fetchTimerSuccess.Start()
- fetchOpts, rErr := h.fetchOptionsBuilder.NewFetchOptions(r)
+ defer timer.Stop()
+
+ ctx := context.WithValue(r.Context(), handler.HeaderKey, r.Header)
+ logger := logging.WithContext(ctx, h.opts.InstrumentOpts())
+
+ parsedOptions, rErr := ParseRequest(ctx, r, h.instant, h.opts)
if rErr != nil {
+ h.promReadMetrics.fetchErrorsClient.Inc(1)
+ logger.Error("could not parse request", zap.Error(rErr.Inner()))
xhttp.Error(w, rErr.Inner(), rErr.Code())
return
}
- queryOpts := &executor.QueryOptions{
- QueryContextOptions: models.QueryContextOptions{
- LimitMaxTimeseries: fetchOpts.Limit,
- }}
-
- restrictOpts := fetchOpts.RestrictQueryOptions.GetRestrictByType()
- if restrictOpts != nil {
- restrict := &models.RestrictFetchTypeQueryContextOptions{
- MetricsType: uint(restrictOpts.MetricsType),
- StoragePolicy: restrictOpts.StoragePolicy,
- }
- queryOpts.QueryContextOptions.RestrictFetchType = restrict
- }
+ watcher := handler.NewResponseWriterCanceller(w, h.opts.InstrumentOpts())
+ parsedOptions.CancelWatcher = watcher
- result, params, respErr := h.ServeHTTPWithEngine(w, r, h.engine, queryOpts, fetchOpts)
- if respErr != nil {
- xhttp.Error(w, respErr.Err, respErr.Code)
- return
- }
+ result, err := read(ctx, parsedOptions, h.opts)
+ if err != nil {
+ sp := xopentracing.SpanFromContextOrNoop(ctx)
+ sp.LogFields(opentracinglog.Error(err))
+ opentracingext.Error.Set(sp, true)
+ logger.Error("range query error",
+ zap.Error(err),
+ zap.Any("parsedOptions", parsedOptions))
+ h.promReadMetrics.fetchErrorsServer.Inc(1)
- w.Header().Set("Content-Type", "application/json")
- if params.FormatType == models.FormatM3QL {
- renderM3QLResultsJSON(w, result, params)
- h.promReadMetrics.fetchSuccess.Inc(1)
- timer.Stop()
+ xhttp.Error(w, err, http.StatusInternalServerError)
return
}
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
+ handleroptions.AddWarningHeaders(w, result.Meta)
h.promReadMetrics.fetchSuccess.Inc(1)
- timer.Stop()
- // TODO: Support multiple result types
- renderResultsJSON(w, result, params, h.keepEmpty)
-}
-
-// ServeHTTPWithEngine returns query results from the storage
-func (h *PromReadHandler) ServeHTTPWithEngine(
- w http.ResponseWriter,
- r *http.Request,
- engine executor.Engine,
- opts *executor.QueryOptions,
- fetchOpts *storage.FetchOptions,
-) ([]*ts.Series, models.RequestParams, *RespError) {
- ctx := context.WithValue(r.Context(), handler.HeaderKey, r.Header)
- logger := logging.WithContext(ctx, h.instrumentOpts)
- params, rErr := parseParams(r, engine.Options(),
- h.timeoutOps, fetchOpts, h.instrumentOpts)
- if rErr != nil {
- h.promReadMetrics.fetchErrorsClient.Inc(1)
- return nil, emptyReqParams, &RespError{Err: rErr.Inner(), Code: rErr.Code()}
+ if h.instant {
+ renderResultsInstantaneousJSON(w, result, h.opts.Config().ResultOptions.KeepNans)
+ return
}
- if params.Debug {
- logger.Info("Request params", zap.Any("params", params))
+ if parsedOptions.Params.FormatType == models.FormatM3QL {
+ renderM3QLResultsJSON(w, result.Series, parsedOptions.Params)
+ return
}
- if err := h.validateRequest(¶ms); err != nil {
- h.promReadMetrics.fetchErrorsClient.Inc(1)
- return nil, emptyReqParams, &RespError{Err: err, Code: http.StatusBadRequest}
- }
+ err = RenderResultsJSON(w, result, RenderResultsOptions{
+ Start: parsedOptions.Params.Start,
+ End: parsedOptions.Params.End,
+ KeepNaNs: h.opts.Config().ResultOptions.KeepNans,
+ })
- result, err := read(ctx, engine, opts, fetchOpts, h.tagOpts,
- w, params, h.instrumentOpts)
if err != nil {
- sp := xopentracing.SpanFromContextOrNoop(ctx)
- sp.LogFields(opentracinglog.Error(err))
- opentracingext.Error.Set(sp, true)
- logger.Error("unable to fetch data", zap.Error(err))
- h.promReadMetrics.fetchErrorsServer.Inc(1)
- return nil, emptyReqParams, &RespError{
- Err: err,
- Code: http.StatusInternalServerError,
- }
+ w.WriteHeader(http.StatusInternalServerError)
+ logger.Error("failed to render results", zap.Error(err))
+ } else {
+ w.WriteHeader(http.StatusOK)
}
-
- // TODO: Support multiple result types
- w.Header().Set("Content-Type", "application/json")
- handleroptions.AddWarningHeaders(w, result.meta)
- return result.series, params, nil
-}
-
-func (h *PromReadHandler) validateRequest(params *models.RequestParams) error {
- // Impose a rough limit on the number of returned time series. This is intended to prevent things like
- // querying from the beginning of time with a 1s step size.
- // Approach taken directly from prom.
- numSteps := int64(params.End.Sub(params.Start) / params.Step)
- maxComputedDatapoints := h.limitsCfg.MaxComputedDatapoints()
- if maxComputedDatapoints > 0 && numSteps > maxComputedDatapoints {
- return fmt.Errorf(
- "querying from %v to %v with step size %v would result in too many datapoints "+
- "(end - start / step > %d). Either decrease the query resolution (?step=XX), decrease the time window, "+
- "or increase the limit (`limits.maxComputedDatapoints`)",
- params.Start, params.End, params.Step, maxComputedDatapoints,
- )
- }
-
- return nil
}
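With the range and instant endpoints collapsed into a single unexported promReadHandler, registration reduces to the two exported constructors. A minimal wiring sketch, assuming a gorilla/mux router and an already-built options.HandlerOptions; the helper itself is illustrative and not part of this change:

    import (
        "github.com/gorilla/mux"

        "github.com/m3db/m3/src/query/api/v1/handler/prometheus/native"
        "github.com/m3db/m3/src/query/api/v1/options"
    )

    // registerNativeReadRoutes is a hypothetical helper showing how the
    // exported constructors above are intended to be wired.
    func registerNativeReadRoutes(r *mux.Router, opts options.HandlerOptions) {
        r.Handle(native.PromReadURL, native.NewPromReadHandler(opts)).
            Methods(native.PromReadHTTPMethods...)
        r.Handle(native.PromReadInstantURL, native.NewPromReadInstantHandler(opts)).
            Methods(native.PromReadInstantHTTPMethods...)
    }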
diff --git a/src/query/api/v1/handler/prometheus/native/read_common.go b/src/query/api/v1/handler/prometheus/native/read_common.go
index 84da0a2b08..434329d464 100644
--- a/src/query/api/v1/handler/prometheus/native/read_common.go
+++ b/src/query/api/v1/handler/prometheus/native/read_common.go
@@ -25,273 +25,237 @@ import (
"fmt"
"math"
"net/http"
- "sort"
"github.com/m3db/m3/src/query/api/v1/handler"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus"
+ "github.com/m3db/m3/src/query/api/v1/options"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/executor"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser/promql"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/ts"
- "github.com/m3db/m3/src/x/instrument"
+ xhttp "github.com/m3db/m3/src/x/net/http"
xopentracing "github.com/m3db/m3/src/x/opentracing"
+ "github.com/uber-go/tally"
opentracinglog "github.com/opentracing/opentracing-go/log"
)
-type readResult struct {
- series []*ts.Series
- meta block.ResultMetadata
+type promReadMetrics struct {
+ fetchSuccess tally.Counter
+ fetchErrorsServer tally.Counter
+ fetchErrorsClient tally.Counter
+ fetchTimerSuccess tally.Timer
+ maxDatapoints tally.Gauge
}
-func read(
- reqCtx context.Context,
- engine executor.Engine,
- opts *executor.QueryOptions,
- fetchOpts *storage.FetchOptions,
- tagOpts models.TagOptions,
- w http.ResponseWriter,
- params models.RequestParams,
- instrumentOpts instrument.Options,
-) (readResult, error) {
- ctx, cancel := context.WithTimeout(reqCtx, params.Timeout)
- defer cancel()
-
- sp := xopentracing.SpanFromContextOrNoop(ctx)
- sp.LogFields(
- opentracinglog.String("params.query", params.Query),
- xopentracing.Time("params.start", params.Start),
- xopentracing.Time("params.end", params.End),
- xopentracing.Time("params.now", params.Now),
- xopentracing.Duration("params.step", params.Step),
- )
-
- // Detect clients closing connections.
- handler.CloseWatcher(ctx, cancel, w, instrumentOpts)
- emptyResult := readResult{meta: block.NewResultMetadata()}
-
- // TODO: Capture timing
- parseOpts := engine.Options().ParseOptions()
- parser, err := promql.Parse(params.Query, params.Step, tagOpts, parseOpts)
- if err != nil {
- return emptyResult, err
- }
-
- result, err := engine.ExecuteExpr(ctx, parser, opts, fetchOpts, params)
- if err != nil {
- return emptyResult, err
+func newPromReadMetrics(scope tally.Scope) promReadMetrics {
+ return promReadMetrics{
+ fetchSuccess: scope.Counter("fetch.success"),
+ fetchErrorsServer: scope.Tagged(map[string]string{"code": "5XX"}).
+ Counter("fetch.errors"),
+ fetchErrorsClient: scope.Tagged(map[string]string{"code": "4XX"}).
+ Counter("fetch.errors"),
+ fetchTimerSuccess: scope.Timer("fetch.success.latency"),
+ maxDatapoints: scope.Gauge("max_datapoints"),
}
+}
- // Block slices are sorted by start time.
- // TODO: Pooling
- sortedBlockList := make([]blockWithMeta, 0, initialBlockAlloc)
- resultChan := result.ResultChan()
- defer func() {
- for range resultChan {
- // NB: drain result channel in case of early termination.
- }
- }()
-
- var (
- numSteps int
- numSeries int
+// ReadResponse is the response returned to the user.
+type ReadResponse struct {
+ Results []ts.Series `json:"results,omitempty"`
+}
- firstElement bool
- )
+// ReadResult is the result of a read query.
+type ReadResult struct {
+ Series []*ts.Series
+ Meta block.ResultMetadata
+ BlockType block.BlockType
+}
- meta := block.NewResultMetadata()
- // TODO(nikunj): Stream blocks to client
- for blkResult := range resultChan {
- if err := blkResult.Err; err != nil {
- return emptyResult, err
- }
+// ParseRequest parses the given request.
+func ParseRequest(
+ ctx context.Context,
+ r *http.Request,
+ instantaneous bool,
+ opts options.HandlerOptions,
+) (ParsedOptions, *xhttp.ParseError) {
+ fetchOpts, rErr := opts.FetchOptionsBuilder().NewFetchOptions(r)
+ if rErr != nil {
+ return ParsedOptions{}, rErr
+ }
- b := blkResult.Block
- if !firstElement {
- firstElement = true
- firstStepIter, err := b.StepIter()
- if err != nil {
- return emptyResult, err
- }
-
- numSteps = firstStepIter.StepCount()
- numSeries = len(firstStepIter.SeriesMeta())
- meta = b.Meta().ResultMetadata
+ queryOpts := &executor.QueryOptions{
+ QueryContextOptions: models.QueryContextOptions{
+ LimitMaxTimeseries: fetchOpts.SeriesLimit,
+ LimitMaxDocs: fetchOpts.DocsLimit,
+ }}
+
+ restrictOpts := fetchOpts.RestrictQueryOptions.GetRestrictByType()
+ if restrictOpts != nil {
+ restrict := &models.RestrictFetchTypeQueryContextOptions{
+ MetricsType: uint(restrictOpts.MetricsType),
+ StoragePolicy: restrictOpts.StoragePolicy,
}
- // Insert blocks sorted by start time.
- insertResult, err := insertSortedBlock(b, sortedBlockList,
- numSteps, numSeries)
- if err != nil {
- return emptyResult, err
- }
+ queryOpts.QueryContextOptions.RestrictFetchType = restrict
+ }
- sortedBlockList = insertResult.blocks
- meta = meta.CombineMetadata(insertResult.meta)
+ engine := opts.Engine()
+ var params models.RequestParams
+ if instantaneous {
+ params, rErr = parseInstantaneousParams(r, engine.Options(),
+ opts.TimeoutOpts(), fetchOpts, opts.InstrumentOpts())
+ } else {
+ params, rErr = parseParams(r, engine.Options(),
+ opts.TimeoutOpts(), fetchOpts, opts.InstrumentOpts())
}
- // Ensure that the blocks are closed. Can't do this above since
- // sortedBlockList might change.
- defer func() {
- for _, b := range sortedBlockList {
- // FIXME: this will double close blocks that have gone through the
- // function pipeline.
- b.block.Close()
- }
- }()
+ if rErr != nil {
+ return ParsedOptions{}, rErr
+ }
- series, err := sortedBlocksToSeriesList(sortedBlockList)
- if err != nil {
- return emptyResult, err
+ maxPoints := opts.Config().Limits.MaxComputedDatapoints()
+ if err := validateRequest(params, maxPoints); err != nil {
+ return ParsedOptions{}, xhttp.NewParseError(err, http.StatusBadRequest)
}
- series = prometheus.FilterSeriesByOptions(series, fetchOpts)
- return readResult{
- series: series,
- meta: meta,
+ return ParsedOptions{
+ QueryOpts: queryOpts,
+ FetchOpts: fetchOpts,
+ Params: params,
}, nil
}
-func sortedBlocksToSeriesList(blockList []blockWithMeta) ([]*ts.Series, error) {
- if len(blockList) == 0 {
- return emptySeriesList, nil
+func validateRequest(params models.RequestParams, maxPoints int) error {
+ // Impose a rough limit on the number of computed datapoints.
+ // This is intended to prevent things like querying from the beginning of
+ // time with a 1s step size.
+ numSteps := int(params.End.Sub(params.Start) / params.Step)
+ if maxPoints > 0 && numSteps > maxPoints {
+ return fmt.Errorf(
+ "querying from %v to %v with step size %v would result in too many "+
+ "datapoints (end - start / step > %d). Either decrease the query "+
+ "resolution (?step=XX), decrease the time window, or increase "+
+ "the limit (`limits.maxComputedDatapoints`)",
+ params.Start, params.End, params.Step, maxPoints,
+ )
}
+ return nil
+}
+
+// ParsedOptions are parsed options for the query.
+type ParsedOptions struct {
+ QueryOpts *executor.QueryOptions
+ FetchOpts *storage.FetchOptions
+ Params models.RequestParams
+ CancelWatcher handler.CancelWatcher
+}
+
+func read(
+ ctx context.Context,
+ parsed ParsedOptions,
+ handlerOpts options.HandlerOptions,
+) (ReadResult, error) {
var (
- firstBlock = blockList[0].block
- meta = firstBlock.Meta()
- bounds = meta.Bounds
- commonTags = meta.Tags.Tags
+ opts = parsed.QueryOpts
+ fetchOpts = parsed.FetchOpts
+ params = parsed.Params
+ cancelWatcher = parsed.CancelWatcher
+
+ tagOpts = handlerOpts.TagOptions()
+ engine = handlerOpts.Engine()
+ )
+ sp := xopentracing.SpanFromContextOrNoop(ctx)
+ sp.LogFields(
+ opentracinglog.String("params.query", params.Query),
+ xopentracing.Time("params.start", params.Start),
+ xopentracing.Time("params.end", params.End),
+ xopentracing.Time("params.now", params.Now),
+ xopentracing.Duration("params.step", params.Step),
)
- firstIter, err := firstBlock.StepIter()
- if err != nil {
- return nil, err
+ emptyResult := ReadResult{
+ Meta: block.NewResultMetadata(),
+ BlockType: block.BlockEmpty,
}
- var (
- seriesMeta = firstIter.SeriesMeta()
- numSeries = len(seriesMeta)
- seriesList = make([]*ts.Series, 0, numSeries)
- iters = make([]block.StepIter, 0, len(blockList))
- )
+ // TODO: Capture timing
+ parseOpts := engine.Options().ParseOptions()
+ parser, err := promql.Parse(params.Query, params.Step, tagOpts, parseOpts)
+ if err != nil {
+ return emptyResult, err
+ }
- // To create individual series, we iterate over seriesIterators for each
- // block in the block list. For each iterator, the nth current() will
- // be combined to give the nth series.
- for _, b := range blockList {
- it, err := b.block.StepIter()
- if err != nil {
- return nil, err
- }
+ // Detect clients closing connections.
+ if cancelWatcher != nil {
+ ctx, cancel := context.WithTimeout(ctx, fetchOpts.Timeout)
+ defer cancel()
- iters = append(iters, it)
+ cancelWatcher.WatchForCancel(ctx, cancel)
}
- numValues := 0
- for _, block := range blockList {
- b, err := block.block.StepIter()
- if err != nil {
- return nil, err
- }
+ bl, err := engine.ExecuteExpr(ctx, parser, opts, fetchOpts, params)
+ if err != nil {
+ return emptyResult, err
+ }
- numValues += b.StepCount()
+ resultMeta := bl.Meta().ResultMetadata
+ it, err := bl.StepIter()
+ if err != nil {
+ return emptyResult, err
}
+ seriesMeta := it.SeriesMeta()
+ numSeries := len(seriesMeta)
+
+ bounds := bl.Meta().Bounds
// Initialize data slices.
- data := make([]ts.FixedResolutionMutableValues, numSeries)
- for i := range data {
- data[i] = ts.NewFixedStepValues(bounds.StepSize, numValues,
- math.NaN(), bounds.Start)
+ data := make([]ts.FixedResolutionMutableValues, 0, numSeries)
+ for i := 0; i < numSeries; i++ {
+ data = append(data, ts.NewFixedStepValues(bounds.StepSize, bounds.Steps(),
+ math.NaN(), bounds.Start))
}
stepIndex := 0
- for _, it := range iters {
- for it.Next() {
- step := it.Current()
- for seriesIndex, v := range step.Values() {
- // NB: iteration moves by time step across a block, so each value in the
- // step iterator corresponds to a different series; transform it to
- // series-based iteration using mutable series values.
- mutableValuesForSeries := data[seriesIndex]
- mutableValuesForSeries.SetValueAt(stepIndex, v)
- }
-
- stepIndex++
+ for it.Next() {
+ step := it.Current()
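+ // NB: a step iterator walks time steps across the whole block, so each
+ // value in a step belongs to a different series; pivot the values into
+ // per-series mutable storage here.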
+ for seriesIndex, v := range step.Values() {
+ mutableValuesForSeries := data[seriesIndex]
+ mutableValuesForSeries.SetValueAt(stepIndex, v)
}
- if err := it.Err(); err != nil {
- return nil, err
- }
+ stepIndex++
+ }
+
+ if err := it.Err(); err != nil {
+ return emptyResult, err
}
+ seriesList := make([]*ts.Series, 0, len(data))
for i, values := range data {
var (
meta = seriesMeta[i]
- tags = meta.Tags.AddTags(commonTags)
+ tags = meta.Tags.AddTags(bl.Meta().Tags.Tags)
series = ts.NewSeries(meta.Name, values, tags)
)
seriesList = append(seriesList, series)
}
- return seriesList, nil
-}
-
-type insertBlockResult struct {
- blocks []blockWithMeta
- meta block.ResultMetadata
-}
-
-func insertSortedBlock(
- b block.Block,
- blockList []blockWithMeta,
- stepCount,
- seriesCount int,
-) (insertBlockResult, error) {
- it, err := b.StepIter()
- emptyResult := insertBlockResult{meta: b.Meta().ResultMetadata}
- if err != nil {
+ if err := bl.Close(); err != nil {
return emptyResult, err
}
- meta := b.Meta()
- if len(blockList) == 0 {
- blockList = append(blockList, blockWithMeta{
- block: b,
- meta: meta,
- })
-
- return insertBlockResult{
- blocks: blockList,
- meta: b.Meta().ResultMetadata,
- }, nil
- }
+ seriesList = prometheus.FilterSeriesByOptions(seriesList, fetchOpts)
- blockSeriesCount := len(it.SeriesMeta())
- if seriesCount != blockSeriesCount {
- return emptyResult, fmt.Errorf(
- "mismatch in number of series for the block, wanted: %d, found: %d",
- seriesCount, blockSeriesCount)
- }
-
- // Binary search to keep the start times sorted
- index := sort.Search(len(blockList), func(i int) bool {
- return blockList[i].meta.Bounds.Start.After(meta.Bounds.Start)
- })
-
- // Append here ensures enough size in the slice
- blockList = append(blockList, blockWithMeta{})
- copy(blockList[index+1:], blockList[index:])
- blockList[index] = blockWithMeta{
- block: b,
- meta: meta,
- }
+ blockType := bl.Info().Type()
- return insertBlockResult{
- meta: b.Meta().ResultMetadata,
- blocks: blockList,
+ return ReadResult{
+ Series: seriesList,
+ Meta: resultMeta,
+ BlockType: blockType,
}, nil
}
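ParseRequest and read now form a reusable pipeline that tests (and other handlers in this package) can drive without going through ServeHTTP. A rough in-package sketch, assuming an *http.Request named req and populated HandlerOptions named opts; note that read is unexported, so this only compiles inside package native:

    // Sketch only: parse, validate and execute a range query directly.
    parsed, parseErr := ParseRequest(ctx, req, false, opts)
    if parseErr != nil {
        return parseErr.Inner()
    }
    result, err := read(ctx, parsed, opts)
    if err != nil {
        return err
    }
    // result.Series, result.Meta and result.BlockType are now populated.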
diff --git a/src/query/api/v1/handler/prometheus/native/read_instantaneous.go b/src/query/api/v1/handler/prometheus/native/read_instantaneous.go
deleted file mode 100644
index 38e746008f..0000000000
--- a/src/query/api/v1/handler/prometheus/native/read_instantaneous.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package native
-
-import (
- "context"
- "net/http"
-
- "github.com/m3db/m3/src/query/api/v1/handler"
- "github.com/m3db/m3/src/query/api/v1/handler/prometheus"
- "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
- "github.com/m3db/m3/src/query/api/v1/options"
- "github.com/m3db/m3/src/query/executor"
- "github.com/m3db/m3/src/query/models"
- "github.com/m3db/m3/src/query/util/logging"
- "github.com/m3db/m3/src/x/instrument"
- xhttp "github.com/m3db/m3/src/x/net/http"
-
- "go.uber.org/zap"
-)
-
-const (
- // PromReadInstantURL is the url for native instantaneous prom read
- // handler, this matches the default URL for the query endpoint
- // found on a Prometheus server
- PromReadInstantURL = handler.RoutePrefixV1 + "/query"
-)
-
-var (
- // PromReadInstantHTTPMethods are the HTTP methods for this handler.
- PromReadInstantHTTPMethods = []string{
- http.MethodGet,
- http.MethodPost,
- }
-)
-
-// PromReadInstantHandler represents a handler for prometheus instantaneous read endpoint.
-type PromReadInstantHandler struct {
- engine executor.Engine
- fetchOptionsBuilder handleroptions.FetchOptionsBuilder
- tagOpts models.TagOptions
- timeoutOpts *prometheus.TimeoutOpts
- instrumentOpts instrument.Options
-}
-
-// NewPromReadInstantHandler returns a new instance of handler.
-func NewPromReadInstantHandler(
- opts options.HandlerOptions) *PromReadInstantHandler {
- return &PromReadInstantHandler{
- engine: opts.Engine(),
- fetchOptionsBuilder: opts.FetchOptionsBuilder(),
- tagOpts: opts.TagOptions(),
- timeoutOpts: opts.TimeoutOpts(),
- instrumentOpts: opts.InstrumentOpts(),
- }
-}
-
-func (h *PromReadInstantHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- ctx := context.WithValue(r.Context(), handler.HeaderKey, r.Header)
- logger := logging.WithContext(ctx, h.instrumentOpts)
-
- fetchOpts, rErr := h.fetchOptionsBuilder.NewFetchOptions(r)
- if rErr != nil {
- xhttp.Error(w, rErr.Inner(), rErr.Code())
- return
- }
-
- params, rErr := parseInstantaneousParams(r, h.engine.Options(),
- h.timeoutOpts, fetchOpts, h.instrumentOpts)
- if rErr != nil {
- xhttp.Error(w, rErr, rErr.Code())
- return
- }
-
- if params.Debug {
- logger.Info("request params", zap.Any("params", params))
- }
-
- queryOpts := &executor.QueryOptions{
- QueryContextOptions: models.QueryContextOptions{
- LimitMaxTimeseries: fetchOpts.Limit,
- }}
-
- restrictOpts := fetchOpts.RestrictQueryOptions.GetRestrictByType()
- if restrictOpts != nil {
- restrict := &models.RestrictFetchTypeQueryContextOptions{
- MetricsType: uint(restrictOpts.MetricsType),
- StoragePolicy: restrictOpts.StoragePolicy,
- }
- queryOpts.QueryContextOptions.RestrictFetchType = restrict
- }
-
- result, err := read(ctx, h.engine, queryOpts, fetchOpts,
- h.tagOpts, w, params, h.instrumentOpts)
- if err != nil {
- logger.Error("unable to fetch data", zap.Error(err))
- xhttp.Error(w, err, http.StatusInternalServerError)
- return
- }
-
- // TODO: Support multiple result types
- w.Header().Set("Content-Type", "application/json")
- handleroptions.AddWarningHeaders(w, result.meta)
- renderResultsInstantaneousJSON(w, result.series)
-}
diff --git a/src/query/api/v1/handler/prometheus/native/read_instantaneous_test.go b/src/query/api/v1/handler/prometheus/native/read_instantaneous_test.go
index 11dda2858d..9336296687 100644
--- a/src/query/api/v1/handler/prometheus/native/read_instantaneous_test.go
+++ b/src/query/api/v1/handler/prometheus/native/read_instantaneous_test.go
@@ -30,10 +30,11 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/test"
+ "github.com/m3db/m3/src/x/headers"
+ xjson "github.com/m3db/m3/src/x/json"
xtest "github.com/m3db/m3/src/x/test"
"github.com/stretchr/testify/assert"
@@ -79,21 +80,23 @@ func (v vectorResultValues) parse() (time.Time, int, error) {
}
func TestPromReadInstantHandler(t *testing.T) {
- testPromReadInstantHandler(t, block.NewResultMetadata(), "")
- testPromReadInstantHandler(t, buildWarningMeta("foo", "bar"), "foo_bar")
+ testPromReadInstantHandler(t, block.NewResultMetadata(), "", "")
+ testPromReadInstantHandler(t, buildWarningMeta("foo", "bar"), "foo_bar", "foo_bar")
testPromReadInstantHandler(t, block.ResultMetadata{Exhaustive: false},
- handleroptions.LimitHeaderSeriesLimitApplied)
+ headers.LimitHeaderSeriesLimitApplied,
+ "m3db exceeded query limit: results not exhaustive")
}
func testPromReadInstantHandler(
t *testing.T,
resultMeta block.ResultMetadata,
ex string,
+ jsonWarning string,
) {
values, bounds := test.GenerateValuesAndBounds(nil, nil)
setup := newTestSetup()
- promReadInstant := setup.Handlers.InstantRead
+ promReadInstant := setup.Handlers.instantRead
seriesMeta := test.NewSeriesMeta("dummy", len(values))
meta := block.Metadata{
@@ -118,7 +121,7 @@ func testPromReadInstantHandler(
require.Equal(t, http.StatusOK, recorder.Result().StatusCode)
- header := recorder.Header().Get(handleroptions.LimitHeader)
+ header := recorder.Header().Get(headers.LimitHeader)
assert.Equal(t, ex, header)
var result vectorResult
@@ -130,43 +133,47 @@ func testPromReadInstantHandler(
at1, value1, err := result.Data.Result[1].Value.parse()
require.NoError(t, err)
- expected := xtest.MustPrettyJSON(t, fmt.Sprintf(`
- {
+ expectedResp := xjson.Map{
"status": "success",
- "data": {
+ "data": xjson.Map{
"resultType": "vector",
- "result": [
- {
- "metric": {
+ "result": xjson.Array{
+ xjson.Map{
+ "metric": xjson.Map{
"__name__": "dummy0",
- "dummy0": "dummy0"
+ "dummy0": "dummy0",
+ },
+ "value": xjson.Array{
+ at0.Unix(),
+ strconv.Itoa(value0),
},
- "value": [
- %d,
- "%d"
- ]
},
- {
- "metric": {
+ xjson.Map{
+ "metric": xjson.Map{
"__name__": "dummy1",
- "dummy1": "dummy1"
+ "dummy1": "dummy1",
+ },
+ "value": xjson.Array{
+ at1.Unix(),
+ strconv.Itoa(value1),
},
- "value": [
- %d,
- "%d"
- ]
- }
- ]
- }
+ },
+ },
+ },
}
- `, at0.Unix(), value0, at1.Unix(), value1))
- actual := xtest.MustPrettyJSON(t, recorder.Body.String())
+
+ if len(jsonWarning) != 0 {
+ expectedResp["warnings"] = xjson.Array{jsonWarning}
+ }
+
+ expected := xtest.MustPrettyJSONMap(t, expectedResp)
+ actual := xtest.MustPrettyJSONString(t, recorder.Body.String())
assert.Equal(t, expected, actual, xtest.Diff(expected, actual))
}
func TestPromReadInstantHandlerStorageError(t *testing.T) {
setup := newTestSetup()
- promReadInstant := setup.Handlers.InstantRead
+ promReadInstant := setup.Handlers.instantRead
storageErr := fmt.Errorf("storage err")
setup.Storage.SetFetchBlocksResult(block.Result{}, storageErr)
diff --git a/src/query/api/v1/handler/prometheus/native/read_test.go b/src/query/api/v1/handler/prometheus/native/read_test.go
index d31fcfd0f5..2e0709c0f6 100644
--- a/src/query/api/v1/handler/prometheus/native/read_test.go
+++ b/src/query/api/v1/handler/prometheus/native/read_test.go
@@ -40,6 +40,7 @@ import (
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/mock"
"github.com/m3db/m3/src/query/test"
+ "github.com/m3db/m3/src/x/headers"
"github.com/m3db/m3/src/x/instrument"
"github.com/stretchr/testify/assert"
@@ -50,7 +51,7 @@ func TestPromReadHandlerRead(t *testing.T) {
testPromReadHandlerRead(t, block.NewResultMetadata(), "")
testPromReadHandlerRead(t, buildWarningMeta("foo", "bar"), "foo_bar")
testPromReadHandlerRead(t, block.ResultMetadata{Exhaustive: false},
- handleroptions.LimitHeaderSeriesLimitApplied)
+ headers.LimitHeaderSeriesLimitApplied)
}
func testPromReadHandlerRead(
@@ -61,7 +62,7 @@ func testPromReadHandlerRead(
values, bounds := test.GenerateValuesAndBounds(nil, nil)
setup := newTestSetup()
- promRead := setup.Handlers.Read
+ promRead := setup.Handlers.read
seriesMeta := test.NewSeriesMeta("dummy", len(values))
m := block.Metadata{
@@ -79,12 +80,16 @@ func testPromReadHandlerRead(
r, parseErr := testParseParams(req)
require.Nil(t, parseErr)
assert.Equal(t, models.FormatPromQL, r.FormatType)
- result, err := read(context.TODO(), promRead.engine,
- setup.QueryOpts, setup.FetchOpts, promRead.tagOpts, httptest.NewRecorder(),
- r, instrument.NewOptions())
+ parsed := ParsedOptions{
+ QueryOpts: setup.QueryOpts,
+ FetchOpts: setup.FetchOpts,
+ Params: r,
+ }
- seriesList := result.series
+ result, err := read(context.TODO(), parsed, promRead.opts)
require.NoError(t, err)
+ seriesList := result.Series
+
require.Len(t, seriesList, 2)
s := seriesList[0]
@@ -105,7 +110,7 @@ func TestM3PromReadHandlerRead(t *testing.T) {
testM3PromReadHandlerRead(t, block.NewResultMetadata(), "")
testM3PromReadHandlerRead(t, buildWarningMeta("foo", "bar"), "foo_bar")
testM3PromReadHandlerRead(t, block.ResultMetadata{Exhaustive: false},
- handleroptions.LimitHeaderSeriesLimitApplied)
+ headers.LimitHeaderSeriesLimitApplied)
}
func testM3PromReadHandlerRead(
@@ -116,7 +121,7 @@ func testM3PromReadHandlerRead(
values, bounds := test.GenerateValuesAndBounds(nil, nil)
setup := newTestSetup()
- promRead := setup.Handlers.Read
+ promRead := setup.Handlers.read
seriesMeta := test.NewSeriesMeta("dummy", len(values))
meta := block.Metadata{
@@ -129,13 +134,13 @@ func testM3PromReadHandlerRead(
setup.Storage.SetFetchBlocksResult(block.Result{Blocks: []block.Block{b}}, nil)
req, _ := http.NewRequest("GET", PromReadURL, nil)
- req.Header.Add("X-M3-Render-Format", "m3ql")
+ req.Header.Add(headers.RenderFormat, "m3ql")
req.URL.RawQuery = defaultParams().Encode()
recorder := httptest.NewRecorder()
promRead.ServeHTTP(recorder, req)
- header := recorder.Header().Get(handleroptions.LimitHeader)
+ header := recorder.Header().Get(headers.LimitHeader)
assert.Equal(t, ex, header)
var m3qlResp M3QLResp
@@ -150,7 +155,6 @@ func testM3PromReadHandlerRead(
assert.Equal(t, map[string]string{"__name__": "dummy1", "dummy1": "dummy1"},
m3qlResp[1].Tags)
assert.Equal(t, 10000, m3qlResp[1].StepSizeMs)
-
}
func newReadRequest(t *testing.T, params url.Values) *http.Request {
@@ -166,11 +170,12 @@ type testSetup struct {
QueryOpts *executor.QueryOptions
FetchOpts *storage.FetchOptions
TimeoutOpts *prometheus.TimeoutOpts
+ options options.HandlerOptions
}
type testSetupHandlers struct {
- Read *PromReadHandler
- InstantRead *PromReadInstantHandler
+ read *promReadHandler
+ instantRead *promReadHandler
}
func newTestSetup() *testSetup {
@@ -202,28 +207,32 @@ func newTestSetup() *testSetup {
},
})
- read := NewPromReadHandler(opts)
- instantRead := NewPromReadInstantHandler(opts)
+ read := NewPromReadHandler(opts).(*promReadHandler)
+ instantRead := NewPromReadInstantHandler(opts).(*promReadHandler)
return &testSetup{
Storage: mockStorage,
Handlers: testSetupHandlers{
- Read: read,
- InstantRead: instantRead,
+ read: read,
+ instantRead: instantRead,
},
QueryOpts: &executor.QueryOptions{},
FetchOpts: storage.NewFetchOptions(),
TimeoutOpts: timeoutOpts,
+ options: opts,
}
}
-func TestPromReadHandler_ServeHTTP_maxComputedDatapoints(t *testing.T) {
+func TestPromReadHandlerServeHTTPMaxComputedDatapoints(t *testing.T) {
setup := newTestSetup()
- setup.Handlers.Read.limitsCfg = &config.LimitsConfiguration{
- PerQuery: config.PerQueryLimitsConfiguration{
- PrivateMaxComputedDatapoints: 3599,
+ opts := setup.Handlers.read.opts
+ setup.Handlers.read.opts = opts.SetConfig(config.Configuration{
+ Limits: config.LimitsConfiguration{
+ PerQuery: config.PerQueryLimitsConfiguration{
+ PrivateMaxComputedDatapoints: 3599,
+ },
},
- }
+ })
params := defaultParams()
params.Set(startParam, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC).
@@ -234,7 +243,7 @@ func TestPromReadHandler_ServeHTTP_maxComputedDatapoints(t *testing.T) {
req := newReadRequest(t, params)
recorder := httptest.NewRecorder()
- setup.Handlers.Read.ServeHTTP(recorder, req)
+ setup.Handlers.read.ServeHTTP(recorder, req)
resp := recorder.Result()
assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
@@ -260,87 +269,79 @@ func TestPromReadHandler_validateRequest(t *testing.T) {
}
cases := []struct {
- Name string
- Params *models.RequestParams
- Max int64
- ErrorExpected bool
+ name string
+ params models.RequestParams
+ max int
+ errorExpected bool
}{{
- Name: "under limit",
- Params: &models.RequestParams{
+ name: "under limit",
+ params: models.RequestParams{
Step: time.Second,
Start: dt(2018, 1, 1, 0),
End: dt(2018, 1, 1, 1),
},
- Max: 3601,
- ErrorExpected: false,
+ max: 3601,
+ errorExpected: false,
}, {
- Name: "at limit",
- Params: &models.RequestParams{
+ name: "at limit",
+ params: models.RequestParams{
Step: time.Second,
Start: dt(2018, 1, 1, 0),
End: dt(2018, 1, 1, 1),
},
- Max: 3600,
- ErrorExpected: false,
+ max: 3600,
+ errorExpected: false,
}, {
- Name: "over limit",
- Params: &models.RequestParams{
+ name: "over limit",
+ params: models.RequestParams{
Step: time.Second,
Start: dt(2018, 1, 1, 0),
End: dt(2018, 1, 1, 1),
},
- Max: 3599,
- ErrorExpected: true,
+ max: 3599,
+ errorExpected: true,
}, {
- Name: "large query, limit disabled (0)",
- Params: &models.RequestParams{
+ name: "large query, limit disabled (0)",
+ params: models.RequestParams{
Step: time.Second,
Start: dt(2018, 1, 1, 0),
End: dt(2018, 1, 1, 1),
},
- Max: 0,
- ErrorExpected: false,
+ max: 0,
+ errorExpected: false,
}, {
- Name: "large query, limit disabled (negative)",
- Params: &models.RequestParams{
+ name: "large query, limit disabled (negative)",
+ params: models.RequestParams{
Step: time.Second,
Start: dt(2018, 1, 1, 0),
End: dt(2018, 1, 1, 1),
},
- Max: -50,
- ErrorExpected: false,
+ max: -50,
+ errorExpected: false,
}, {
- Name: "uneven step over limit",
- Params: &models.RequestParams{
+ name: "uneven step over limit",
+ params: models.RequestParams{
Step: 34 * time.Minute,
Start: dt(2018, 1, 1, 0),
End: dt(2018, 1, 1, 11),
},
- Max: 1,
- ErrorExpected: true,
+ max: 1,
+ errorExpected: true,
}, {
- Name: "uneven step under limit",
- Params: &models.RequestParams{
+ name: "uneven step under limit",
+ params: models.RequestParams{
Step: 34 * time.Minute,
Start: dt(2018, 1, 1, 0),
End: dt(2018, 1, 1, 1),
},
- Max: 2,
- ErrorExpected: false},
+ max: 2,
+ errorExpected: false},
}
for _, tc := range cases {
- t.Run(tc.Name, func(t *testing.T) {
- setup := newTestSetup()
- setup.Handlers.Read.limitsCfg = &config.LimitsConfiguration{
- PerQuery: config.PerQueryLimitsConfiguration{
- PrivateMaxComputedDatapoints: tc.Max,
- },
- }
-
- err := setup.Handlers.Read.validateRequest(tc.Params)
-
- if tc.ErrorExpected {
+ t.Run(tc.name, func(t *testing.T) {
+ err := validateRequest(tc.params, tc.max)
+ if tc.errorExpected {
require.Error(t, err)
} else {
require.NoError(t, err)
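The 3599/3600/3601 boundaries in these cases follow directly from validateRequest's arithmetic: one hour at a 1s step gives numSteps = (End - Start) / Step = 3600s / 1s = 3600, so a max of 3599 rejects the query while 3600 and 3601 admit it, since the check fires only when numSteps strictly exceeds the limit. The uneven-step cases work the same way with integer truncation: 660m / 34m = 19 steps against a limit of 1 fails, while 60m / 34m = 1 step against a limit of 2 passes.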
diff --git a/src/query/api/v1/handler/prometheus/remote/map_tags_test.go b/src/query/api/v1/handler/prometheus/remote/map_tags_test.go
new file mode 100644
index 0000000000..6a767924db
--- /dev/null
+++ b/src/query/api/v1/handler/prometheus/remote/map_tags_test.go
@@ -0,0 +1,120 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package remote
+
+import (
+ "testing"
+
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
+ "github.com/m3db/m3/src/query/generated/proto/prompb"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMapTags_Append(t *testing.T) {
+ req := &prompb.WriteRequest{
+ Timeseries: []prompb.TimeSeries{
+ {
+ Labels: []prompb.Label{
+ {Name: []byte("tag1"), Value: []byte("val1")},
+ {Name: []byte("tag2"), Value: []byte("val1")},
+ {Name: []byte("tag3"), Value: []byte("val4")},
+ },
+ Samples: []prompb.Sample{},
+ },
+ {
+ Labels: []prompb.Label{
+ {Name: []byte("tag1"), Value: []byte("val1")},
+ },
+ Samples: []prompb.Sample{},
+ },
+ {
+ Labels: []prompb.Label{},
+ Samples: []prompb.Sample{},
+ },
+ },
+ }
+
+ opts := handleroptions.MapTagsOptions{
+ TagMappers: []handleroptions.TagMapper{
+ {Write: handleroptions.WriteOp{Tag: "tag1", Value: "val2"}},
+ {Write: handleroptions.WriteOp{Tag: "tag2", Value: "val3"}},
+ },
+ }
+
+ err := mapTags(req, opts)
+ assert.NoError(t, err)
+
+ exp := &prompb.WriteRequest{
+ Timeseries: []prompb.TimeSeries{
+ {
+ Labels: []prompb.Label{
+ {Name: []byte("tag1"), Value: []byte("val2")},
+ {Name: []byte("tag2"), Value: []byte("val3")},
+ {Name: []byte("tag3"), Value: []byte("val4")},
+ },
+ Samples: []prompb.Sample{},
+ },
+ {
+ Labels: []prompb.Label{
+ {Name: []byte("tag1"), Value: []byte("val2")},
+ {Name: []byte("tag2"), Value: []byte("val3")},
+ },
+ Samples: []prompb.Sample{},
+ },
+ {
+ Labels: []prompb.Label{
+ {Name: []byte("tag1"), Value: []byte("val2")},
+ {Name: []byte("tag2"), Value: []byte("val3")},
+ },
+ Samples: []prompb.Sample{},
+ },
+ },
+ }
+
+ assert.Equal(t, exp, req)
+}
+
+func TestMapTags_Err(t *testing.T) {
+ req := &prompb.WriteRequest{}
+ opts := handleroptions.MapTagsOptions{
+ TagMappers: []handleroptions.TagMapper{
+ {
+ Write: handleroptions.WriteOp{Tag: "tag1", Value: "val2"},
+ Drop: handleroptions.DropOp{Tag: "tag2"},
+ },
+ },
+ }
+
+ err := mapTags(req, opts)
+ assert.Error(t, err)
+
+ opts.TagMappers[0] = handleroptions.TagMapper{
+ Drop: handleroptions.DropOp{Tag: "foo"},
+ }
+ err = mapTags(req, opts)
+ assert.Error(t, err)
+
+ opts.TagMappers[0] = handleroptions.TagMapper{
+ Replace: handleroptions.ReplaceOp{Tag: "foo"},
+ }
+ err = mapTags(req, opts)
+ assert.Error(t, err)
+}
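mapTags calls mapper.Validate() before applying any operation, and the first error case above (a mapper with both Write and Drop set) exercises it. The handleroptions implementation is not part of this diff; a plausible sketch of such a validation, purely illustrative and possibly different from the real code:

    // Hypothetical sketch of TagMapper.Validate; the actual implementation
    // lives in the handleroptions package.
    func (t TagMapper) Validate() error {
        numOps := 0
        for _, set := range []bool{
            !t.Write.IsEmpty(),
            !t.Drop.IsEmpty(),
            !t.DropWithValue.IsEmpty(),
            !t.Replace.IsEmpty(),
        } {
            if set {
                numOps++
            }
        }
        if numOps != 1 {
            return fmt.Errorf("tag mapper must set exactly one operation, got %d", numOps)
        }
        return nil
    }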
diff --git a/src/query/api/v1/handler/prometheus/remote/map_tags.go b/src/query/api/v1/handler/prometheus/remote/map_tags.go
new file mode 100644
index 0000000000..af4331c50f
--- /dev/null
+++ b/src/query/api/v1/handler/prometheus/remote/map_tags.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package remote
+
+import (
+ "bytes"
+ "errors"
+
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
+ "github.com/m3db/m3/src/query/generated/proto/prompb"
+)
+
+// mapTags modifies a given write request based on the tag mappers passed.
+func mapTags(req *prompb.WriteRequest, opts handleroptions.MapTagsOptions) error {
+ for _, mapper := range opts.TagMappers {
+ if err := mapper.Validate(); err != nil {
+ return err
+ }
+
+ if op := mapper.Write; !op.IsEmpty() {
+ tag := []byte(op.Tag)
+ value := []byte(op.Value)
+
+ for i, ts := range req.Timeseries {
+ replaced := false
+ for j, l := range ts.Labels {
+ if bytes.Equal(l.Name, tag) {
+ ts.Labels[j].Value = value
+ replaced = true
+ }
+ }
+
+ if !replaced {
+ // No existing label with this tag; append it.
+ req.Timeseries[i].Labels = append(ts.Labels, prompb.Label{
+ Name: tag,
+ Value: value,
+ })
+ }
+ }
+ }
+
+ if op := mapper.Drop; !op.IsEmpty() {
+ return errors.New("Drop operation is not yet supported")
+ }
+
+ if op := mapper.DropWithValue; !op.IsEmpty() {
+ return errors.New("DropWithValue operation is not yet supported")
+ }
+
+ if op := mapper.Replace; !op.IsEmpty() {
+ return errors.New("Replace operation is not yet supported")
+ }
+ }
+
+ return nil
+}
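The write operation is effectively an upsert across every series in the request: an existing label with the tag gets its value overwritten, and series without the tag get it appended. A small illustrative usage, with the req literal assumed to be an incoming *prompb.WriteRequest:

    // Sketch only: force env=prod onto every incoming series.
    opts := handleroptions.MapTagsOptions{
        TagMappers: []handleroptions.TagMapper{
            {Write: handleroptions.WriteOp{Tag: "env", Value: "prod"}},
        },
    }
    if err := mapTags(req, opts); err != nil {
        return err
    }
    // Every req.Timeseries entry now carries an env=prod label.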
diff --git a/src/query/api/v1/handler/prometheus/remote/match.go b/src/query/api/v1/handler/prometheus/remote/match.go
index b0ebb0fe51..ec71430363 100644
--- a/src/query/api/v1/handler/prometheus/remote/match.go
+++ b/src/query/api/v1/handler/prometheus/remote/match.go
@@ -30,6 +30,7 @@ import (
"github.com/m3db/m3/src/query/api/v1/options"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/parser/promql"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/util/logging"
"github.com/m3db/m3/src/x/instrument"
@@ -55,6 +56,7 @@ type PromSeriesMatchHandler struct {
tagOptions models.TagOptions
fetchOptionsBuilder handleroptions.FetchOptionsBuilder
instrumentOpts instrument.Options
+ parseOpts promql.ParseOptions
}
// NewPromSeriesMatchHandler returns a new instance of handler.
@@ -64,16 +66,17 @@ func NewPromSeriesMatchHandler(opts options.HandlerOptions) http.Handler {
storage: opts.Storage(),
fetchOptionsBuilder: opts.FetchOptionsBuilder(),
instrumentOpts: opts.InstrumentOpts(),
+ parseOpts: opts.Engine().Options().ParseOptions(),
}
}
func (h *PromSeriesMatchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := context.WithValue(r.Context(), handler.HeaderKey, r.Header)
logger := logging.WithContext(ctx, h.instrumentOpts)
- w.Header().Set("Content-Type", "application/json")
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
w.Header().Set("Access-Control-Allow-Origin", "*")
- queries, err := prometheus.ParseSeriesMatchQuery(r, h.tagOptions)
+ queries, err := prometheus.ParseSeriesMatchQuery(r, h.parseOpts, h.tagOptions)
if err != nil {
logger.Error("unable to parse series match values to query", zap.Error(err))
xhttp.Error(w, err, http.StatusBadRequest)
diff --git a/src/query/api/v1/handler/prometheus/remote/read.go b/src/query/api/v1/handler/prometheus/remote/read.go
index de1cbaf7b8..cc0f4b6230 100644
--- a/src/query/api/v1/handler/prometheus/remote/read.go
+++ b/src/query/api/v1/handler/prometheus/remote/read.go
@@ -23,10 +23,15 @@ package remote
import (
"bytes"
"context"
+ "encoding/json"
+ "errors"
+ "fmt"
"net/http"
+ "strings"
"sync"
"time"
+ comparator "github.com/m3db/m3/src/cmd/services/m3comparator/main/parser"
"github.com/m3db/m3/src/query/api/v1/handler"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
@@ -35,14 +40,18 @@ import (
"github.com/m3db/m3/src/query/executor"
"github.com/m3db/m3/src/query/generated/proto/prompb"
"github.com/m3db/m3/src/query/models"
+ xpromql "github.com/m3db/m3/src/query/parser/promql"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/ts"
+ "github.com/m3db/m3/src/query/util"
"github.com/m3db/m3/src/query/util/logging"
xerrors "github.com/m3db/m3/src/x/errors"
- "github.com/m3db/m3/src/x/instrument"
xhttp "github.com/m3db/m3/src/x/net/http"
"github.com/golang/protobuf/proto"
"github.com/golang/snappy"
+ "github.com/prometheus/prometheus/pkg/labels"
+ promql "github.com/prometheus/prometheus/promql/parser"
"github.com/uber-go/tally"
"go.uber.org/zap"
)
@@ -50,32 +59,26 @@ import (
const (
// PromReadURL is the url for remote prom read handler
PromReadURL = handler.RoutePrefixV1 + "/prom/remote/read"
+)
- // PromReadHTTPMethod is the HTTP method used with this resource.
- PromReadHTTPMethod = http.MethodPost
+var (
+ // PromReadHTTPMethods are the HTTP methods used with this resource.
+ PromReadHTTPMethods = []string{http.MethodPost, http.MethodGet}
)
-// PromReadHandler represents a handler for prometheus read endpoint.
-type PromReadHandler struct {
- engine executor.Engine
- promReadMetrics promReadMetrics
- timeoutOpts *prometheus.TimeoutOpts
- fetchOptionsBuilder handleroptions.FetchOptionsBuilder
- keepEmpty bool
- instrumentOpts instrument.Options
+// promReadHandler is a handler for the prometheus remote read endpoint.
+type promReadHandler struct {
+ promReadMetrics promReadMetrics
+ opts options.HandlerOptions
}
// NewPromReadHandler returns a new instance of handler.
func NewPromReadHandler(opts options.HandlerOptions) http.Handler {
taggedScope := opts.InstrumentOpts().MetricsScope().
Tagged(map[string]string{"handler": "remote-read"})
- return &PromReadHandler{
- engine: opts.Engine(),
- promReadMetrics: newPromReadMetrics(taggedScope),
- timeoutOpts: opts.TimeoutOpts(),
- fetchOptionsBuilder: opts.FetchOptionsBuilder(),
- keepEmpty: opts.Config().ResultOptions.KeepNans,
- instrumentOpts: opts.InstrumentOpts(),
+ return &promReadHandler{
+ promReadMetrics: newPromReadMetrics(taggedScope),
+ opts: opts,
}
}
@@ -98,67 +101,147 @@ func newPromReadMetrics(scope tally.Scope) promReadMetrics {
}
}
-func (h *PromReadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (h *promReadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
timer := h.promReadMetrics.fetchTimerSuccess.Start()
+ defer timer.Stop()
+
ctx := context.WithValue(r.Context(), handler.HeaderKey, r.Header)
- logger := logging.WithContext(ctx, h.instrumentOpts)
- req, rErr := h.parseRequest(r)
+ logger := logging.WithContext(ctx, h.opts.InstrumentOpts())
+ req, fetchOpts, rErr := ParseRequest(ctx, r, h.opts)
if rErr != nil {
- xhttp.Error(w, rErr.Inner(), rErr.Code())
+ err := rErr.Inner()
+ h.promReadMetrics.fetchErrorsClient.Inc(1)
+ logger.Error("remote read query parse error",
+ zap.Error(err),
+ zap.Any("req", req),
+ zap.Any("fetchOpts", fetchOpts))
+ xhttp.Error(w, err, rErr.Code())
return
}
- timeout, err := prometheus.ParseRequestTimeout(r, h.timeoutOpts.FetchTimeout)
+ cancelWatcher := handler.NewResponseWriterCanceller(w, h.opts.InstrumentOpts())
+ readResult, err := Read(ctx, cancelWatcher, req, fetchOpts, h.opts)
if err != nil {
- h.promReadMetrics.fetchErrorsClient.Inc(1)
- xhttp.Error(w, err, http.StatusBadRequest)
+ h.promReadMetrics.fetchErrorsServer.Inc(1)
+ logger.Error("remote read query error",
+ zap.Error(err),
+ zap.Any("req", req),
+ zap.Any("fetchOpts", fetchOpts))
+ xhttp.Error(w, err, http.StatusInternalServerError)
return
}
- fetchOpts, rErr := h.fetchOptionsBuilder.NewFetchOptions(r)
- if rErr != nil {
- xhttp.Error(w, rErr.Inner(), rErr.Code())
- return
+ // NB: if this errors, all relevant headers and information should already
+ // be sent to the writer; so it is not necessary to do anything here other
+ // than increment success/failure metrics.
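+ //
+ // A request carrying ?format=json gets a human-readable JSON rendering of
+ // the matched series; any other (or absent) value falls through to the
+ // snappy-compressed protobuf body that Prometheus remote read expects.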
+ switch r.FormValue("format") {
+ case "json":
+ result := readResultsJSON{
+ Queries: make([]queryResultsJSON, 0, len(req.Queries)),
+ }
+ for i, q := range req.Queries {
+ start := storage.PromTimestampToTime(q.StartTimestampMs)
+ end := storage.PromTimestampToTime(q.EndTimestampMs)
+
+ all := readResult.Result[i].Timeseries
+ timeseries := make([]comparator.Series, 0, len(all))
+ for _, s := range all {
+ datapoints := storage.PromSamplesToM3Datapoints(s.Samples)
+ tags := storage.PromLabelsToM3Tags(s.Labels, h.opts.TagOptions())
+ series := toSeries(datapoints, tags)
+ series.Start = start
+ series.End = end
+ timeseries = append(timeseries, series)
+ }
+
+ matchers := make([]labelMatcherJSON, 0, len(q.Matchers))
+ for _, m := range q.Matchers {
+ matcher := labelMatcherJSON{
+ Type: m.Type.String(),
+ Name: string(m.Name),
+ Value: string(m.Value),
+ }
+ matchers = append(matchers, matcher)
+ }
+
+ result.Queries = append(result.Queries, queryResultsJSON{
+ Query: queryJSON{
+ Matchers: matchers,
+ },
+ Start: start,
+ End: end,
+ Series: timeseries,
+ })
+ }
+
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
+ handleroptions.AddWarningHeaders(w, readResult.Meta)
+
+ err = json.NewEncoder(w).Encode(result)
+ default:
+ err = WriteSnappyCompressed(w, readResult, logger)
}
- readResult, err := h.read(ctx, w, req, timeout, fetchOpts)
if err != nil {
h.promReadMetrics.fetchErrorsServer.Inc(1)
- logger.Error("unable to fetch data", zap.Error(err))
- xhttp.Error(w, err, http.StatusInternalServerError)
- return
+ } else {
+ h.promReadMetrics.fetchSuccess.Inc(1)
}
+}
+type readResultsJSON struct {
+ Queries []queryResultsJSON `json:"queries"`
+}
+
+type queryResultsJSON struct {
+ Query queryJSON `json:"query"`
+ Start time.Time `json:"start"`
+ End time.Time `json:"end"`
+ Series []comparator.Series `json:"series"`
+}
+
+type queryJSON struct {
+ Matchers []labelMatcherJSON `json:"matchers"`
+}
+
+type labelMatcherJSON struct {
+ Type string `json:"type"`
+ Name string `json:"name"`
+ Value string `json:"value"`
+}
+
+// WriteSnappyCompressed writes snappy compressed results to the given writer.
+func WriteSnappyCompressed(
+ w http.ResponseWriter,
+ readResult ReadResult,
+ logger *zap.Logger,
+) error {
resp := &prompb.ReadResponse{
- Results: readResult.result,
+ Results: readResult.Result,
}
data, err := proto.Marshal(resp)
if err != nil {
- h.promReadMetrics.fetchErrorsServer.Inc(1)
logger.Error("unable to marshal read results to protobuf", zap.Error(err))
xhttp.Error(w, err, http.StatusInternalServerError)
- return
+ return err
}
- w.Header().Set("Content-Type", "application/x-protobuf")
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeProtobuf)
w.Header().Set("Content-Encoding", "snappy")
- handleroptions.AddWarningHeaders(w, readResult.meta)
+ handleroptions.AddWarningHeaders(w, readResult.Meta)
compressed := snappy.Encode(nil, data)
if _, err := w.Write(compressed); err != nil {
- h.promReadMetrics.fetchErrorsServer.Inc(1)
logger.Error("unable to encode read results to snappy",
zap.Error(err))
xhttp.Error(w, err, http.StatusInternalServerError)
- return
}
- timer.Stop()
- h.promReadMetrics.fetchSuccess.Inc(1)
+ return err
}
-func (h *PromReadHandler) parseRequest(
+func parseCompressedRequest(
r *http.Request,
) (*prompb.ReadRequest, *xhttp.ParseError) {
result, err := prometheus.ParsePromCompressedRequest(r)
@@ -174,18 +257,153 @@ func (h *PromReadHandler) parseRequest(
return &req, nil
}
-type readResult struct {
- meta block.ResultMetadata
- result []*prompb.QueryResult
+// ReadResult is a read result.
+type ReadResult struct {
+ Meta block.ResultMetadata
+ Result []*prompb.QueryResult
}
-func (h *PromReadHandler) read(
- reqCtx context.Context,
- w http.ResponseWriter,
+// ParseExpr parses a prometheus request expression into the constituent
+// fetches, rather than the full query application.
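+//
+// For example, given query=rate(requests[5m] offset 1m) with start t0 and
+// end t1, the single resulting fetch covers [t0-5m-1m, t1-1m]: the matrix
+// range widens the start only, while the offset shifts both bounds back.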
+func ParseExpr(
+ r *http.Request,
+ opts xpromql.ParseOptions,
+) (*prompb.ReadRequest, *xhttp.ParseError) {
+ var req *prompb.ReadRequest
+ exprParam := strings.TrimSpace(r.FormValue("query"))
+ if len(exprParam) == 0 {
+ return nil, xhttp.NewParseError(
+ fmt.Errorf("cannot parse params: no expr"),
+ http.StatusBadRequest)
+ }
+
+ queryStart, err := util.ParseTimeString(r.FormValue("start"))
+ if err != nil {
+ return nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+
+ queryEnd, err := util.ParseTimeString(r.FormValue("end"))
+ if err != nil {
+ return nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+
+ fn := opts.ParseFn()
+ req = &prompb.ReadRequest{}
+ expr, err := fn(exprParam)
+ if err != nil {
+ return nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+
+ var vectorsInspected []*promql.VectorSelector
+ promql.Inspect(expr, func(node promql.Node, path []promql.Node) error {
+ var (
+ start = queryStart
+ end = queryEnd
+ offset time.Duration
+ labelMatchers []*labels.Matcher
+ )
+
+ if n, ok := node.(*promql.MatrixSelector); ok {
+ if n.Range > 0 {
+ start = start.Add(-1 * n.Range)
+ }
+
+ vectorSelector := n.VectorSelector.(*promql.VectorSelector)
+
+ // Check already inspected (matrix can be walked further into
+ // child vector selector).
+ for _, existing := range vectorsInspected {
+ if existing == vectorSelector {
+ return nil // Already inspected.
+ }
+ }
+
+ vectorsInspected = append(vectorsInspected, vectorSelector)
+
+ offset = vectorSelector.Offset
+ labelMatchers = vectorSelector.LabelMatchers
+ } else if n, ok := node.(*promql.VectorSelector); ok {
+ // Check already inspected (matrix can be walked further into
+ // child vector selector).
+ for _, existing := range vectorsInspected {
+ if existing == n {
+ return nil // Already inspected.
+ }
+ }
+
+ vectorsInspected = append(vectorsInspected, n)
+
+ offset = n.Offset
+ labelMatchers = n.LabelMatchers
+ } else {
+ return nil
+ }
+
+ if offset > 0 {
+ start = start.Add(-1 * offset)
+ end = end.Add(-1 * offset)
+ }
+
+ matchers, err := toLabelMatchers(labelMatchers)
+ if err != nil {
+ return err
+ }
+
+ query := &prompb.Query{
+ StartTimestampMs: storage.TimeToPromTimestamp(start),
+ EndTimestampMs: storage.TimeToPromTimestamp(end),
+ Matchers: matchers,
+ }
+
+ req.Queries = append(req.Queries, query)
+ return nil
+ })
+
+ return req, nil
+}
+
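A worked example of the window arithmetic above, assuming hypothetical
queryStart/queryEnd bounds: for `rate(foo[5m] offset 10m)` the fetch window is
widened by the matrix range and shifted back by the offset:

    start := queryStart.Add(-5 * time.Minute).Add(-10 * time.Minute) // range, then offset
    end := queryEnd.Add(-10 * time.Minute)                           // offset only
    // The resulting prompb.Query spans [start, end] with a __name__="foo" matcher.
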
+// ParseRequest parses a remote read request, either from a PromQL expression
+// query or from a snappy compressed protobuf body.
+func ParseRequest(
+ ctx context.Context,
+ r *http.Request,
+ opts options.HandlerOptions,
+) (*prompb.ReadRequest, *storage.FetchOptions, *xhttp.ParseError) {
+ var req *prompb.ReadRequest
+ var rErr *xhttp.ParseError
+ switch {
+ case r.Method == http.MethodGet && strings.TrimSpace(r.FormValue("query")) != "":
+ req, rErr = ParseExpr(r, opts.Engine().Options().ParseOptions())
+ default:
+ req, rErr = parseCompressedRequest(r)
+ }
+
+ if rErr != nil {
+ return nil, nil, rErr
+ }
+
+ timeout := opts.TimeoutOpts().FetchTimeout
+ timeout, err := prometheus.ParseRequestTimeout(r, timeout)
+ if err != nil {
+ return nil, nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+
+ fetchOpts, rErr := opts.FetchOptionsBuilder().NewFetchOptions(r)
+ if rErr != nil {
+ return nil, nil, rErr
+ }
+
+ fetchOpts.Timeout = timeout
+ return req, fetchOpts, nil
+}
+
+// Read performs a remote read on the given engine.
+func Read(
+ ctx context.Context,
+ cancelWatcher handler.CancelWatcher,
r *prompb.ReadRequest,
- timeout time.Duration,
fetchOpts *storage.FetchOptions,
-) (readResult, error) {
+ opts options.HandlerOptions,
+) (ReadResult, error) {
var (
queryCount = len(r.Queries)
cancelFuncs = make([]context.CancelFunc, queryCount)
@@ -193,9 +411,12 @@ func (h *PromReadHandler) read(
meta = block.NewResultMetadata()
queryOpts = &executor.QueryOptions{
QueryContextOptions: models.QueryContextOptions{
- LimitMaxTimeseries: fetchOpts.Limit,
+ LimitMaxTimeseries: fetchOpts.SeriesLimit,
+ LimitMaxDocs: fetchOpts.DocsLimit,
}}
+ engine = opts.Engine()
+
wg sync.WaitGroup
mu sync.Mutex
multiErr xerrors.MultiError
@@ -205,8 +426,12 @@ func (h *PromReadHandler) read(
for i, promQuery := range r.Queries {
i, promQuery := i, promQuery // Capture vars for lambda.
go func() {
- defer wg.Done()
- ctx, cancel := context.WithTimeout(reqCtx, timeout)
+ ctx, cancel := context.WithTimeout(ctx, fetchOpts.Timeout)
+ defer func() {
+ wg.Done()
+ cancel()
+ }()
+
cancelFuncs[i] = cancel
query, err := storage.PromReadQueryToM3(promQuery)
if err != nil {
@@ -216,9 +441,12 @@ func (h *PromReadHandler) read(
return
}
- // Detect clients closing connections
- handler.CloseWatcher(ctx, cancel, w, h.instrumentOpts)
- result, err := h.engine.ExecuteProm(ctx, query, queryOpts, fetchOpts)
+ // Detect clients closing connections.
+ if cancelWatcher != nil {
+ cancelWatcher.WatchForCancel(ctx, cancel)
+ }
+
+ result, err := engine.ExecuteProm(ctx, query, queryOpts, fetchOpts)
if err != nil {
mu.Lock()
multiErr = multiErr.Add(err)
@@ -241,10 +469,10 @@ func (h *PromReadHandler) read(
}
if err := multiErr.FinalError(); err != nil {
- return readResult{result: nil, meta: meta}, err
+ return ReadResult{Result: nil, Meta: meta}, err
}
- return readResult{result: queryResults, meta: meta}, nil
+ return ReadResult{Result: queryResults, Meta: meta}, nil
}
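
Taken together, the exported pieces compose into a handler roughly like the
following sketch (error handling trimmed; the promReadHandler fields, logger,
and the ParseError accessor names are assumptions, not the actual
implementation):

    func (h *promReadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
        ctx := r.Context()
        req, fetchOpts, rErr := ParseRequest(ctx, r, h.opts)
        if rErr != nil {
            xhttp.Error(w, rErr.Inner(), rErr.Code())
            return
        }
        result, err := Read(ctx, nil, req, fetchOpts, h.opts) // nil: no cancel watcher
        if err != nil {
            xhttp.Error(w, err, http.StatusInternalServerError)
            return
        }
        // WriteSnappyCompressed logs and writes its own error responses.
        _ = WriteSnappyCompressed(w, result, h.logger)
    }
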
// filterResults removes series tags based on options.
@@ -295,3 +523,57 @@ func filterLabels(
return filtered
}
+
+func tagsConvert(ts models.Tags) comparator.Tags {
+ tags := make(comparator.Tags, 0, ts.Len())
+ for _, t := range ts.Tags {
+ tags = append(tags, comparator.NewTag(string(t.Name), string(t.Value)))
+ }
+
+ return tags
+}
+
+func datapointsConvert(dps ts.Datapoints) comparator.Datapoints {
+ datapoints := make(comparator.Datapoints, 0, dps.Len())
+ for _, dp := range dps.Datapoints() {
+ val := comparator.Datapoint{
+ Value: comparator.Value(dp.Value),
+ Timestamp: dp.Timestamp,
+ }
+ datapoints = append(datapoints, val)
+ }
+
+ return datapoints
+}
+
+func toSeries(dps ts.Datapoints, tags models.Tags) comparator.Series {
+ return comparator.Series{
+ Tags: tagsConvert(tags),
+ Datapoints: datapointsConvert(dps),
+ }
+}
+
+func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) {
+ pbMatchers := make([]*prompb.LabelMatcher, 0, len(matchers))
+ for _, m := range matchers {
+ var mType prompb.LabelMatcher_Type
+ switch m.Type {
+ case labels.MatchEqual:
+ mType = prompb.LabelMatcher_EQ
+ case labels.MatchNotEqual:
+ mType = prompb.LabelMatcher_NEQ
+ case labels.MatchRegexp:
+ mType = prompb.LabelMatcher_RE
+ case labels.MatchNotRegexp:
+ mType = prompb.LabelMatcher_NRE
+ default:
+ return nil, errors.New("invalid matcher type")
+ }
+ pbMatchers = append(pbMatchers, &prompb.LabelMatcher{
+ Type: mType,
+ Name: []byte(m.Name),
+ Value: []byte(m.Value),
+ })
+ }
+ return pbMatchers, nil
+}
diff --git a/src/query/api/v1/handler/prometheus/remote/read_test.go b/src/query/api/v1/handler/prometheus/remote/read_test.go
index cc1d7399e8..aebaebcda2 100644
--- a/src/query/api/v1/handler/prometheus/remote/read_test.go
+++ b/src/query/api/v1/handler/prometheus/remote/read_test.go
@@ -21,17 +21,22 @@
package remote
import (
+ "bytes"
"context"
"fmt"
+ "io"
"net/http"
"net/http/httptest"
+ "net/url"
"strings"
+ "sync"
"testing"
"time"
"github.com/m3db/m3/src/cmd/services/m3query/config"
"github.com/m3db/m3/src/dbnode/client"
xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
+ "github.com/m3db/m3/src/query/api/v1/handler"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/query/api/v1/options"
@@ -40,11 +45,14 @@ import (
"github.com/m3db/m3/src/query/executor"
"github.com/m3db/m3/src/query/generated/proto/prompb"
"github.com/m3db/m3/src/query/models"
+ xpromql "github.com/m3db/m3/src/query/parser/promql"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/test"
"github.com/m3db/m3/src/query/test/m3"
xclock "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
+ xhttp "github.com/m3db/m3/src/x/net/http"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@@ -61,6 +69,66 @@ var (
}
)
+type testVals struct {
+ start time.Time
+ query string
+}
+
+func buildBody(query string, start time.Time) io.Reader {
+ vals := url.Values{}
+ vals.Add("query", query)
+ vals.Add("start", start.Format(time.RFC3339))
+ vals.Add("end", start.Add(time.Hour).Format(time.RFC3339))
+ qs := vals.Encode()
+ return bytes.NewBuffer([]byte(qs))
+}
+
+func TestParseExpr(t *testing.T) {
+ query := "" +
+ `up{a="b"} + 7 - sum(rate(down{c!="d"}[2m])) + ` +
+ `left{e=~"f"} offset 30m and right{g!~"h"} + ` + `
+ max_over_time(foo[1m] offset 1h)`
+
+ start := time.Now().Truncate(time.Hour)
+ req := httptest.NewRequest(http.MethodPost, "/", buildBody(query, start))
+ req.Header.Add(xhttp.HeaderContentType, xhttp.ContentTypeFormURLEncoded)
+ readReq, err := ParseExpr(req, xpromql.NewParseOptions())
+ require.NoError(t, err)
+
+ q := func(start, end time.Time, matchers []*prompb.LabelMatcher) *prompb.Query {
+ return &prompb.Query{
+ StartTimestampMs: start.Unix() * 1000,
+ EndTimestampMs: end.Unix() * 1000,
+ Matchers: matchers,
+ }
+ }
+
+ b := func(s string) []byte { return []byte(s) }
+ expected := []*prompb.Query{
+ q(start, start.Add(time.Hour),
+ []*prompb.LabelMatcher{
+ {Name: b("a"), Value: b("b"), Type: prompb.LabelMatcher_EQ},
+ {Name: b("__name__"), Value: b("up"), Type: prompb.LabelMatcher_EQ}}),
+ q(start.Add(time.Minute*-2), start.Add(time.Hour),
+ []*prompb.LabelMatcher{
+ {Name: b("c"), Value: b("d"), Type: prompb.LabelMatcher_NEQ},
+ {Name: b("__name__"), Value: b("down"), Type: prompb.LabelMatcher_EQ}}),
+ q(start.Add(time.Minute*-30), start.Add(time.Minute*30),
+ []*prompb.LabelMatcher{
+ {Name: b("e"), Value: b("f"), Type: prompb.LabelMatcher_RE},
+ {Name: b("__name__"), Value: b("left"), Type: prompb.LabelMatcher_EQ}}),
+ q(start, start.Add(time.Hour),
+ []*prompb.LabelMatcher{
+ {Name: b("g"), Value: b("h"), Type: prompb.LabelMatcher_NRE},
+ {Name: b("__name__"), Value: b("right"), Type: prompb.LabelMatcher_EQ}}),
+ q(start.Add(time.Minute*-61), start,
+ []*prompb.LabelMatcher{
+ {Name: b("__name__"), Value: b("foo"), Type: prompb.LabelMatcher_EQ}}),
+ }
+
+ assert.Equal(t, expected, readReq.Queries)
+}
+
func newEngine(
s storage.Storage,
lookbackDuration time.Duration,
@@ -77,99 +145,102 @@ func newEngine(
}
func setupServer(t *testing.T) *httptest.Server {
- ctrl := gomock.NewController(t)
- // No calls expected on session object
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
lstore, session := m3.NewStorageAndSession(t, ctrl)
session.EXPECT().
FetchTagged(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil, client.FetchResponseMetadata{Exhaustive: false}, fmt.Errorf("not initialized"))
+ Return(nil, client.FetchResponseMetadata{Exhaustive: false},
+ fmt.Errorf("not initialized")).MaxTimes(1)
storage := test.NewSlowStorage(lstore, 10*time.Millisecond)
promRead := readHandler(storage, timeoutOpts)
server := httptest.NewServer(test.NewSlowHandler(promRead, 10*time.Millisecond))
return server
}
-func readHandler(store storage.Storage, timeoutOpts *prometheus.TimeoutOpts) *PromReadHandler {
- opts := handleroptions.FetchOptionsBuilderOptions{Limit: 100}
- engine := newEngine(store, defaultLookbackDuration, nil,
- instrument.NewOptions())
- return &PromReadHandler{
- engine: engine,
- promReadMetrics: promReadTestMetrics,
- timeoutOpts: timeoutOpts,
- fetchOptionsBuilder: handleroptions.NewFetchOptionsBuilder(opts),
- instrumentOpts: instrument.NewOptions(),
+func readHandler(store storage.Storage,
+ timeoutOpts *prometheus.TimeoutOpts) http.Handler {
+ fetchOpts := handleroptions.FetchOptionsBuilderOptions{
+ Limits: handleroptions.FetchOptionsBuilderLimitsOptions{
+ SeriesLimit: 100,
+ },
}
+ iOpts := instrument.NewOptions()
+ engine := newEngine(store, defaultLookbackDuration, nil, iOpts)
+ opts := options.EmptyHandlerOptions().
+ SetEngine(engine).
+ SetInstrumentOpts(iOpts).
+ SetFetchOptionsBuilder(handleroptions.NewFetchOptionsBuilder(fetchOpts)).
+ SetTimeoutOpts(timeoutOpts)
+
+ return NewPromReadHandler(opts)
}
func TestPromReadParsing(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
storage, _ := m3.NewStorageAndSession(t, ctrl)
- opts := handleroptions.FetchOptionsBuilderOptions{Limit: 100}
+ builderOpts := handleroptions.FetchOptionsBuilderOptions{
+ Limits: handleroptions.FetchOptionsBuilderLimitsOptions{
+ SeriesLimit: 100,
+ },
+ }
engine := newEngine(storage, defaultLookbackDuration, nil,
instrument.NewOptions())
- promRead := &PromReadHandler{
- engine: engine,
- promReadMetrics: promReadTestMetrics,
- fetchOptionsBuilder: handleroptions.NewFetchOptionsBuilder(opts),
- }
+
+ opts := options.EmptyHandlerOptions().
+ SetEngine(engine).
+ SetFetchOptionsBuilder(handleroptions.NewFetchOptionsBuilder(builderOpts)).
+ SetTimeoutOpts(timeoutOpts)
req := httptest.NewRequest("POST", PromReadURL, test.GeneratePromReadBody(t))
- r, err := promRead.parseRequest(req)
+ r, fetchOpts, err := ParseRequest(context.TODO(), req, opts)
require.Nil(t, err, "unable to parse request")
require.Equal(t, len(r.Queries), 1)
+	require.NotNil(t, fetchOpts)
}
func TestPromFetchTimeoutParsing(t *testing.T) {
- ctrl := gomock.NewController(t)
- storage, _ := m3.NewStorageAndSession(t, ctrl)
- opts := handleroptions.FetchOptionsBuilderOptions{Limit: 100}
- engine := newEngine(storage, defaultLookbackDuration, nil,
- instrument.NewOptions())
- promRead := &PromReadHandler{
- engine: engine,
- promReadMetrics: promReadTestMetrics,
- timeoutOpts: &prometheus.TimeoutOpts{
- FetchTimeout: 2 * time.Minute,
- },
- fetchOptionsBuilder: handleroptions.NewFetchOptionsBuilder(opts),
- }
-
- req := httptest.NewRequest("POST", PromReadURL, test.GeneratePromReadBody(t))
- dur, err := prometheus.ParseRequestTimeout(req, promRead.timeoutOpts.FetchTimeout)
+ url := fmt.Sprintf("%s?timeout=2m", PromReadURL)
+ req := httptest.NewRequest("POST", url, test.GeneratePromReadBody(t))
+ dur, err := prometheus.ParseRequestTimeout(req, time.Second)
require.NoError(t, err)
assert.Equal(t, 2*time.Minute, dur)
}
func TestPromReadParsingBad(t *testing.T) {
- ctrl := gomock.NewController(t)
- storage, _ := m3.NewStorageAndSession(t, ctrl)
- promRead := readHandler(storage, timeoutOpts)
req := httptest.NewRequest("POST", PromReadURL, strings.NewReader("bad body"))
- _, err := promRead.parseRequest(req)
+ _, _, err := ParseRequest(context.TODO(), req, options.EmptyHandlerOptions())
require.NotNil(t, err, "unable to parse request")
}
func TestPromReadStorageWithFetchError(t *testing.T) {
- ctrl := gomock.NewController(t)
- store, session := m3.NewStorageAndSession(t, ctrl)
- session.EXPECT().FetchTagged(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil, client.FetchResponseMetadata{Exhaustive: true}, fmt.Errorf("unable to get data"))
- session.EXPECT().IteratorPools().
- Return(nil, nil)
- promRead := readHandler(store, timeoutOpts)
- req := test.GeneratePromReadRequest()
- recorder := httptest.NewRecorder()
- res, err := promRead.read(context.TODO(), recorder,
- req, time.Hour, storage.NewFetchOptions())
+ ctrl := xtest.NewController(t)
+ watcher := &cancelWatcher{}
+ readRequest := &prompb.ReadRequest{
+ Queries: []*prompb.Query{
+ {},
+ },
+ }
+
+ fetchOpts := &storage.FetchOptions{}
+ result := storage.PromResult{Metadata: block.ResultMetadata{
+ Exhaustive: true, LocalOnly: true}}
+ engine := executor.NewMockEngine(ctrl)
+ engine.EXPECT().
+ ExecuteProm(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
+ Return(result, fmt.Errorf("expr err"))
+
+ opts := options.EmptyHandlerOptions().SetEngine(engine)
+ res, err := Read(context.TODO(), watcher, readRequest, fetchOpts, opts)
require.Error(t, err, "unable to read from storage")
- header := recorder.Header().Get(handleroptions.LimitHeader)
- assert.Equal(t, 0, len(header))
- meta := res.meta
+ meta := res.Meta
assert.True(t, meta.Exhaustive)
assert.True(t, meta.LocalOnly)
assert.Equal(t, 0, len(meta.Warnings))
+
+ assert.Equal(t, 1, watcher.count)
}
func TestQueryMatchMustBeEqual(t *testing.T) {
@@ -189,7 +260,7 @@ func TestQueryKillOnClientDisconnect(t *testing.T) {
Timeout: 1 * time.Millisecond,
}
- _, err := c.Post(server.URL, "application/x-protobuf", test.GeneratePromReadBody(t))
+ _, err := c.Post(server.URL, xhttp.ContentTypeProtobuf, test.GeneratePromReadBody(t))
assert.Error(t, err)
}
@@ -198,7 +269,7 @@ func TestQueryKillOnTimeout(t *testing.T) {
defer server.Close()
req, _ := http.NewRequest("POST", server.URL, test.GeneratePromReadBody(t))
- req.Header.Add("Content-Type", "application/x-protobuf")
+ req.Header.Add(xhttp.HeaderContentType, xhttp.ContentTypeProtobuf)
req.Header.Add("timeout", "1ms")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
@@ -208,7 +279,9 @@ func TestQueryKillOnTimeout(t *testing.T) {
}
func TestReadErrorMetricsCount(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
storage, session := m3.NewStorageAndSession(t, ctrl)
session.EXPECT().FetchTagged(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil, client.FetchResponseMetadata{Exhaustive: true}, fmt.Errorf("unable to get data"))
@@ -219,15 +292,20 @@ func TestReadErrorMetricsCount(t *testing.T) {
scope, closer := tally.NewRootScope(tally.ScopeOptions{Reporter: reporter}, time.Millisecond)
defer closer.Close()
readMetrics := newPromReadMetrics(scope)
- opts := handleroptions.FetchOptionsBuilderOptions{Limit: 100}
+ buildOpts := handleroptions.FetchOptionsBuilderOptions{
+ Limits: handleroptions.FetchOptionsBuilderLimitsOptions{
+ SeriesLimit: 100,
+ },
+ }
engine := newEngine(storage, defaultLookbackDuration, nil,
instrument.NewOptions())
- promRead := &PromReadHandler{
- engine: engine,
- promReadMetrics: readMetrics,
- timeoutOpts: timeoutOpts,
- fetchOptionsBuilder: handleroptions.NewFetchOptionsBuilder(opts),
- instrumentOpts: instrument.NewOptions(),
+ opts := options.EmptyHandlerOptions().
+ SetEngine(engine).
+ SetTimeoutOpts(&prometheus.TimeoutOpts{FetchTimeout: time.Minute}).
+ SetFetchOptionsBuilder(handleroptions.NewFetchOptionsBuilder(buildOpts))
+ promRead := &promReadHandler{
+ promReadMetrics: readMetrics,
+ opts: opts,
}
req := httptest.NewRequest("POST", PromReadURL, test.GeneratePromReadBody(t))
@@ -240,7 +318,7 @@ func TestReadErrorMetricsCount(t *testing.T) {
}
func TestMultipleRead(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
now := time.Now()
@@ -249,7 +327,7 @@ func TestMultipleRead(t *testing.T) {
r := storage.PromResult{
PromResult: &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
- &prompb.TimeSeries{
+ {
Samples: []prompb.Sample{{Value: 1, Timestamp: promNow}},
Labels: []prompb.Label{{Name: []byte("a"), Value: []byte("b")}},
},
@@ -258,14 +336,14 @@ func TestMultipleRead(t *testing.T) {
Metadata: block.ResultMetadata{
Exhaustive: true,
LocalOnly: true,
- Warnings: []block.Warning{block.Warning{Name: "foo", Message: "bar"}},
+ Warnings: []block.Warning{{Name: "foo", Message: "bar"}},
},
}
rTwo := storage.PromResult{
PromResult: &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
- &prompb.TimeSeries{
+ {
Samples: []prompb.Sample{{Value: 2, Timestamp: promNow}},
Labels: []prompb.Label{{Name: []byte("c"), Value: []byte("d")}},
},
@@ -280,8 +358,8 @@ func TestMultipleRead(t *testing.T) {
req := &prompb.ReadRequest{
Queries: []*prompb.Query{
- {StartTimestampMs: 10},
- {StartTimestampMs: 20},
+ {StartTimestampMs: 10, EndTimestampMs: 100},
+ {StartTimestampMs: 20, EndTimestampMs: 200},
},
}
@@ -305,35 +383,38 @@ func TestMultipleRead(t *testing.T) {
},
})
- h := NewPromReadHandler(handlerOpts).(*PromReadHandler)
- res, err := h.read(context.TODO(), nil, req, 0, storage.NewFetchOptions())
+ fetchOpts := &storage.FetchOptions{}
+ watcher := &cancelWatcher{}
+ res, err := Read(context.TODO(), watcher, req, fetchOpts, handlerOpts)
require.NoError(t, err)
expected := &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
- &prompb.TimeSeries{
+ {
Labels: []prompb.Label{{Name: []byte("a"), Value: []byte("b")}},
Samples: []prompb.Sample{{Timestamp: promNow, Value: 1}},
},
- &prompb.TimeSeries{
+ {
Labels: []prompb.Label{{Name: []byte("c"), Value: []byte("d")}},
Samples: []prompb.Sample{{Timestamp: promNow, Value: 2}},
},
},
}
- result := res.result
+ result := res.Result
assert.Equal(t, expected.Timeseries[0], result[0].Timeseries[0])
assert.Equal(t, expected.Timeseries[1], result[1].Timeseries[0])
- meta := res.meta
+ meta := res.Meta
assert.False(t, meta.Exhaustive)
assert.True(t, meta.LocalOnly)
require.Equal(t, 1, len(meta.Warnings))
assert.Equal(t, "foo_bar", meta.Warnings[0].Header())
+
+ assert.Equal(t, 2, watcher.count)
}
func TestReadWithOptions(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
now := time.Now()
@@ -342,7 +423,7 @@ func TestReadWithOptions(t *testing.T) {
r := storage.PromResult{
PromResult: &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
- &prompb.TimeSeries{
+ {
Samples: []prompb.Sample{{Value: 1, Timestamp: promNow}},
Labels: []prompb.Label{
{Name: []byte("a"), Value: []byte("b")},
@@ -355,7 +436,7 @@ func TestReadWithOptions(t *testing.T) {
}
req := &prompb.ReadRequest{
- Queries: []*prompb.Query{{StartTimestampMs: 10}},
+ Queries: []*prompb.Query{{StartTimestampMs: 10, EndTimestampMs: 100}},
}
q, err := storage.PromReadQueryToM3(req.Queries[0])
@@ -366,8 +447,8 @@ func TestReadWithOptions(t *testing.T) {
ExecuteProm(gomock.Any(), q, gomock.Any(), gomock.Any()).
Return(r, nil)
- opts := storage.NewFetchOptions()
- opts.RestrictQueryOptions = &storage.RestrictQueryOptions{
+ fetchOpts := storage.NewFetchOptions()
+ fetchOpts.RestrictQueryOptions = &storage.RestrictQueryOptions{
RestrictByTag: &storage.RestrictByTag{
Strip: [][]byte{[]byte("remove")},
},
@@ -380,18 +461,30 @@ func TestReadWithOptions(t *testing.T) {
},
})
- h := NewPromReadHandler(handlerOpts).(*PromReadHandler)
- res, err := h.read(context.TODO(), nil, req, 0, opts)
+ res, err := Read(context.TODO(), nil, req, fetchOpts, handlerOpts)
require.NoError(t, err)
expected := &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
- &prompb.TimeSeries{
+ {
Labels: []prompb.Label{{Name: []byte("a"), Value: []byte("b")}},
Samples: []prompb.Sample{{Timestamp: promNow, Value: 1}},
},
},
}
- result := res.result
+ result := res.Result
assert.Equal(t, expected.Timeseries[0], result[0].Timeseries[0])
}
+
+type cancelWatcher struct {
+ sync.Mutex
+ count int
+}
+
+var _ handler.CancelWatcher = (*cancelWatcher)(nil)
+
+func (c *cancelWatcher) WatchForCancel(context.Context, context.CancelFunc) {
+ c.Lock()
+ c.count++
+ c.Unlock()
+}
diff --git a/src/query/api/v1/handler/prometheus/remote/tag_values.go b/src/query/api/v1/handler/prometheus/remote/tag_values.go
index d0d5b0caf4..2a40f3cc1e 100644
--- a/src/query/api/v1/handler/prometheus/remote/tag_values.go
+++ b/src/query/api/v1/handler/prometheus/remote/tag_values.go
@@ -32,6 +32,7 @@ import (
"github.com/m3db/m3/src/query/errors"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/util/logging"
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
@@ -63,7 +64,7 @@ type TagValuesHandler struct {
// TagValuesResponse is the response that gets returned to the user
type TagValuesResponse struct {
- Results storage.CompleteTagsResult `json:"results,omitempty"`
+ Results consolidators.CompleteTagsResult `json:"results,omitempty"`
}
// NewTagValuesHandler returns a new instance of handler.
@@ -79,7 +80,7 @@ func NewTagValuesHandler(options options.HandlerOptions) http.Handler {
func (h *TagValuesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := context.WithValue(r.Context(), handler.HeaderKey, r.Header)
logger := logging.WithContext(ctx, h.instrumentOpts)
- w.Header().Set("Content-Type", "application/json")
+ w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
query, err := h.parseTagValuesToQuery(r)
if err != nil {
diff --git a/src/query/api/v1/handler/prometheus/remote/tag_values_test.go b/src/query/api/v1/handler/prometheus/remote/tag_values_test.go
index 03139e6131..23309b5f89 100644
--- a/src/query/api/v1/handler/prometheus/remote/tag_values_test.go
+++ b/src/query/api/v1/handler/prometheus/remote/tag_values_test.go
@@ -34,6 +34,8 @@ import (
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ "github.com/m3db/m3/src/x/headers"
"github.com/golang/mock/gomock"
"github.com/gorilla/mux"
@@ -101,8 +103,8 @@ func TestTagValues(t *testing.T) {
return now
}
- fb := handleroptions.
- NewFetchOptionsBuilder(handleroptions.FetchOptionsBuilderOptions{})
+ fb := handleroptions.NewFetchOptionsBuilder(
+ handleroptions.FetchOptionsBuilderOptions{})
opts := options.EmptyHandlerOptions().
SetStorage(store).
SetNowFn(nowFn).
@@ -131,9 +133,9 @@ func TestTagValues(t *testing.T) {
filterTag: tt.name,
}
- storeResult := &storage.CompleteTagsResult{
+ storeResult := &consolidators.CompleteTagsResult{
CompleteNameOnly: false,
- CompletedTags: []storage.CompletedTag{
+ CompletedTags: []consolidators.CompletedTag{
{
Name: b(tt.name),
Values: bs("a", "b", "c", tt.name),
@@ -157,9 +159,8 @@ func TestTagValues(t *testing.T) {
ex := fmt.Sprintf(`{"status":"success","data":["a","b","c","%s"]}`, tt.name)
assert.Equal(t, ex, string(read))
- warning := rr.Header().Get(handleroptions.LimitHeader)
- exWarn := fmt.Sprintf("%s,foo_bar",
- handleroptions.LimitHeaderSeriesLimitApplied)
+ warning := rr.Header().Get(headers.LimitHeader)
+ exWarn := fmt.Sprintf("%s,foo_bar", headers.LimitHeaderSeriesLimitApplied)
assert.Equal(t, exWarn, warning)
}
}
@@ -175,8 +176,8 @@ func TestTagValueErrors(t *testing.T) {
return now
}
- fb := handleroptions.
- NewFetchOptionsBuilder(handleroptions.FetchOptionsBuilderOptions{})
+ fb := handleroptions.NewFetchOptionsBuilder(
+ handleroptions.FetchOptionsBuilderOptions{})
opts := options.EmptyHandlerOptions().
SetStorage(store).
SetNowFn(nowFn).
diff --git a/src/query/api/v1/handler/prometheus/remote/test/read.go b/src/query/api/v1/handler/prometheus/remote/test/read.go
index 98eec49b6d..8cde943cb6 100644
--- a/src/query/api/v1/handler/prometheus/remote/test/read.go
+++ b/src/query/api/v1/handler/prometheus/remote/test/read.go
@@ -30,6 +30,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/golang/snappy"
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
)
@@ -43,7 +44,7 @@ func GeneratePromReadRequest() *prompb.ReadRequest {
EndTimestampMs: time.Now().UnixNano() / int64(time.Millisecond),
Matchers: []*prompb.LabelMatcher{
&prompb.LabelMatcher{
- Name: []byte("__name__"),
+ Name: []byte(model.MetricNameLabel),
Value: []byte("first"),
Type: prompb.LabelMatcher_EQ,
},
diff --git a/src/query/api/v1/handler/prometheus/remote/test/write.go b/src/query/api/v1/handler/prometheus/remote/test/write.go
index 1533074b79..7454414652 100644
--- a/src/query/api/v1/handler/prometheus/remote/test/write.go
+++ b/src/query/api/v1/handler/prometheus/remote/test/write.go
@@ -23,13 +23,13 @@ package test
import (
"bytes"
"io"
- "testing"
"time"
"github.com/m3db/m3/src/query/generated/proto/prompb"
"github.com/golang/protobuf/proto"
"github.com/golang/snappy"
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
)
@@ -39,7 +39,7 @@ func GeneratePromWriteRequest() *prompb.WriteRequest {
req := &prompb.WriteRequest{
Timeseries: []prompb.TimeSeries{{
Labels: []prompb.Label{
- {Name: []byte("__name__"), Value: []byte("first")},
+ {Name: []byte(model.MetricNameLabel), Value: []byte("first")},
{Name: []byte("foo"), Value: []byte("bar")},
{Name: []byte("biz"), Value: []byte("baz")},
},
@@ -50,7 +50,7 @@ func GeneratePromWriteRequest() *prompb.WriteRequest {
},
{
Labels: []prompb.Label{
- {Name: []byte("__name__"), Value: []byte("second")},
+ {Name: []byte(model.MetricNameLabel), Value: []byte("second")},
{Name: []byte("foo"), Value: []byte("qux")},
{Name: []byte("bar"), Value: []byte("baz")},
},
@@ -66,12 +66,21 @@ func GeneratePromWriteRequest() *prompb.WriteRequest {
// GeneratePromWriteRequestBody generates a Prometheus remote
// write request body.
func GeneratePromWriteRequestBody(
- t *testing.T,
+ t require.TestingT,
req *prompb.WriteRequest,
) io.Reader {
+ return bytes.NewReader(GeneratePromWriteRequestBodyBytes(t, req))
+}
+
+// GeneratePromWriteRequestBodyBytes generates a Prometheus remote
+// write request body as raw bytes.
+func GeneratePromWriteRequestBodyBytes(
+ t require.TestingT,
+ req *prompb.WriteRequest,
+) []byte {
data, err := proto.Marshal(req)
require.NoError(t, err)
compressed := snappy.Encode(nil, data)
- return bytes.NewReader(compressed)
+ return compressed
}
diff --git a/src/query/api/v1/handler/prometheus/remote/write.go b/src/query/api/v1/handler/prometheus/remote/write.go
index 40052b6bd8..d1ecebc7d4 100644
--- a/src/query/api/v1/handler/prometheus/remote/write.go
+++ b/src/query/api/v1/handler/prometheus/remote/write.go
@@ -23,6 +23,7 @@ package remote
import (
"bytes"
"context"
+ "encoding/json"
"errors"
"fmt"
"io/ioutil"
@@ -40,12 +41,15 @@ import (
"github.com/m3db/m3/src/query/generated/proto/prompb"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/query/util/logging"
"github.com/m3db/m3/src/x/clock"
xerrors "github.com/m3db/m3/src/x/errors"
+ "github.com/m3db/m3/src/x/headers"
"github.com/m3db/m3/src/x/instrument"
xhttp "github.com/m3db/m3/src/x/net/http"
+ "github.com/m3db/m3/src/x/retry"
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
@@ -73,6 +77,22 @@ var (
errNoTagOptions = errors.New("no tag options set")
errNoNowFn = errors.New("no now fn set")
errUnaggregatedStoragePolicySet = errors.New("storage policy should not be set for unaggregated metrics")
+
+ defaultForwardingRetryForever = false
+ defaultForwardingRetryJitter = true
+ defaultForwardRetryConfig = retry.Configuration{
+ InitialBackoff: time.Second * 2,
+ BackoffFactor: 2,
+ MaxRetries: 1,
+ Forever: &defaultForwardingRetryForever,
+ Jitter: &defaultForwardingRetryJitter,
+ }
+
+ defaultValue = ingest.IterValue{
+ Tags: models.EmptyTags(),
+ Attributes: ts.DefaultSeriesAttributes(),
+ Metadata: ts.Metadata{},
+ }
)
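
With these defaults the forwarder makes at most one retry, roughly two seconds
(jittered) after the initial failure. A sketch of how the retrier is exercised,
where forwardOnce and scope are stand-ins:

    retrier := retry.NewRetrier(defaultForwardRetryConfig.NewOptions(scope))
    err := retrier.Attempt(func() error {
        return forwardOnce() // hypothetical single forwarding attempt
    })
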
// PromWriteHandler represents a handler for prometheus write endpoint.
@@ -84,6 +104,7 @@ type PromWriteHandler struct {
forwardHTTPClient *http.Client
forwardingBoundWorkers xsync.WorkerPool
forwardContext context.Context
+ forwardRetrier retry.Retrier
nowFn clock.NowFn
instrumentOpts instrument.Options
metrics promWriteMetrics
@@ -111,9 +132,10 @@ func NewPromWriteHandler(options options.HandlerOptions) (http.Handler, error) {
return nil, errNoNowFn
}
- metrics, err := newPromWriteMetrics(options.InstrumentOpts().MetricsScope().
- Tagged(map[string]string{"handler": "remote-write"}),
- )
+ scope := options.InstrumentOpts().
+ MetricsScope().
+ Tagged(map[string]string{"handler": "remote-write"})
+ metrics, err := newPromWriteMetrics(scope)
if err != nil {
return nil, err
}
@@ -135,6 +157,14 @@ func NewPromWriteHandler(options options.HandlerOptions) (http.Handler, error) {
forwardHTTPOpts.DisableCompression = true // Already snappy compressed.
forwardHTTPOpts.RequestTimeout = forwardTimeout
+ forwardRetryConfig := defaultForwardRetryConfig
+ if forwarding.Retry != nil {
+ forwardRetryConfig = *forwarding.Retry
+ }
+ forwardRetryOpts := forwardRetryConfig.NewOptions(
+ scope.SubScope("forwarding-retry"),
+ )
+
return &PromWriteHandler{
downsamplerAndWriter: downsamplerAndWriter,
tagOptions: tagOptions,
@@ -143,6 +173,7 @@ func NewPromWriteHandler(options options.HandlerOptions) (http.Handler, error) {
forwardHTTPClient: xhttp.NewHTTPClient(forwardHTTPOpts),
forwardingBoundWorkers: forwardingBoundWorkers,
forwardContext: context.Background(),
+ forwardRetrier: retry.NewRetrier(forwardRetryOpts),
nowFn: nowFn,
metrics: metrics,
instrumentOpts: instrumentOpts,
@@ -150,15 +181,17 @@ func NewPromWriteHandler(options options.HandlerOptions) (http.Handler, error) {
}
type promWriteMetrics struct {
- writeSuccess tally.Counter
- writeErrorsServer tally.Counter
- writeErrorsClient tally.Counter
- ingestLatency tally.Histogram
- ingestLatencyBuckets tally.DurationBuckets
- forwardSuccess tally.Counter
- forwardErrors tally.Counter
- forwardDropped tally.Counter
- forwardLatency tally.Histogram
+ writeSuccess tally.Counter
+ writeErrorsServer tally.Counter
+ writeErrorsClient tally.Counter
+ writeBatchLatency tally.Histogram
+ writeBatchLatencyBuckets tally.DurationBuckets
+ ingestLatency tally.Histogram
+ ingestLatencyBuckets tally.DurationBuckets
+ forwardSuccess tally.Counter
+ forwardErrors tally.Counter
+ forwardDropped tally.Counter
+ forwardLatency tally.Histogram
}
func newPromWriteMetrics(scope tally.Scope) (promWriteMetrics, error) {
@@ -194,6 +227,12 @@ func newPromWriteMetrics(scope tally.Scope) (promWriteMetrics, error) {
}
upTo24hBuckets = upTo24hBuckets[1:] // Remove the first 6h to get 1 hour aligned buckets
+ var writeLatencyBuckets tally.DurationBuckets
+ writeLatencyBuckets = append(writeLatencyBuckets, upTo1sBuckets...)
+ writeLatencyBuckets = append(writeLatencyBuckets, upTo10sBuckets...)
+ writeLatencyBuckets = append(writeLatencyBuckets, upTo60sBuckets...)
+ writeLatencyBuckets = append(writeLatencyBuckets, upTo60mBuckets...)
+
var ingestLatencyBuckets tally.DurationBuckets
ingestLatencyBuckets = append(ingestLatencyBuckets, upTo1sBuckets...)
ingestLatencyBuckets = append(ingestLatencyBuckets, upTo10sBuckets...)
@@ -201,26 +240,25 @@ func newPromWriteMetrics(scope tally.Scope) (promWriteMetrics, error) {
ingestLatencyBuckets = append(ingestLatencyBuckets, upTo60mBuckets...)
ingestLatencyBuckets = append(ingestLatencyBuckets, upTo6hBuckets...)
ingestLatencyBuckets = append(ingestLatencyBuckets, upTo24hBuckets...)
-
- var forwardLatencyBuckets tally.DurationBuckets
- forwardLatencyBuckets = append(forwardLatencyBuckets, upTo1sBuckets...)
- forwardLatencyBuckets = append(forwardLatencyBuckets, upTo10sBuckets...)
- forwardLatencyBuckets = append(forwardLatencyBuckets, upTo60sBuckets...)
- forwardLatencyBuckets = append(forwardLatencyBuckets, upTo60mBuckets...)
return promWriteMetrics{
- writeSuccess: scope.SubScope("write").Counter("success"),
- writeErrorsServer: scope.SubScope("write").Tagged(map[string]string{"code": "5XX"}).Counter("errors"),
- writeErrorsClient: scope.SubScope("write").Tagged(map[string]string{"code": "4XX"}).Counter("errors"),
- ingestLatency: scope.SubScope("ingest").Histogram("latency", ingestLatencyBuckets),
- ingestLatencyBuckets: ingestLatencyBuckets,
- forwardSuccess: scope.SubScope("forward").Counter("success"),
- forwardErrors: scope.SubScope("forward").Counter("errors"),
- forwardDropped: scope.SubScope("forward").Counter("dropped"),
- forwardLatency: scope.SubScope("forward").Histogram("latency", forwardLatencyBuckets),
+ writeSuccess: scope.SubScope("write").Counter("success"),
+ writeErrorsServer: scope.SubScope("write").Tagged(map[string]string{"code": "5XX"}).Counter("errors"),
+ writeErrorsClient: scope.SubScope("write").Tagged(map[string]string{"code": "4XX"}).Counter("errors"),
+ writeBatchLatency: scope.SubScope("write").Histogram("batch-latency", writeLatencyBuckets),
+ writeBatchLatencyBuckets: writeLatencyBuckets,
+ ingestLatency: scope.SubScope("ingest").Histogram("latency", ingestLatencyBuckets),
+ ingestLatencyBuckets: ingestLatencyBuckets,
+ forwardSuccess: scope.SubScope("forward").Counter("success"),
+ forwardErrors: scope.SubScope("forward").Counter("errors"),
+ forwardDropped: scope.SubScope("forward").Counter("dropped"),
+ forwardLatency: scope.SubScope("forward").Histogram("latency", writeLatencyBuckets),
}, nil
}
func (h *PromWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ batchRequestStopwatch := h.metrics.writeBatchLatency.Start()
+ defer batchRequestStopwatch.Stop()
+
req, opts, result, rErr := h.parseRequest(r)
if rErr != nil {
h.metrics.writeErrorsClient.Inc(1)
@@ -236,17 +274,31 @@ func (h *PromWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
for _, target := range targets {
target := target // Capture for lambda.
forward := func() {
- // Consider propgating baggage without tying
- // context to request context in future.
- ctx, cancel := context.WithTimeout(h.forwardContext, h.forwardTimeout)
- defer cancel()
+ now := h.nowFn()
+ err := h.forwardRetrier.Attempt(func() error {
+ // Consider propagating baggage without tying
+ // context to request context in future.
+ ctx, cancel := context.WithTimeout(h.forwardContext, h.forwardTimeout)
+ defer cancel()
+ return h.forward(ctx, result, r.Header, target)
+ })
+
+ // Record forward ingestion delay.
+ // NB: this includes any time for retries.
+ for _, series := range req.Timeseries {
+ for _, sample := range series.Samples {
+ age := now.Sub(storage.PromTimestampToTime(sample.Timestamp))
+ h.metrics.forwardLatency.RecordDuration(age)
+ }
+ }
- if err := h.forward(ctx, result, target); err != nil {
+ if err != nil {
h.metrics.forwardErrors.Inc(1)
logger := logging.WithContext(h.forwardContext, h.instrumentOpts)
logger.Error("forward error", zap.Error(err))
return
}
+
h.metrics.forwardSuccess.Inc(1)
}
@@ -339,14 +391,21 @@ func (h *PromWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h.metrics.writeSuccess.Inc(1)
}
+// parseRequest extracts the Prometheus write request from the request body and
+// headers. WARNING: it is not guaranteed that the tags returned in the request
+// body are in sorted order. It is expected that the caller ensures the tags are
+// sorted before passing them to storage, which currently happens in write() ->
+// newTSPromIter() -> storage.PromLabelsToM3Tags() -> tags.AddTags(). This is
+// the only path through which written metrics are processed today, but future
+// write paths must uphold the same guarantee.
func (h *PromWriteHandler) parseRequest(
r *http.Request,
) (*prompb.WriteRequest, ingest.WriteOptions, prometheus.ParsePromCompressedRequestResult, *xhttp.ParseError) {
var opts ingest.WriteOptions
- if v := strings.TrimSpace(r.Header.Get(handleroptions.MetricsTypeHeader)); v != "" {
+ if v := strings.TrimSpace(r.Header.Get(headers.MetricsTypeHeader)); v != "" {
// Allow the metrics type and storage policies to override
// the default rules and policies if specified.
- metricsType, err := storage.ParseMetricsType(v)
+ metricsType, err := storagemetadata.ParseMetricsType(v)
if err != nil {
return nil, ingest.WriteOptions{},
prometheus.ParsePromCompressedRequestResult{},
@@ -359,9 +418,9 @@ func (h *PromWriteHandler) parseRequest(
opts.DownsampleOverride = true
opts.DownsampleMappingRules = nil
- strPolicy := strings.TrimSpace(r.Header.Get(handleroptions.MetricsStoragePolicyHeader))
+ strPolicy := strings.TrimSpace(r.Header.Get(headers.MetricsStoragePolicyHeader))
switch metricsType {
- case storage.UnaggregatedMetricsType:
+ case storagemetadata.UnaggregatedMetricsType:
if strPolicy != emptyStoragePolicyVar {
err := errUnaggregatedStoragePolicySet
return nil, ingest.WriteOptions{},
@@ -384,6 +443,19 @@ func (h *PromWriteHandler) parseRequest(
}
}
}
+ if v := strings.TrimSpace(r.Header.Get(headers.WriteTypeHeader)); v != "" {
+ switch v {
+ case headers.DefaultWriteType:
+ case headers.AggregateWriteType:
+ opts.WriteOverride = true
+ opts.WriteStoragePolicies = policy.StoragePolicies{}
+ default:
+ err := fmt.Errorf("unrecognized write type: %s", v)
+ return nil, ingest.WriteOptions{},
+ prometheus.ParsePromCompressedRequestResult{},
+ xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+ }
result, err := prometheus.ParsePromCompressedRequest(r)
if err != nil {
@@ -398,6 +470,19 @@ func (h *PromWriteHandler) parseRequest(
xhttp.NewParseError(err, http.StatusBadRequest)
}
+ if mapStr := r.Header.Get(headers.MapTagsByJSONHeader); mapStr != "" {
+ var opts handleroptions.MapTagsOptions
+ if err := json.Unmarshal([]byte(mapStr), &opts); err != nil {
+ return nil, ingest.WriteOptions{}, prometheus.ParsePromCompressedRequestResult{},
+ xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+
+ if err := mapTags(&req, opts); err != nil {
+ return nil, ingest.WriteOptions{}, prometheus.ParsePromCompressedRequestResult{},
+ xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+ }
+
return &req, opts, result, nil
}
@@ -406,13 +491,18 @@ func (h *PromWriteHandler) write(
r *prompb.WriteRequest,
opts ingest.WriteOptions,
) ingest.BatchError {
- iter := newPromTSIter(r.Timeseries, h.tagOptions)
+ iter, err := newPromTSIter(r.Timeseries, h.tagOptions)
+ if err != nil {
+ var errs xerrors.MultiError
+ return errs.Add(err)
+ }
return h.downsamplerAndWriter.WriteBatch(ctx, iter, opts)
}
func (h *PromWriteHandler) forward(
ctx context.Context,
request prometheus.ParsePromCompressedRequestResult,
+ header http.Header,
target handleroptions.PromWriteHandlerForwardTargetOptions,
) error {
method := target.Method
@@ -425,6 +515,25 @@ func (h *PromWriteHandler) forward(
return err
}
+ // There are multiple headers that impact coordinator behavior on the write
+ // (map tags, storage policy, etc.) that we must forward to the target
+	// coordinator to guarantee the same behavior as the coordinator that originally
+ // received the request.
+ if header != nil {
+ for h := range header {
+ if strings.HasPrefix(h, headers.M3HeaderPrefix) {
+ req.Header.Add(h, header.Get(h))
+ }
+ }
+ }
+
+ if targetHeaders := target.Headers; targetHeaders != nil {
+	// If target headers are set, attach them to the request.
+ for name, value := range targetHeaders {
+ req.Header.Add(name, value)
+ }
+ }
+
resp, err := h.forwardHTTPClient.Do(req.WithContext(ctx))
if err != nil {
return err
@@ -440,32 +549,51 @@ func (h *PromWriteHandler) forward(
return fmt.Errorf("expected status code 2XX: actual=%v, method=%v, url=%v, resp=%s",
resp.StatusCode, method, url, response)
}
+
return nil
}
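
For illustration, assuming the M3 header prefix is "M3-", inbound headers fare
as follows when forwarding (values are invented):

    inbound := http.Header{}
    inbound.Set("M3-Metrics-Type", "unaggregated") // copied: carries the M3 prefix
    inbound.Set("M3-Storage-Policy", "1m:40d")     // copied: carries the M3 prefix
    inbound.Set("Content-Length", "1024")          // skipped: standard HTTP header
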
-func newPromTSIter(timeseries []prompb.TimeSeries, tagOpts models.TagOptions) *promTSIter {
+func newPromTSIter(timeseries []prompb.TimeSeries, tagOpts models.TagOptions) (*promTSIter, error) {
// Construct the tags and datapoints upfront so that if the iterator
// is reset, we don't have to generate them twice.
var (
- tags = make([]models.Tags, 0, len(timeseries))
- datapoints = make([]ts.Datapoints, 0, len(timeseries))
+ tags = make([]models.Tags, 0, len(timeseries))
+ datapoints = make([]ts.Datapoints, 0, len(timeseries))
+ seriesAttributes = make([]ts.SeriesAttributes, 0, len(timeseries))
)
+
+ graphiteTagOpts := tagOpts.SetIDSchemeType(models.TypeGraphite)
for _, promTS := range timeseries {
- tags = append(tags, storage.PromLabelsToM3Tags(promTS.Labels, tagOpts))
+ attributes, err := storage.PromTimeSeriesToSeriesAttributes(promTS)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set the tag options based on the incoming source.
+ opts := tagOpts
+ if attributes.Source == ts.SourceTypeGraphite {
+ opts = graphiteTagOpts
+ }
+
+ seriesAttributes = append(seriesAttributes, attributes)
+ tags = append(tags, storage.PromLabelsToM3Tags(promTS.Labels, opts))
datapoints = append(datapoints, storage.PromSamplesToM3Datapoints(promTS.Samples))
}
return &promTSIter{
+ attributes: seriesAttributes,
idx: -1,
tags: tags,
datapoints: datapoints,
- }
+ }, nil
}
type promTSIter struct {
idx int
+ attributes []ts.SeriesAttributes
tags []models.Tags
datapoints []ts.Datapoints
+ metadatas []ts.Metadata
}
func (i *promTSIter) Next() bool {
@@ -473,12 +601,21 @@ func (i *promTSIter) Next() bool {
return i.idx < len(i.tags)
}
-func (i *promTSIter) Current() (models.Tags, ts.Datapoints, xtime.Unit, []byte) {
+func (i *promTSIter) Current() ingest.IterValue {
if len(i.tags) == 0 || i.idx < 0 || i.idx >= len(i.tags) {
- return models.EmptyTags(), nil, 0, nil
+ return defaultValue
}
- return i.tags[i.idx], i.datapoints[i.idx], xtime.Millisecond, nil
+ value := ingest.IterValue{
+ Tags: i.tags[i.idx],
+ Datapoints: i.datapoints[i.idx],
+ Attributes: i.attributes[i.idx],
+ Unit: xtime.Millisecond,
+ }
+ if i.idx < len(i.metadatas) {
+ value.Metadata = i.metadatas[i.idx]
+ }
+ return value
}
func (i *promTSIter) Reset() error {
@@ -489,3 +626,13 @@ func (i *promTSIter) Reset() error {
func (i *promTSIter) Error() error {
return nil
}
+
+func (i *promTSIter) SetCurrentMetadata(metadata ts.Metadata) {
+ if len(i.metadatas) == 0 {
+ i.metadatas = make([]ts.Metadata, len(i.tags))
+ }
+ if i.idx < 0 || i.idx >= len(i.metadatas) {
+ return
+ }
+ i.metadatas[i.idx] = metadata
+}
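
A minimal sketch of how a consumer drives this iterator contract (in practice
the downsampler-and-writer does this internally):

    iter, err := newPromTSIter(req.Timeseries, tagOpts)
    if err != nil {
        return err
    }
    for iter.Next() {
        value := iter.Current() // ingest.IterValue: tags, datapoints, unit, attributes
        _ = value               // hand off to the write path
    }
    return iter.Error()
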
diff --git a/src/query/api/v1/handler/prometheus/remote/write_test.go b/src/query/api/v1/handler/prometheus/remote/write_test.go
index ec61b217c6..b5c6218e5d 100644
--- a/src/query/api/v1/handler/prometheus/remote/write_test.go
+++ b/src/query/api/v1/handler/prometheus/remote/write_test.go
@@ -38,9 +38,10 @@ import (
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/remote/test"
"github.com/m3db/m3/src/query/api/v1/options"
"github.com/m3db/m3/src/query/models"
- "github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
xclock "github.com/m3db/m3/src/x/clock"
xerrors "github.com/m3db/m3/src/x/errors"
+ "github.com/m3db/m3/src/x/headers"
"github.com/m3db/m3/src/x/instrument"
"github.com/golang/mock/gomock"
@@ -238,8 +239,8 @@ func TestPromWriteUnaggregatedMetricsWithHeader(t *testing.T) {
promReq := test.GeneratePromWriteRequest()
promReqBody := test.GeneratePromWriteRequestBody(t, promReq)
req := httptest.NewRequest(PromWriteHTTPMethod, PromWriteURL, promReqBody)
- req.Header.Add(handleroptions.MetricsTypeHeader,
- storage.UnaggregatedMetricsType.String())
+ req.Header.Add(headers.MetricsTypeHeader,
+ storagemetadata.UnaggregatedMetricsType.String())
writer := httptest.NewRecorder()
handler.ServeHTTP(writer, req)
@@ -272,9 +273,9 @@ func TestPromWriteAggregatedMetricsWithHeader(t *testing.T) {
promReq := test.GeneratePromWriteRequest()
promReqBody := test.GeneratePromWriteRequestBody(t, promReq)
req := httptest.NewRequest(PromWriteHTTPMethod, PromWriteURL, promReqBody)
- req.Header.Add(handleroptions.MetricsTypeHeader,
- storage.AggregatedMetricsType.String())
- req.Header.Add(handleroptions.MetricsStoragePolicyHeader,
+ req.Header.Add(headers.MetricsTypeHeader,
+ storagemetadata.AggregatedMetricsType.String())
+ req.Header.Add(headers.MetricsStoragePolicyHeader,
"1m:21d")
writer := httptest.NewRecorder()
@@ -282,3 +283,28 @@ func TestPromWriteAggregatedMetricsWithHeader(t *testing.T) {
resp := writer.Result()
require.Equal(t, http.StatusOK, resp.StatusCode)
}
+
+func BenchmarkWriteDatapoints(b *testing.B) {
+ ctrl := gomock.NewController(b)
+ defer ctrl.Finish()
+
+ mockDownsamplerAndWriter := ingest.NewMockDownsamplerAndWriter(ctrl)
+ mockDownsamplerAndWriter.
+ EXPECT().
+ WriteBatch(gomock.Any(), gomock.Any(), gomock.Any()).
+ AnyTimes()
+
+ opts := makeOptions(mockDownsamplerAndWriter)
+ handler, err := NewPromWriteHandler(opts)
+ require.NoError(b, err)
+
+ promReq := test.GeneratePromWriteRequest()
+ promReqBody := test.GeneratePromWriteRequestBodyBytes(b, promReq)
+ promReqBodyReader := bytes.NewReader(nil)
+
+ for i := 0; i < b.N; i++ {
+ promReqBodyReader.Reset(promReqBody)
+ req := httptest.NewRequest(PromWriteHTTPMethod, PromWriteURL, promReqBodyReader)
+ handler.ServeHTTP(httptest.NewRecorder(), req)
+ }
+}
diff --git a/src/query/api/v1/handler/prometheus/response.go b/src/query/api/v1/handler/prometheus/response.go
new file mode 100644
index 0000000000..a28384e162
--- /dev/null
+++ b/src/query/api/v1/handler/prometheus/response.go
@@ -0,0 +1,405 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Response represents Prometheus's query response.
+type Response struct {
+ // Status is the response status.
+ Status string `json:"status"`
+ // Data is the response data.
+ Data data `json:"data"`
+}
+
+type data struct {
+ // ResultType is the type of Result (matrix, vector, etc.).
+ ResultType string
+ // Result contains the query result (concrete type depends on ResultType).
+ Result result
+}
+
+type result interface {
+ matches(other result) (MatchInformation, error)
+}
+
+// MatrixResult contains a list of matrixRow entries.
+type MatrixResult struct {
+ Result []matrixRow `json:"result"`
+}
+
+// VectorResult contains a list of vectorItem.
+type VectorResult struct {
+ Result []vectorItem `json:"result"`
+}
+
+// ScalarResult is the scalar Value for the response.
+type ScalarResult struct {
+ Result Value `json:"result"`
+}
+
+// StringResult is the string Value for the response.
+type StringResult struct {
+ Result Value `json:"result"`
+}
+
+// UnmarshalJSON unmarshals the data struct of query response.
+func (d *data) UnmarshalJSON(bytes []byte) error {
+ var discriminator struct {
+ ResultType string `json:"resultType"`
+ }
+ if err := json.Unmarshal(bytes, &discriminator); err != nil {
+ return err
+ }
+ *d = data{ResultType: discriminator.ResultType}
+
+ switch discriminator.ResultType {
+
+ case "matrix":
+ d.Result = &MatrixResult{}
+
+ case "vector":
+ d.Result = &VectorResult{}
+
+ case "scalar":
+ d.Result = &ScalarResult{}
+
+ case "string":
+ d.Result = &StringResult{}
+
+ default:
+ return fmt.Errorf("unknown resultType: %s", discriminator.ResultType)
+ }
+
+ return json.Unmarshal(bytes, d.Result)
+}
+
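A minimal sketch of the two-pass decode above: the discriminator selects the
concrete result type, then the same bytes are decoded into it (the payload
mirrors the scalar case exercised in the tests):

    var resp Response
    raw := []byte(`{"status":"success","data":{"resultType":"scalar","result":[1590605774, "84"]}}`)
    if err := json.Unmarshal(raw, &resp); err != nil {
        return err
    }
    // resp.Data.Result now holds a *ScalarResult wrapping Value{1590605774.0, "84"}.
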
+// Len is the number of elements in the collection.
+func (r MatrixResult) Len() int { return len(r.Result) }
+
+// Less reports whether the element with
+// index i should sort before the element with index j.
+func (r MatrixResult) Less(i, j int) bool {
+ return r.Result[i].id < r.Result[j].id
+}
+
+// Swap swaps the elements with indexes i and j.
+func (r MatrixResult) Swap(i, j int) { r.Result[i], r.Result[j] = r.Result[j], r.Result[i] }
+
+// Sort sorts the MatrixResult.
+func (r MatrixResult) Sort() {
+ for i, result := range r.Result {
+ r.Result[i].id = result.Metric.genID()
+ }
+
+ sort.Sort(r)
+}
+
+// Len is the number of elements in the vector.
+func (r VectorResult) Len() int { return len(r.Result) }
+
+// Less reports whether the element with
+// index i should sort before the element with index j.
+func (r VectorResult) Less(i, j int) bool {
+ return r.Result[i].id < r.Result[j].id
+}
+
+// Swap swaps the elements with indexes i and j.
+func (r VectorResult) Swap(i, j int) { r.Result[i], r.Result[j] = r.Result[j], r.Result[i] }
+
+// Sort sorts the VectorResult.
+func (r VectorResult) Sort() {
+ for i, result := range r.Result {
+ r.Result[i].id = result.Metric.genID()
+ }
+
+ sort.Sort(r)
+}
+
+// matrixRow is a single row of "matrix" Result.
+type matrixRow struct {
+ // Metric is the tags for the matrixRow.
+ Metric Tags `json:"metric"`
+ // Values is the set of values for the matrixRow.
+ Values Values `json:"values"`
+ id string
+}
+
+// vectorItem is a single item of "vector" Result.
+type vectorItem struct {
+ // Metric is the tags for the vectorItem.
+ Metric Tags `json:"metric"`
+ // Value is the value for the vectorItem.
+ Value Value `json:"value"`
+ id string
+}
+
+// Tags is a simple representation of Prometheus tags.
+type Tags map[string]string
+
+// Values is a list of values for the Prometheus Result.
+type Values []Value
+
+// Value is a single value for Prometheus Result.
+type Value []interface{}
+
+func (t *Tags) genID() string {
+	tags := make(sort.StringSlice, 0, len(*t))
+ for k, v := range *t {
+ tags = append(tags, fmt.Sprintf("%s:%s,", k, v))
+ }
+
+ sort.Sort(tags)
+ var sb strings.Builder
+	// NB: IDs built this way may collide for pathological tag values, but exact
+	// tag values are also checked, and this is a validation endpoint, so the
+	// correctness risk is acceptable.
+ for _, t := range tags {
+ sb.WriteString(t)
+ }
+
+ return sb.String()
+}
+
+// MatchInformation describes how well two responses match.
+type MatchInformation struct {
+ // FullMatch indicates a full match.
+ FullMatch bool
+ // NoMatch indicates that the responses do not match sufficiently.
+ NoMatch bool
+}
+
+// Matches compares two responses and determines how closely they match.
+func (r Response) Matches(other Response) (MatchInformation, error) {
+ if r.Status != other.Status {
+ err := fmt.Errorf("status %s does not match other status %s",
+ r.Status, other.Status)
+ return MatchInformation{
+ NoMatch: true,
+ }, err
+ }
+
+ if r.Status == "error" {
+ return MatchInformation{
+ FullMatch: true,
+ }, nil
+ }
+
+ return r.Data.matches(other.Data)
+}
+
+func (d data) matches(other data) (MatchInformation, error) {
+ if d.ResultType != other.ResultType {
+ err := fmt.Errorf("result type %s does not match other result type %s",
+ d.ResultType, other.ResultType)
+ return MatchInformation{
+ NoMatch: true,
+ }, err
+ }
+
+ return d.Result.matches(other.Result)
+}
+
+func (r MatrixResult) matches(other result) (MatchInformation, error) {
+ otherMatrix, ok := other.(*MatrixResult)
+ if !ok {
+ err := fmt.Errorf("incorrect type for matching, expected MatrixResult, %v", other)
+ return MatchInformation{
+ NoMatch: true,
+ }, err
+ }
+
+ if len(r.Result) != len(otherMatrix.Result) {
+ err := fmt.Errorf("result length %d does not match other result length %d",
+ len(r.Result), len(otherMatrix.Result))
+ return MatchInformation{
+ NoMatch: true,
+ }, err
+ }
+
+ r.Sort()
+ otherMatrix.Sort()
+ for i, result := range r.Result {
+ if err := result.matches(otherMatrix.Result[i]); err != nil {
+ return MatchInformation{
+ NoMatch: true,
+ }, err
+ }
+ }
+
+ return MatchInformation{FullMatch: true}, nil
+}
+
+func (r VectorResult) matches(other result) (MatchInformation, error) {
+ otherVector, ok := other.(*VectorResult)
+ if !ok {
+ err := fmt.Errorf("incorrect type for matching, expected VectorResult")
+ return MatchInformation{
+ NoMatch: true,
+ }, err
+ }
+
+ if len(r.Result) != len(otherVector.Result) {
+ err := fmt.Errorf("result length %d does not match other result length %d",
+ len(r.Result), len(otherVector.Result))
+ return MatchInformation{
+ NoMatch: true,
+ }, err
+ }
+
+ r.Sort()
+ otherVector.Sort()
+ for i, result := range r.Result {
+ if err := result.matches(otherVector.Result[i]); err != nil {
+ return MatchInformation{
+ NoMatch: true,
+ }, err
+ }
+ }
+
+ return MatchInformation{FullMatch: true}, nil
+}
+
+func (r ScalarResult) matches(other result) (MatchInformation, error) {
+ otherScalar, ok := other.(*ScalarResult)
+ if !ok {
+ err := fmt.Errorf("incorrect type for matching, expected ScalarResult")
+ return MatchInformation{
+ NoMatch: true,
+ }, err
+ }
+
+ if err := r.Result.matches(otherScalar.Result); err != nil {
+ return MatchInformation{
+ NoMatch: true,
+ }, err
+ }
+
+ return MatchInformation{FullMatch: true}, nil
+}
+
+func (r StringResult) matches(other result) (MatchInformation, error) {
+ otherString, ok := other.(*StringResult)
+ if !ok {
+ err := fmt.Errorf("incorrect type for matching, expected StringResult")
+ return MatchInformation{
+ NoMatch: true,
+ }, err
+ }
+
+ if err := r.Result.matches(otherString.Result); err != nil {
+ return MatchInformation{
+ NoMatch: true,
+ }, err
+ }
+
+ return MatchInformation{FullMatch: true}, nil
+}
+
+func (r matrixRow) matches(other matrixRow) error {
+	// NB: tags should already match by this point, so this is more of a sanity check.
+ if err := r.Metric.matches(other.Metric); err != nil {
+ return err
+ }
+
+ return r.Values.matches(other.Values)
+}
+
+func (r vectorItem) matches(other vectorItem) error {
+	// NB: tags should already match by this point, so this is more of a sanity check.
+ if err := r.Metric.matches(other.Metric); err != nil {
+ return err
+ }
+
+ return r.Value.matches(other.Value)
+}
+
+func (t Tags) matches(other Tags) error {
+ if len(t) != len(other) {
+ return fmt.Errorf("tag length %d does not match other tag length %d",
+ len(t), len(other))
+ }
+
+ for k, v := range t {
+ if vv, ok := other[k]; ok {
+ if v != vv {
+ return fmt.Errorf("tag %s value %s does not match other tag value %s", k, v, vv)
+ }
+ } else {
+			return fmt.Errorf("tag %s not found in other tagset", k)
+ }
+ }
+
+ return nil
+}
+
+func (v Values) matches(other Values) error {
+ if len(v) != len(other) {
+ return fmt.Errorf("values length %d does not match other values length %d",
+ len(v), len(other))
+ }
+
+ for i, val := range v {
+ if err := val.matches(other[i]); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (v Value) matches(other Value) error {
+ if len(v) != 2 {
+ return fmt.Errorf("value length %d must be 2", len(v))
+ }
+
+ if len(other) != 2 {
+ return fmt.Errorf("other value length %d must be 2", len(other))
+ }
+
+ tsV := fmt.Sprint(v[0])
+ tsOther := fmt.Sprint(other[0])
+ if tsV != tsOther {
+ return fmt.Errorf("ts %s does not match other ts %s", tsV, tsOther)
+ }
+
+ valV, err := strconv.ParseFloat(fmt.Sprint(v[1]), 64)
+ if err != nil {
+ return err
+ }
+
+ valOther, err := strconv.ParseFloat(fmt.Sprint(other[1]), 64)
+ if err != nil {
+ return err
+ }
+
+ if math.Abs(valV-valOther) > tolerance {
+ return fmt.Errorf("point %f does not match other point %f", valV, valOther)
+ }
+
+ return nil
+}
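
A minimal usage sketch for the matching helpers above, assuming the import path from this diff; the payload value is illustrative and both sides use the same bytes so the comparison trivially succeeds:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/m3db/m3/src/query/api/v1/handler/prometheus"
)

func main() {
	payload := []byte(`{
	  "status": "success",
	  "data": {"resultType": "scalar", "result": [1590605774, "84"]}
	}`)

	var left, right prometheus.Response
	if err := json.Unmarshal(payload, &left); err != nil {
		log.Fatal(err)
	}
	if err := json.Unmarshal(payload, &right); err != nil {
		log.Fatal(err)
	}

	// Matches reports FullMatch on success; on mismatch it returns NoMatch
	// plus an error describing the first difference found.
	info, err := left.Matches(right)
	fmt.Println(info.FullMatch, err) // true <nil>
}
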
diff --git a/src/query/api/v1/handler/prometheus/response_test.go b/src/query/api/v1/handler/prometheus/response_test.go
new file mode 100644
index 0000000000..c076410458
--- /dev/null
+++ b/src/query/api/v1/handler/prometheus/response_test.go
@@ -0,0 +1,532 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ fullMatch = MatchInformation{FullMatch: true}
+ noMatch = MatchInformation{NoMatch: true}
+)
+
+func TestUnmarshalPrometheusResponse(t *testing.T) {
+ tests := []struct {
+ name string
+ givenJson string
+ wantResponse Response
+ }{
+ {
+ name: "status: error",
+ givenJson: `{
+ "status": "error",
+ "errorType": "bad_data",
+ "error": "invalid parameter"
+ }`,
+ wantResponse: Response{
+ Status: "error",
+ },
+ },
+ {
+ name: "resultType: scalar",
+ givenJson: `{
+ "status": "success",
+ "data": {
+ "resultType": "scalar",
+ "result": [1590605774, "84"]
+ }
+ }`,
+ wantResponse: Response{
+ "success",
+ data{
+ "scalar",
+ &ScalarResult{Value{1590605774.0, "84"}},
+ },
+ },
+ },
+ {
+ name: "resultType: string",
+ givenJson: `{
+ "status": "success",
+ "data": {
+ "resultType": "string",
+ "result": [1590605775, "FOO"]
+ }
+ }`,
+ wantResponse: Response{
+ "success",
+ data{
+ "string",
+ &StringResult{Value{1590605775.0, "FOO"}},
+ },
+ },
+ },
+ {
+ name: "resultType: vector",
+ givenJson: `{
+ "status": "success",
+ "data": {
+ "resultType": "vector",
+ "result": [
+ {
+ "metric": {
+ "__name__": "foo",
+ "bar": "1"
+ },
+ "value": [1590605775, "0.5"]
+ },
+ {
+ "metric": {
+ "__name__": "foo",
+ "bar": "2"
+ },
+ "value": [1590605776, "2"]
+ }
+ ]
+ }
+ }`,
+ wantResponse: Response{
+ "success",
+ data{
+ "vector",
+ &VectorResult{[]vectorItem{
+ {
+ Metric: Tags{"__name__": "foo", "bar": "1"},
+ Value: Value{1590605775.0, "0.5"},
+ },
+ {
+ Metric: Tags{"__name__": "foo", "bar": "2"},
+ Value: Value{1590605776.0, "2"},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "resultType: matrix",
+ givenJson: `{
+ "status": "success",
+ "data": {
+ "resultType": "matrix",
+ "result": [
+ {
+ "metric": {
+ "__name__": "foo",
+ "bar": "1"
+ },
+ "values": [[1590605775, "1"], [1590605785, "11"]]
+ },
+ {
+ "metric": {
+ "__name__": "foo",
+ "bar": "2"
+ },
+ "values": [[1590605776, "2"], [1590605786, "22"]]
+ }
+ ]
+ }
+ }`,
+ wantResponse: Response{
+ "success",
+ data{
+ "matrix",
+ &MatrixResult{[]matrixRow{
+ {
+ Metric: Tags{"__name__": "foo", "bar": "1"},
+ Values: Values{{1590605775.0, "1"}, {1590605785.0, "11"}},
+ },
+ {
+ Metric: Tags{"__name__": "foo", "bar": "2"},
+ Values: Values{{1590605776.0, "2"}, {1590605786.0, "22"}},
+ },
+ }},
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ response := &Response{}
+ err := json.Unmarshal([]byte(tt.givenJson), response)
+ require.NoError(t, err)
+ assert.Equal(t, tt.wantResponse, *response)
+ })
+ }
+}
+
+func TestResponseMatching(t *testing.T) {
+ tests := []struct {
+ name string
+ response Response
+ }{
+ {
+ name: "error",
+ response: Response{
+ Status: "error",
+ },
+ },
+
+ {
+ name: "scalar",
+ response: Response{
+ "success",
+ data{
+ "scalar",
+ &ScalarResult{Value{1590605774.0, "1"}},
+ },
+ },
+ },
+ {
+ name: "scalar other timestamp",
+ response: Response{
+ "success",
+ data{
+ "scalar",
+ &ScalarResult{Value{1590605775.0, "1"}},
+ },
+ },
+ },
+ {
+ name: "scalar other value",
+ response: Response{
+ "success",
+ data{
+ "scalar",
+ &ScalarResult{Value{1590605774.0, "2"}},
+ },
+ },
+ },
+
+ {
+ name: "vector",
+ response: Response{
+ "success",
+ data{
+ "vector",
+ &VectorResult{[]vectorItem{
+ {
+ Metric: Tags{"__name__": "foo"},
+ Value: Value{1590605775.0, "0.5"},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "vector more tags",
+ response: Response{
+ "success",
+ data{
+ "vector",
+ &VectorResult{[]vectorItem{
+ {
+ Metric: Tags{"__name__": "foo", "bar": "1"},
+ Value: Value{1590605775.0, "0.5"},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "vector more items",
+ response: Response{
+ "success",
+ data{
+ "vector",
+ &VectorResult{[]vectorItem{
+ {
+ Metric: Tags{"__name__": "foo", "bar": "1"},
+ Value: Value{1590605775.0, "0.5"},
+ },
+ {
+ Metric: Tags{"__name__": "foo", "bar": "2"},
+ Value: Value{1590605775.0, "0.5"},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "vector different tag",
+ response: Response{
+ "success",
+ data{
+ "vector",
+ &VectorResult{[]vectorItem{
+ {
+ Metric: Tags{"__name__": "bar"},
+ Value: Value{1590605775.0, "0.5"},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "vector different value",
+ response: Response{
+ "success",
+ data{
+ "vector",
+ &VectorResult{[]vectorItem{
+ {
+ Metric: Tags{"__name__": "foo"},
+ Value: Value{1590605775.0, "1"},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "vector different timestamp",
+ response: Response{
+ "success",
+ data{
+ "vector",
+ &VectorResult{[]vectorItem{
+ {
+ Metric: Tags{"__name__": "foo"},
+ Value: Value{1590605774.0, "0.5"},
+ },
+ }},
+ },
+ },
+ },
+
+ {
+ name: "matrix",
+ response: Response{
+ "success",
+ data{
+ "matrix",
+ &MatrixResult{[]matrixRow{
+ {
+ Metric: Tags{"__name__": "foo"},
+ Values: Values{{1590605775.0, "1"}},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "matrix other tag",
+ response: Response{
+ "success",
+ data{
+ "matrix",
+ &MatrixResult{[]matrixRow{
+ {
+ Metric: Tags{"__name__": "bar"},
+ Values: Values{{1590605775.0, "1"}},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "matrix other value",
+ response: Response{
+ "success",
+ data{
+ "matrix",
+ &MatrixResult{[]matrixRow{
+ {
+ Metric: Tags{"__name__": "foo"},
+ Values: Values{{1590605775.0, "2"}},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "matrix other timestamp",
+ response: Response{
+ "success",
+ data{
+ "matrix",
+ &MatrixResult{[]matrixRow{
+ {
+ Metric: Tags{"__name__": "foo"},
+ Values: Values{{1590605776.0, "1"}},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "matrix more tags",
+ response: Response{
+ "success",
+ data{
+ "matrix",
+ &MatrixResult{[]matrixRow{
+ {
+ Metric: Tags{"__name__": "foo", "bar": "1"},
+ Values: Values{{1590605775.0, "1"}},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "matrix more values",
+ response: Response{
+ "success",
+ data{
+ "matrix",
+ &MatrixResult{[]matrixRow{
+ {
+ Metric: Tags{"__name__": "foo"},
+ Values: Values{{1590605775.0, "1"}, {1590605776.0, "2"}},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "matrix more rows",
+ response: Response{
+ "success",
+ data{
+ "matrix",
+ &MatrixResult{[]matrixRow{
+ {
+ Metric: Tags{"__name__": "foo"},
+ Values: Values{{1590605775.0, "1"}},
+ },
+ {
+ Metric: Tags{"__name__": "bar"},
+ Values: Values{{1590605775.0, "1"}},
+ },
+ }},
+ },
+ },
+ },
+ }
+
+ for i, ti := range tests {
+ for j, tj := range tests {
+ t.Run(fmt.Sprintf("%s vs %s", ti.name, tj.name), func(t *testing.T) {
+ matchResult, err := ti.response.Matches(tj.response)
+ if i == j { // should match
+ require.NoError(t, err)
+ assert.Equal(t, fullMatch, matchResult)
+ } else { // should not match
+ require.Error(t, err)
+ assert.Equal(t, noMatch, matchResult)
+ }
+ })
+ }
+ }
+}
+
+func TestResponseMatchingOrderInsensitive(t *testing.T) {
+ tests := []struct {
+ name string
+ left Response
+ right Response
+ }{
+ {
+ name: "vector",
+ left: Response{
+ "success",
+ data{
+ "vector",
+ &VectorResult{[]vectorItem{
+ {
+ Metric: Tags{"__name__": "first"},
+ Value: Value{1590605775.0, "1"},
+ },
+ {
+ Metric: Tags{"__name__": "second"},
+ Value: Value{1590605775.0, "2"},
+ },
+ }},
+ },
+ },
+ right: Response{
+ "success",
+ data{
+ "vector",
+ &VectorResult{[]vectorItem{
+ {
+ Metric: Tags{"__name__": "second"},
+ Value: Value{1590605775.0, "2"},
+ },
+ {
+ Metric: Tags{"__name__": "first"},
+ Value: Value{1590605775.0, "1"},
+ },
+ }},
+ },
+ },
+ },
+ {
+ name: "matrix",
+ left: Response{
+ "success",
+ data{
+ "matrix",
+ &MatrixResult{[]matrixRow{
+ {
+ Metric: Tags{"__name__": "first"},
+ Values: Values{{1590605775.0, "1"}},
+ },
+ {
+ Metric: Tags{"__name__": "second"},
+ Values: Values{{1590605775.0, "2"}},
+ },
+ }},
+ },
+ },
+ right: Response{
+ "success",
+ data{
+ "matrix",
+ &MatrixResult{[]matrixRow{
+ {
+ Metric: Tags{"__name__": "second"},
+ Values: Values{{1590605775.0, "2"}},
+ },
+ {
+ Metric: Tags{"__name__": "first"},
+ Values: Values{{1590605775.0, "1"}},
+ },
+ }},
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+ matchResult, err := tt.left.Matches(tt.right)
+ require.NoError(t, err)
+ assert.Equal(t, fullMatch, matchResult)
+ })
+ }
+}
diff --git a/src/query/api/v1/handler/search.go b/src/query/api/v1/handler/search.go
index 9eeb9fd1a1..3345ab63f3 100644
--- a/src/query/api/v1/handler/search.go
+++ b/src/query/api/v1/handler/search.go
@@ -67,16 +67,19 @@ func (h *SearchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
logger := logging.WithContext(r.Context(), h.instrumentOpts)
query, parseBodyErr := h.parseBody(r)
- opts, parseURLParamsErr := h.parseURLParams(r)
+ fetchOpts, parseURLParamsErr := h.parseURLParams(r)
if err := firstParseError(parseBodyErr, parseURLParamsErr); err != nil {
logger.Error("unable to parse request", zap.Error(err.Inner()))
xhttp.Error(w, err.Inner(), err.Code())
return
}
- results, err := h.search(r.Context(), query, opts)
+ results, err := h.search(r.Context(), query, fetchOpts)
if err != nil {
- logger.Error("unable to fetch data", zap.Error(err))
+ logger.Error("search query error",
+ zap.Error(err),
+ zap.Any("query", query),
+ zap.Any("fetchOpts", fetchOpts))
xhttp.Error(w, err, http.StatusBadRequest)
return
}
@@ -105,9 +108,34 @@ func (h *SearchHandler) parseURLParams(r *http.Request) (*storage.FetchOptions,
return nil, parseErr
}
+ // Parse for series and docs limits as query params.
+ // For backwards compat, allow "limit" and "seriesLimit"
+ // for the series limit name.
if str := r.URL.Query().Get("limit"); str != "" {
var err error
- fetchOpts.Limit, err = strconv.Atoi(str)
+ fetchOpts.SeriesLimit, err = strconv.Atoi(str)
+ if err != nil {
+ return nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+ } else if str := r.URL.Query().Get("seriesLimit"); str != "" {
+ var err error
+ fetchOpts.SeriesLimit, err = strconv.Atoi(str)
+ if err != nil {
+ return nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+ }
+
+ if str := r.URL.Query().Get("docsLimit"); str != "" {
+ var err error
+ fetchOpts.DocsLimit, err = strconv.Atoi(str)
+ if err != nil {
+ return nil, xhttp.NewParseError(err, http.StatusBadRequest)
+ }
+ }
+
+ if str := r.URL.Query().Get("requireExhaustive"); str != "" {
+ var err error
+ fetchOpts.RequireExhaustive, err = strconv.ParseBool(str)
if err != nil {
return nil, xhttp.NewParseError(err, http.StatusBadRequest)
}
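
A sketch of the new limit parameters from a client's perspective, assuming a coordinator listening on localhost:7201 and the /api/v1/search route; the request body (tag matchers) is elided and the values are illustrative:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// "limit" is still accepted for backwards compatibility and maps to
	// SeriesLimit, the same as the newer "seriesLimit" parameter.
	url := "http://localhost:7201/api/v1/search" +
		"?seriesLimit=100&docsLimit=500&requireExhaustive=true"

	// Request body (tag matchers) elided; see SearchHandler.parseBody.
	resp, err := http.Post(url, "application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
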
diff --git a/src/query/api/v1/handler/search_test.go b/src/query/api/v1/handler/search_test.go
index 584327a010..f6d38cb080 100644
--- a/src/query/api/v1/handler/search_test.go
+++ b/src/query/api/v1/handler/search_test.go
@@ -40,6 +40,8 @@ import (
"github.com/m3db/m3/src/query/test/m3"
"github.com/m3db/m3/src/query/test/seriesiter"
"github.com/m3db/m3/src/x/ident"
+ xhttp "github.com/m3db/m3/src/x/net/http"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@@ -84,14 +86,14 @@ func generateTagIters(ctrl *gomock.Controller) *client.MockTaggedIDsIterator {
mockTaggedIDsIter.EXPECT().Next().Return(false)
mockTaggedIDsIter.EXPECT().Current().Return(ident.StringID("ns"),
ident.StringID(testID), seriesiter.GenerateSingleSampleTagIterator(ctrl, seriesiter.GenerateTag()))
- mockTaggedIDsIter.EXPECT().Err().Return(nil)
+ mockTaggedIDsIter.EXPECT().Err().Return(nil).MinTimes(1)
mockTaggedIDsIter.EXPECT().Finalize()
return mockTaggedIDsIter
}
func searchServer(t *testing.T) *SearchHandler {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
mockTaggedIDsIter := generateTagIters(ctrl)
@@ -99,8 +101,8 @@ func searchServer(t *testing.T) *SearchHandler {
session.EXPECT().FetchTaggedIDs(gomock.Any(), gomock.Any(), gomock.Any()).
Return(mockTaggedIDsIter, client.FetchResponseMetadata{Exhaustive: false}, nil).AnyTimes()
- builder := handleroptions.
- NewFetchOptionsBuilder(handleroptions.FetchOptionsBuilderOptions{})
+ builder := handleroptions.NewFetchOptionsBuilder(
+ handleroptions.FetchOptionsBuilderOptions{})
opts := options.EmptyHandlerOptions().
SetStorage(storage).SetFetchOptionsBuilder(builder)
search := NewSearchHandler(opts)
@@ -113,7 +115,7 @@ func TestSearchResponse(t *testing.T) {
searchHandler := searchServer(t)
opts := storage.NewFetchOptions()
- opts.Limit = 100
+ opts.SeriesLimit = 100
results, err := searchHandler.search(context.TODO(), generateSearchReq(), opts)
require.NoError(t, err)
@@ -129,7 +131,7 @@ func TestSearchEndpoint(t *testing.T) {
urlWithLimit := fmt.Sprintf("%s%s", server.URL, "?limit=90")
req, _ := http.NewRequest("POST", urlWithLimit, generateSearchBody(t))
- req.Header.Add("Content-Type", "application/json")
+ req.Header.Add(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
diff --git a/src/query/api/v1/handler/topic/add.go b/src/query/api/v1/handler/topic/add.go
index 9f37115b9a..e2362ebc2f 100644
--- a/src/query/api/v1/handler/topic/add.go
+++ b/src/query/api/v1/handler/topic/add.go
@@ -47,12 +47,12 @@ const (
// AddHandler is the handler for topic adds.
type AddHandler Handler
-// NewAddHandler returns a new instance of AddHandler.
-func NewAddHandler(
+// newAddHandler returns a new instance of AddHandler.
+func newAddHandler(
client clusterclient.Client,
cfg config.Configuration,
instrumentOpts instrument.Options,
-) *AddHandler {
+) http.Handler {
return &AddHandler{
client: client,
cfg: cfg,
diff --git a/src/query/api/v1/handler/topic/add_test.go b/src/query/api/v1/handler/topic/add_test.go
index 579f9c7251..4ec7fd58af 100644
--- a/src/query/api/v1/handler/topic/add_test.go
+++ b/src/query/api/v1/handler/topic/add_test.go
@@ -43,8 +43,8 @@ func TestTopicAddHandler(t *testing.T) {
defer ctrl.Finish()
mockService := setupTest(t, ctrl)
- handler := NewAddHandler(nil, config.Configuration{}, instrument.NewOptions())
- handler.serviceFn = testServiceFn(mockService)
+ handler := newAddHandler(nil, config.Configuration{}, instrument.NewOptions())
+ handler.(*AddHandler).serviceFn = testServiceFn(mockService)
t1 := topic.NewTopic().SetName(DefaultTopicName).SetNumberOfShards(256)
diff --git a/src/query/api/v1/handler/topic/common.go b/src/query/api/v1/handler/topic/common.go
index 1ae2a1bcc6..3f210bb62b 100644
--- a/src/query/api/v1/handler/topic/common.go
+++ b/src/query/api/v1/handler/topic/common.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -81,16 +81,19 @@ func RegisterRoutes(
}
r.HandleFunc(InitURL,
- wrapped(NewInitHandler(client, cfg, instrumentOpts)).ServeHTTP).
+ wrapped(newInitHandler(client, cfg, instrumentOpts)).ServeHTTP).
Methods(InitHTTPMethod)
r.HandleFunc(GetURL,
- wrapped(NewGetHandler(client, cfg, instrumentOpts)).ServeHTTP).
+ wrapped(newGetHandler(client, cfg, instrumentOpts)).ServeHTTP).
Methods(GetHTTPMethod)
r.HandleFunc(AddURL,
- wrapped(NewAddHandler(client, cfg, instrumentOpts)).ServeHTTP).
+ wrapped(newAddHandler(client, cfg, instrumentOpts)).ServeHTTP).
Methods(AddHTTPMethod)
+ r.HandleFunc(UpdateURL,
+ wrapped(newUpdateHandler(client, cfg, instrumentOpts)).ServeHTTP).
+ Methods(UpdateHTTPMethod)
r.HandleFunc(DeleteURL,
- wrapped(NewDeleteHandler(client, cfg, instrumentOpts)).ServeHTTP).
+ wrapped(newDeleteHandler(client, cfg, instrumentOpts)).ServeHTTP).
Methods(DeleteHTTPMethod)
}
diff --git a/src/query/api/v1/handler/topic/delete.go b/src/query/api/v1/handler/topic/delete.go
index 6af4cc1c10..90c4098c39 100644
--- a/src/query/api/v1/handler/topic/delete.go
+++ b/src/query/api/v1/handler/topic/delete.go
@@ -46,12 +46,12 @@ const (
// DeleteHandler is the handler for topic adds.
type DeleteHandler Handler
-// NewDeleteHandler returns a new instance of DeleteHandler.
-func NewDeleteHandler(
+// newDeleteHandler returns a new instance of DeleteHandler.
+func newDeleteHandler(
client clusterclient.Client,
cfg config.Configuration,
instrumentOpts instrument.Options,
-) *DeleteHandler {
+) http.Handler {
return &DeleteHandler{
client: client,
cfg: cfg,
diff --git a/src/query/api/v1/handler/topic/delete_test.go b/src/query/api/v1/handler/topic/delete_test.go
index 577b5cb848..ebf0de86fc 100644
--- a/src/query/api/v1/handler/topic/delete_test.go
+++ b/src/query/api/v1/handler/topic/delete_test.go
@@ -39,8 +39,8 @@ func TestTopicDeleteHandler(t *testing.T) {
defer ctrl.Finish()
mockService := setupTest(t, ctrl)
- handler := NewDeleteHandler(nil, config.Configuration{}, instrument.NewOptions())
- handler.serviceFn = testServiceFn(mockService)
+ handler := newDeleteHandler(nil, config.Configuration{}, instrument.NewOptions())
+ handler.(*DeleteHandler).serviceFn = testServiceFn(mockService)
// Test successful get
w := httptest.NewRecorder()
diff --git a/src/query/api/v1/handler/topic/get.go b/src/query/api/v1/handler/topic/get.go
index b6b8d06972..654ac12c14 100644
--- a/src/query/api/v1/handler/topic/get.go
+++ b/src/query/api/v1/handler/topic/get.go
@@ -47,12 +47,12 @@ const (
// GetHandler is the handler for topic gets.
type GetHandler Handler
-// NewGetHandler returns a new instance of GetHandler.
-func NewGetHandler(
+// newGetHandler returns a new instance of GetHandler.
+func newGetHandler(
client clusterclient.Client,
cfg config.Configuration,
instrumentOpts instrument.Options,
-) *GetHandler {
+) http.Handler {
return &GetHandler{
client: client,
cfg: cfg,
diff --git a/src/query/api/v1/handler/topic/get_test.go b/src/query/api/v1/handler/topic/get_test.go
index cc8e3fa0aa..e1f04c1a15 100644
--- a/src/query/api/v1/handler/topic/get_test.go
+++ b/src/query/api/v1/handler/topic/get_test.go
@@ -43,8 +43,8 @@ func TestTopicGetHandler(t *testing.T) {
defer ctrl.Finish()
mockService := setupTest(t, ctrl)
- handler := NewGetHandler(nil, config.Configuration{}, instrument.NewOptions())
- handler.serviceFn = testServiceFn(mockService)
+ handler := newGetHandler(nil, config.Configuration{}, instrument.NewOptions())
+ handler.(*GetHandler).serviceFn = testServiceFn(mockService)
// Test successful get
w := httptest.NewRecorder()
diff --git a/src/query/api/v1/handler/topic/init.go b/src/query/api/v1/handler/topic/init.go
index 41e186fb25..6bed0b3e55 100644
--- a/src/query/api/v1/handler/topic/init.go
+++ b/src/query/api/v1/handler/topic/init.go
@@ -47,12 +47,12 @@ const (
// InitHandler is the handler for topic inits.
type InitHandler Handler
-// NewInitHandler returns a new instance of InitHandler.
-func NewInitHandler(
+// newInitHandler returns a new instance of InitHandler.
+func newInitHandler(
client clusterclient.Client,
cfg config.Configuration,
instrumentOpts instrument.Options,
-) *InitHandler {
+) http.Handler {
return &InitHandler{
client: client,
cfg: cfg,
diff --git a/src/query/api/v1/handler/topic/init_test.go b/src/query/api/v1/handler/topic/init_test.go
index c0b86ddea4..78085bec2b 100644
--- a/src/query/api/v1/handler/topic/init_test.go
+++ b/src/query/api/v1/handler/topic/init_test.go
@@ -43,8 +43,8 @@ func TestPlacementInitHandler(t *testing.T) {
defer ctrl.Finish()
mockService := setupTest(t, ctrl)
- handler := NewInitHandler(nil, config.Configuration{}, instrument.NewOptions())
- handler.serviceFn = testServiceFn(mockService)
+ handler := newInitHandler(nil, config.Configuration{}, instrument.NewOptions())
+ handler.(*InitHandler).serviceFn = testServiceFn(mockService)
// Test topic init success
initProto := admin.TopicInitRequest{
diff --git a/src/query/api/v1/handler/topic/update.go b/src/query/api/v1/handler/topic/update.go
new file mode 100644
index 0000000000..b335ff5861
--- /dev/null
+++ b/src/query/api/v1/handler/topic/update.go
@@ -0,0 +1,136 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package topic
+
+import (
+ "net/http"
+
+ clusterclient "github.com/m3db/m3/src/cluster/client"
+ "github.com/m3db/m3/src/cmd/services/m3query/config"
+ "github.com/m3db/m3/src/msg/topic"
+ "github.com/m3db/m3/src/query/api/v1/handler"
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
+ "github.com/m3db/m3/src/query/generated/proto/admin"
+ "github.com/m3db/m3/src/query/util/logging"
+ "github.com/m3db/m3/src/x/instrument"
+ xhttp "github.com/m3db/m3/src/x/net/http"
+
+ pkgerrors "github.com/pkg/errors"
+ "go.uber.org/zap"
+)
+
+const (
+	// UpdateURL is the URL for the topic update handler (with the PUT method).
+ UpdateURL = handler.RoutePrefixV1 + "/topic"
+
+ // UpdateHTTPMethod is the HTTP method used with this resource.
+ UpdateHTTPMethod = http.MethodPut
+)
+
+// UpdateHandler is the handler for topic updates.
+type UpdateHandler Handler
+
+// newUpdateHandler returns a new instance of UpdateHandler. This is used for
+// updating a topic in-place, for example to add or remove consumers.
+func newUpdateHandler(
+ client clusterclient.Client,
+ cfg config.Configuration,
+ instrumentOpts instrument.Options,
+) http.Handler {
+ return &UpdateHandler{
+ client: client,
+ cfg: cfg,
+ serviceFn: Service,
+ instrumentOpts: instrumentOpts,
+ }
+}
+
+func (h *UpdateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ var (
+ ctx = r.Context()
+ logger = logging.WithContext(ctx, h.instrumentOpts)
+ req admin.TopicUpdateRequest
+ )
+
+ if rErr := parseRequest(r, &req); rErr != nil {
+ logger.Error("unable to parse request", zap.Error(rErr))
+ xhttp.Error(w, rErr.Inner(), rErr.Code())
+ return
+ }
+
+ serviceCfg := handleroptions.ServiceNameAndDefaults{}
+ svcOpts := handleroptions.NewServiceOptions(serviceCfg, r.Header, nil)
+ service, err := h.serviceFn(h.client, svcOpts)
+ if err != nil {
+ logger.Error("unable to get service", zap.Error(err))
+ xhttp.Error(w, err, http.StatusInternalServerError)
+ return
+ }
+
+ name := topicName(r.Header)
+	topicLogger := logger.With(zap.String("topic", name))
+ m3Topic, err := service.Get(name)
+ if err != nil {
+ logger.Error("unable to get topic", zap.Error(err))
+ xhttp.Error(w, err, http.StatusNotFound)
+ return
+ }
+
+ oldConsumers := len(m3Topic.ConsumerServices())
+ newConsumers := len(req.ConsumerServices)
+
+ csvcs := make([]topic.ConsumerService, 0, newConsumers)
+ for _, svc := range req.ConsumerServices {
+ csvc, err := topic.NewConsumerServiceFromProto(svc)
+ if err != nil {
+ err := pkgerrors.WithMessagef(err, "error converting consumer service '%s'", svc.String())
+			topicLogger.Error("convert consumer service error", zap.Error(err))
+ xhttp.Error(w, err, http.StatusBadRequest)
+ return
+ }
+
+ csvcs = append(csvcs, csvc)
+ }
+
+ m3Topic = m3Topic.SetConsumerServices(csvcs)
+ newTopic, err := service.CheckAndSet(m3Topic, int(req.Version))
+ if err != nil {
+		topicLogger.Error("unable to update topic", zap.Error(err))
+		err := pkgerrors.WithMessagef(err, "error updating topic '%s'", name)
+ xhttp.Error(w, err, http.StatusBadRequest)
+ return
+ }
+
+	topicLogger.Info("updated topic in-place", zap.Int("oldConsumers", oldConsumers), zap.Int("newConsumers", newConsumers))
+
+ pb, err := topic.ToProto(m3Topic)
+ if err != nil {
+ logger.Error("unable to convert topic to protobuf", zap.Error(err))
+ xhttp.Error(w, err, http.StatusInternalServerError)
+ return
+ }
+
+ resp := &admin.TopicGetResponse{
+ Topic: pb,
+ Version: uint32(newTopic.Version()),
+ }
+ xhttp.WriteProtoMsgJSONResponse(w, resp, logger)
+}
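
A sketch of driving the new endpoint from a client, assuming a coordinator at localhost:7201; the topic name travels in the topic-name header, the version must match the stored topic version for CheckAndSet to succeed, and the JSON field casing follows the usual jsonpb defaults (illustrative, not normative):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Replaces the topic's consumer services wholesale; an empty list
	// removes all consumers (see the second case in the test below).
	payload := []byte(`{
	  "consumerServices": [{
	    "serviceId": {"name": "svc", "environment": "env", "zone": "zone"},
	    "consumptionType": "SHARED"
	  }],
	  "version": 2
	}`)

	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:7201/api/v1/topic", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("topic-name", "test-topic")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
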
diff --git a/src/query/api/v1/handler/topic/update_test.go b/src/query/api/v1/handler/topic/update_test.go
new file mode 100644
index 0000000000..ccff291961
--- /dev/null
+++ b/src/query/api/v1/handler/topic/update_test.go
@@ -0,0 +1,155 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package topic
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/m3db/m3/src/cmd/services/m3query/config"
+ "github.com/m3db/m3/src/msg/generated/proto/topicpb"
+ "github.com/m3db/m3/src/msg/topic"
+ "github.com/m3db/m3/src/query/generated/proto/admin"
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ testTopicName = "test-topic"
+)
+
+func TestTopicUpdateHandler(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ mockService := setupTest(t, ctrl)
+ handler := newUpdateHandler(nil, config.Configuration{}, instrument.NewOptions())
+ handler.(*UpdateHandler).serviceFn = testServiceFn(mockService)
+
+ consumerSvc := &topicpb.ConsumerService{
+ ServiceId: &topicpb.ServiceID{
+ Name: "svc",
+ Environment: "env",
+ Zone: "zone",
+ },
+ ConsumptionType: topicpb.ConsumptionType_SHARED,
+ }
+
+	// Test topic update success.
+ updateProto := admin.TopicUpdateRequest{
+ ConsumerServices: []*topicpb.ConsumerService{consumerSvc},
+ Version: 2,
+ }
+ w := httptest.NewRecorder()
+ b := bytes.NewBuffer(nil)
+ require.NoError(t, jsonMarshaler.Marshal(b, &updateProto))
+ req := httptest.NewRequest("PUT", "/topic/update", b)
+ req.Header.Add("topic-name", testTopicName)
+ require.NotNil(t, req)
+
+ returnTopic := topic.NewTopic().
+ SetName(testTopicName).
+ SetNumberOfShards(256).
+ SetVersion(1).SetConsumerServices([]topic.ConsumerService{
+ func() topic.ConsumerService {
+ svc, err := topic.NewConsumerServiceFromProto(consumerSvc)
+ assert.NoError(t, err)
+ return svc
+ }(),
+ })
+
+ mockService.EXPECT().
+ Get(testTopicName).
+ Return(returnTopic, nil)
+
+ mockService.
+ EXPECT().
+ CheckAndSet(returnTopic, 2).
+ Return(
+ returnTopic.SetVersion(3),
+ nil,
+ )
+
+ handler.ServeHTTP(w, req)
+ resp := w.Result()
+ body, err := ioutil.ReadAll(resp.Body)
+ require.NoError(t, err)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+
+ var respProto admin.TopicGetResponse
+ require.NoError(t, jsonUnmarshaler.Unmarshal(bytes.NewBuffer(body), &respProto))
+
+ validateEqualTopicProto(t, topicpb.Topic{
+ Name: testTopicName,
+ NumberOfShards: 256,
+ ConsumerServices: []*topicpb.ConsumerService{consumerSvc},
+ }, *respProto.Topic)
+
+ require.Equal(t, uint32(3), respProto.Version)
+
+ // Test removing all consumers.
+ updateProto = admin.TopicUpdateRequest{
+ ConsumerServices: []*topicpb.ConsumerService{},
+ Version: 3,
+ }
+ w = httptest.NewRecorder()
+ b = bytes.NewBuffer(nil)
+ require.NoError(t, jsonMarshaler.Marshal(b, &updateProto))
+ req = httptest.NewRequest("PUT", "/topic/update", b)
+ req.Header.Add("topic-name", testTopicName)
+ require.NotNil(t, req)
+
+ returnTopic = returnTopic.SetConsumerServices([]topic.ConsumerService{})
+
+ mockService.EXPECT().
+ Get(testTopicName).
+ Return(returnTopic, nil)
+
+ mockService.
+ EXPECT().
+ CheckAndSet(returnTopic, 3).
+ Return(
+ returnTopic.SetVersion(4),
+ nil,
+ )
+
+ handler.ServeHTTP(w, req)
+ resp = w.Result()
+ body, err = ioutil.ReadAll(resp.Body)
+ require.NoError(t, err)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+
+ require.NoError(t, jsonUnmarshaler.Unmarshal(bytes.NewBuffer(body), &respProto))
+
+ validateEqualTopicProto(t, topicpb.Topic{
+ Name: testTopicName,
+ NumberOfShards: 256,
+ ConsumerServices: []*topicpb.ConsumerService{},
+ }, *respProto.Topic)
+
+ require.Equal(t, uint32(4), respProto.Version)
+}
diff --git a/src/query/api/v1/httpd/handler.go b/src/query/api/v1/httpd/handler.go
index 6a37244a32..eb115b2645 100644
--- a/src/query/api/v1/httpd/handler.go
+++ b/src/query/api/v1/httpd/handler.go
@@ -36,6 +36,7 @@ import (
"github.com/m3db/m3/src/query/api/v1/handler/namespace"
"github.com/m3db/m3/src/query/api/v1/handler/openapi"
"github.com/m3db/m3/src/query/api/v1/handler/placement"
+ "github.com/m3db/m3/src/query/api/v1/handler/prom"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/native"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/remote"
@@ -43,18 +44,25 @@ import (
"github.com/m3db/m3/src/query/api/v1/options"
"github.com/m3db/m3/src/query/util/logging"
xdebug "github.com/m3db/m3/src/x/debug"
+ "github.com/m3db/m3/src/x/headers"
xhttp "github.com/m3db/m3/src/x/net/http"
"github.com/m3db/m3/src/x/net/http/cors"
- "github.com/prometheus/prometheus/util/httputil"
"github.com/gorilla/mux"
"github.com/opentracing-contrib/go-stdlib/nethttp"
opentracing "github.com/opentracing/opentracing-go"
+ "github.com/prometheus/prometheus/util/httputil"
)
const (
healthURL = "/health"
routesURL = "/routes"
+	// EngineHeaderName defines the header name used to switch between the
+	// prometheus and m3query engines.
+	EngineHeaderName = headers.M3HeaderPrefix + "Engine"
+	// EngineURLParam defines the query URL parameter used to switch between
+	// the prometheus and m3query engines.
+	EngineURLParam = "engine"
)
var (
@@ -156,19 +164,55 @@ func (h *Handler) RegisterRoutes() error {
Tagged(v1APIGroup),
))
- nativePromReadHandler := native.NewPromReadHandler(nativeSourceOpts)
+ // Register custom endpoints.
+ for _, custom := range h.customHandlers {
+ handler, err := custom.Handler(nativeSourceOpts)
+ if err != nil {
+ return err
+ }
+
+ h.router.HandleFunc(custom.Route(), handler.ServeHTTP).
+ Methods(custom.Methods()...)
+ }
+
+ opts := prom.Options{
+ PromQLEngine: h.options.PrometheusEngine(),
+ }
+ promqlQueryHandler := wrapped(prom.NewReadHandler(opts, nativeSourceOpts))
+ promqlInstantQueryHandler := wrapped(prom.NewReadInstantHandler(opts, nativeSourceOpts))
+ nativePromReadHandler := wrapped(native.NewPromReadHandler(nativeSourceOpts))
+ nativePromReadInstantHandler := wrapped(native.NewPromReadInstantHandler(nativeSourceOpts))
+
+ h.options.QueryRouter().Setup(options.QueryRouterOptions{
+ DefaultQueryEngine: h.options.DefaultQueryEngine(),
+ PromqlHandler: promqlQueryHandler.ServeHTTP,
+ M3QueryHandler: nativePromReadHandler.ServeHTTP,
+ })
+
+ h.options.InstantQueryRouter().Setup(options.QueryRouterOptions{
+ DefaultQueryEngine: h.options.DefaultQueryEngine(),
+ PromqlHandler: promqlInstantQueryHandler.ServeHTTP,
+ M3QueryHandler: nativePromReadInstantHandler.ServeHTTP,
+ })
+
+ h.router.
+ HandleFunc(native.PromReadURL, h.options.QueryRouter().ServeHTTP).
+ Methods(native.PromReadHTTPMethods...)
+ h.router.
+ HandleFunc(native.PromReadInstantURL, h.options.InstantQueryRouter().ServeHTTP).
+ Methods(native.PromReadInstantHTTPMethods...)
+
+ h.router.HandleFunc("/prometheus"+native.PromReadURL, promqlQueryHandler.ServeHTTP).Methods(native.PromReadHTTPMethods...)
+ h.router.HandleFunc("/prometheus"+native.PromReadInstantURL, promqlInstantQueryHandler.ServeHTTP).Methods(native.PromReadInstantHTTPMethods...)
+
h.router.HandleFunc(remote.PromReadURL,
wrapped(promRemoteReadHandler).ServeHTTP,
- ).Methods(remote.PromReadHTTPMethod)
+ ).Methods(remote.PromReadHTTPMethods...)
h.router.HandleFunc(remote.PromWriteURL,
panicOnly(promRemoteWriteHandler).ServeHTTP,
).Methods(remote.PromWriteHTTPMethod)
- h.router.HandleFunc(native.PromReadURL,
- wrapped(nativePromReadHandler).ServeHTTP,
- ).Methods(native.PromReadHTTPMethods...)
- h.router.HandleFunc(native.PromReadInstantURL,
- wrapped(native.NewPromReadInstantHandler(h.options)).ServeHTTP,
- ).Methods(native.PromReadInstantHTTPMethods...)
+ h.router.HandleFunc("/m3query"+native.PromReadURL, nativePromReadHandler.ServeHTTP).Methods(native.PromReadHTTPMethods...)
+ h.router.HandleFunc("/m3query"+native.PromReadInstantURL, nativePromReadInstantHandler.ServeHTTP).Methods(native.PromReadInstantHTTPMethods...)
// InfluxDB write endpoint.
h.router.HandleFunc(influxdb.InfluxWriteURL,
@@ -280,17 +324,6 @@ func (h *Handler) RegisterRoutes() error {
}
}
- // Register custom endpoints.
- for _, custom := range h.customHandlers {
- handler, err := custom.Handler(h.options)
- if err != nil {
- return err
- }
-
- h.router.HandleFunc(custom.Route(), handler.ServeHTTP).
- Methods(custom.Methods()...)
- }
-
h.registerHealthEndpoints()
h.registerProfileEndpoints()
h.registerRoutesEndpoint()
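
The net effect of the route changes above: the bare read endpoints are now dispatched per request by the query router, while the /prometheus and /m3query prefixes pin an engine. A sketch of exercising all three, assuming a coordinator on localhost:7201 and that native.PromReadURL resolves to /api/v1/query_range; query values are illustrative:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	paths := []string{
		"/api/v1/query_range",            // dispatched by the query router
		"/prometheus/api/v1/query_range", // always the PromQL engine
		"/m3query/api/v1/query_range",    // always the M3 query engine
	}
	for _, p := range paths {
		resp, err := http.Get("http://localhost:7201" + p +
			"?query=up&start=0&end=100&step=15")
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		fmt.Println(p, resp.Status)
	}
}
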
diff --git a/src/query/api/v1/httpd/handler_test.go b/src/query/api/v1/httpd/handler_test.go
index d71d4cfbed..078ee2e68c 100644
--- a/src/query/api/v1/httpd/handler_test.go
+++ b/src/query/api/v1/httpd/handler_test.go
@@ -89,7 +89,7 @@ func setupHandler(
customHandlers ...options.CustomHandler,
) (*Handler, error) {
instrumentOpts := instrument.NewOptions()
- downsamplerAndWriter := ingest.NewDownsamplerAndWriter(store, nil, testWorkerPool)
+ downsamplerAndWriter := ingest.NewDownsamplerAndWriter(store, nil, testWorkerPool, instrument.NewOptions())
engine := newEngine(store, time.Minute, nil, instrumentOpts)
opts, err := options.NewHandlerOptions(
downsamplerAndWriter,
@@ -97,6 +97,7 @@ func setupHandler(
engine,
nil,
nil,
+ nil,
config.Configuration{LookbackDuration: &defaultLookbackDuration},
nil,
nil,
@@ -106,6 +107,8 @@ func setupHandler(
defaultCPUProfileduration,
defaultPlacementServices,
svcDefaultOptions,
+ NewQueryRouter(),
+ NewQueryRouter(),
)
if err != nil {
@@ -115,38 +118,10 @@ func setupHandler(
return NewHandler(opts, customHandlers...), nil
}
-func TestHandlerFetchTimeoutError(t *testing.T) {
- ctrl := gomock.NewController(t)
- storage, _ := m3.NewStorageAndSession(t, ctrl)
- downsamplerAndWriter := ingest.NewDownsamplerAndWriter(storage, nil, testWorkerPool)
-
- negValue := -1 * time.Second
- dbconfig := &dbconfig.DBConfiguration{Client: client.Configuration{FetchTimeout: &negValue}}
- engine := newEngine(storage, time.Minute, nil, instrument.NewOptions())
- cfg := config.Configuration{LookbackDuration: &defaultLookbackDuration}
- _, err := options.NewHandlerOptions(
- downsamplerAndWriter,
- makeTagOptions(),
- engine,
- nil,
- nil,
- cfg,
- dbconfig,
- nil,
- handleroptions.NewFetchOptionsBuilder(handleroptions.FetchOptionsBuilderOptions{}),
- models.QueryContextOptions{},
- instrument.NewOptions(),
- defaultCPUProfileduration,
- defaultPlacementServices,
- svcDefaultOptions)
-
- require.Error(t, err)
-}
-
func TestHandlerFetchTimeout(t *testing.T) {
ctrl := gomock.NewController(t)
storage, _ := m3.NewStorageAndSession(t, ctrl)
- downsamplerAndWriter := ingest.NewDownsamplerAndWriter(storage, nil, testWorkerPool)
+ downsamplerAndWriter := ingest.NewDownsamplerAndWriter(storage, nil, testWorkerPool, instrument.NewOptions())
fourMin := 4 * time.Minute
dbconfig := &dbconfig.DBConfiguration{Client: client.Configuration{FetchTimeout: &fourMin}}
@@ -158,6 +133,7 @@ func TestHandlerFetchTimeout(t *testing.T) {
engine,
nil,
nil,
+ nil,
cfg,
dbconfig,
nil,
@@ -166,7 +142,10 @@ func TestHandlerFetchTimeout(t *testing.T) {
instrument.NewOptions(),
defaultCPUProfileduration,
defaultPlacementServices,
- svcDefaultOptions)
+ svcDefaultOptions,
+ nil,
+ nil,
+ )
require.NoError(t, err)
h := NewHandler(opts)
@@ -185,7 +164,7 @@ func TestPromRemoteReadGet(t *testing.T) {
err = h.RegisterRoutes()
require.NoError(t, err, "unable to register routes")
h.Router().ServeHTTP(res, req)
- require.Equal(t, res.Code, http.StatusMethodNotAllowed, "GET method not defined")
+ require.Equal(t, http.StatusBadRequest, res.Code)
}
func TestPromRemoteReadPost(t *testing.T) {
@@ -199,33 +178,59 @@ func TestPromRemoteReadPost(t *testing.T) {
err = h.RegisterRoutes()
require.NoError(t, err, "unable to register routes")
h.Router().ServeHTTP(res, req)
- require.Equal(t, res.Code, http.StatusBadRequest, "Empty request")
+ require.Equal(t, http.StatusBadRequest, res.Code, "Empty request")
}
func TestPromNativeReadGet(t *testing.T) {
- req := httptest.NewRequest("GET", native.PromReadURL, nil)
- res := httptest.NewRecorder()
- ctrl := gomock.NewController(t)
- storage, _ := m3.NewStorageAndSession(t, ctrl)
+ tests := []struct {
+ routePrefix string
+ }{
+ {""},
+ {"/prometheus"},
+ {"/m3query"},
+ }
- h, err := setupHandler(storage)
- require.NoError(t, err, "unable to setup handler")
- h.RegisterRoutes()
- h.Router().ServeHTTP(res, req)
- require.Equal(t, res.Code, http.StatusBadRequest, "Empty request")
+ for _, tt := range tests {
+ url := tt.routePrefix + native.PromReadURL
+ t.Run("Testing endpoint GET "+url, func(t *testing.T) {
+ req := httptest.NewRequest("GET", url, nil)
+ res := httptest.NewRecorder()
+ ctrl := gomock.NewController(t)
+ storage, _ := m3.NewStorageAndSession(t, ctrl)
+
+ h, err := setupHandler(storage)
+ require.NoError(t, err, "unable to setup handler")
+ h.RegisterRoutes()
+ h.Router().ServeHTTP(res, req)
+ require.Equal(t, http.StatusBadRequest, res.Code, "Empty request")
+ })
+ }
}
func TestPromNativeReadPost(t *testing.T) {
- req := httptest.NewRequest("POST", native.PromReadURL, nil)
- res := httptest.NewRecorder()
- ctrl := gomock.NewController(t)
- storage, _ := m3.NewStorageAndSession(t, ctrl)
+ tests := []struct {
+ routePrefix string
+ }{
+ {""},
+ {"/prometheus"},
+ {"/m3query"},
+ }
- h, err := setupHandler(storage)
- require.NoError(t, err, "unable to setup handler")
- h.RegisterRoutes()
- h.Router().ServeHTTP(res, req)
- require.Equal(t, res.Code, http.StatusBadRequest, "Empty request")
+ for _, tt := range tests {
+ url := tt.routePrefix + native.PromReadURL
+		t.Run("Testing endpoint POST "+url, func(t *testing.T) {
+ req := httptest.NewRequest("POST", url, nil)
+ res := httptest.NewRecorder()
+ ctrl := gomock.NewController(t)
+ storage, _ := m3.NewStorageAndSession(t, ctrl)
+
+ h, err := setupHandler(storage)
+ require.NoError(t, err, "unable to setup handler")
+ h.RegisterRoutes()
+ h.Router().ServeHTTP(res, req)
+ require.Equal(t, http.StatusBadRequest, res.Code, "Empty request")
+ })
+ }
}
func TestJSONWritePost(t *testing.T) {
@@ -238,7 +243,7 @@ func TestJSONWritePost(t *testing.T) {
require.NoError(t, err, "unable to setup handler")
h.RegisterRoutes()
h.Router().ServeHTTP(res, req)
- require.Equal(t, res.Code, http.StatusBadRequest, "Empty request")
+ require.Equal(t, http.StatusBadRequest, res.Code, "Empty request")
}
func TestRoutesGet(t *testing.T) {
@@ -393,14 +398,14 @@ func TestCustomRoutes(t *testing.T) {
ctrl := gomock.NewController(t)
store, _ := m3.NewStorageAndSession(t, ctrl)
instrumentOpts := instrument.NewOptions()
- downsamplerAndWriter := ingest.NewDownsamplerAndWriter(store, nil, testWorkerPool)
+ downsamplerAndWriter := ingest.NewDownsamplerAndWriter(store, nil, testWorkerPool, instrument.NewOptions())
engine := newEngine(store, time.Minute, nil, instrumentOpts)
opts, err := options.NewHandlerOptions(
- downsamplerAndWriter, makeTagOptions().SetMetricName([]byte("z")), engine, nil, nil,
+ downsamplerAndWriter, makeTagOptions().SetMetricName([]byte("z")), engine, nil, nil, nil,
config.Configuration{LookbackDuration: &defaultLookbackDuration}, nil, nil,
handleroptions.NewFetchOptionsBuilder(handleroptions.FetchOptionsBuilderOptions{}),
models.QueryContextOptions{}, instrumentOpts, defaultCPUProfileduration,
- defaultPlacementServices, svcDefaultOptions,
+ defaultPlacementServices, svcDefaultOptions, NewQueryRouter(), NewQueryRouter(),
)
require.NoError(t, err)
diff --git a/src/query/api/v1/httpd/router.go b/src/query/api/v1/httpd/router.go
new file mode 100644
index 0000000000..a0ae48ee7e
--- /dev/null
+++ b/src/query/api/v1/httpd/router.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package httpd
+
+import (
+ "net/http"
+ "strings"
+
+ "github.com/m3db/m3/src/query/api/v1/options"
+)
+
+type router struct {
+ promqlHandler func(http.ResponseWriter, *http.Request)
+ m3QueryHandler func(http.ResponseWriter, *http.Request)
+ defaultQueryEngine options.QueryEngine
+}
+
+// NewQueryRouter returns a new query router that dispatches queries to the
+// promql or m3query handler based on the engine header or URL parameter.
+func NewQueryRouter() options.QueryRouter {
+ return &router{}
+}
+
+func (r *router) Setup(opts options.QueryRouterOptions) {
+ defaultEngine := opts.DefaultQueryEngine
+ if defaultEngine != options.PrometheusEngine && defaultEngine != options.M3QueryEngine {
+ defaultEngine = options.PrometheusEngine
+ }
+
+ r.defaultQueryEngine = defaultEngine
+ r.promqlHandler = opts.PromqlHandler
+ r.m3QueryHandler = opts.M3QueryHandler
+}
+
+func (r *router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ engine := strings.ToLower(req.Header.Get(EngineHeaderName))
+ urlParam := req.URL.Query().Get(EngineURLParam)
+
+ if len(urlParam) > 0 {
+ engine = strings.ToLower(urlParam)
+ }
+
+ if !options.IsQueryEngineSet(engine) {
+ engine = string(r.defaultQueryEngine)
+ }
+
+ w.Header().Add(EngineHeaderName, engine)
+
+ if engine == string(options.M3QueryEngine) {
+ r.m3QueryHandler(w, req)
+ return
+ }
+
+ r.promqlHandler(w, req)
+}
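
A self-contained sketch of the router's dispatch rules, using only exported pieces from this diff; note the engine URL parameter takes precedence over the header, and unrecognized values fall back to the configured default engine:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/m3db/m3/src/query/api/v1/httpd"
	"github.com/m3db/m3/src/query/api/v1/options"
)

func main() {
	r := httpd.NewQueryRouter()
	r.Setup(options.QueryRouterOptions{
		DefaultQueryEngine: options.PrometheusEngine,
		PromqlHandler: func(http.ResponseWriter, *http.Request) {
			fmt.Println("promql engine")
		},
		M3QueryHandler: func(http.ResponseWriter, *http.Request) {
			fmt.Println("m3query engine")
		},
	})

	// Header says prometheus, URL param says m3query: the param wins.
	req := httptest.NewRequest("GET", "/query?engine=m3query", nil)
	req.Header.Set(httpd.EngineHeaderName, "prometheus")
	r.ServeHTTP(httptest.NewRecorder(), req) // prints "m3query engine"
}
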
diff --git a/src/query/api/v1/httpd/router_test.go b/src/query/api/v1/httpd/router_test.go
new file mode 100644
index 0000000000..2e741479f5
--- /dev/null
+++ b/src/query/api/v1/httpd/router_test.go
@@ -0,0 +1,85 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package httpd
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/m3db/m3/src/query/api/v1/options"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHandlerSwitch(t *testing.T) {
+ promqlCalled := 0
+ promqlHandler := func(w http.ResponseWriter, req *http.Request) {
+ promqlCalled++
+ }
+
+ m3qCalled := 0
+ m3qHandler := func(w http.ResponseWriter, req *http.Request) {
+ m3qCalled++
+ }
+
+ router := NewQueryRouter()
+ router.Setup(options.QueryRouterOptions{
+ DefaultQueryEngine: "prometheus",
+ PromqlHandler: promqlHandler,
+ M3QueryHandler: m3qHandler,
+ })
+ rr := httptest.NewRecorder()
+
+ req, err := http.NewRequest("GET", "/query?query=sum(metric)", nil)
+ require.NoError(t, err)
+ router.ServeHTTP(rr, req)
+ assert.Equal(t, 1, promqlCalled)
+ assert.Equal(t, 0, m3qCalled)
+
+ req, err = http.NewRequest("GET", "/query?query=sum(metric)&engine=m3query", nil)
+ require.NoError(t, err)
+ router.ServeHTTP(rr, req)
+ assert.Equal(t, 1, promqlCalled)
+ assert.Equal(t, 1, m3qCalled)
+
+	req, err = http.NewRequest("GET", "/query?query=sum(metric)", nil)
+	require.NoError(t, err)
+	req.Header.Add(EngineHeaderName, "m3query")
+ router.ServeHTTP(rr, req)
+ assert.Equal(t, 1, promqlCalled)
+ assert.Equal(t, 2, m3qCalled)
+
+	req, err = http.NewRequest("GET", "/query?query=sum(metric)", nil)
+	require.NoError(t, err)
+	req.Header.Add(EngineHeaderName, "M3QUERY")
+ router.ServeHTTP(rr, req)
+ assert.Equal(t, 1, promqlCalled)
+ assert.Equal(t, 3, m3qCalled)
+
+	req, err = http.NewRequest("GET", "/query?query=sum(metric)", nil)
+	require.NoError(t, err)
+	req.Header.Add(EngineHeaderName, "prometheus")
+ router.ServeHTTP(rr, req)
+ assert.Equal(t, 2, promqlCalled)
+ assert.Equal(t, 3, m3qCalled)
+}
diff --git a/src/query/api/v1/options/handler.go b/src/query/api/v1/options/handler.go
index 85c8fc6fa6..63b16db547 100644
--- a/src/query/api/v1/options/handler.go
+++ b/src/query/api/v1/options/handler.go
@@ -21,8 +21,9 @@
package options
import (
- "fmt"
+ "io"
"net/http"
+ "strings"
"time"
clusterclient "github.com/m3db/m3/src/cluster/client"
@@ -36,11 +37,30 @@ import (
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/m3"
+ "github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
+ "github.com/prometheus/prometheus/promql"
)
-const defaultTimeout = 30 * time.Second
+// QueryEngine is a type of query engine.
+type QueryEngine string
+
+const (
+ // PrometheusEngine is the prometheus query engine type.
+ PrometheusEngine QueryEngine = "prometheus"
+	// M3QueryEngine is the M3 query engine type.
+ M3QueryEngine QueryEngine = "m3query"
+)
+
+// OptionTransformFn transforms given handler options.
+type OptionTransformFn func(opts HandlerOptions) HandlerOptions
+
+// CustomHandlerOptions is a list of custom handler options.
+type CustomHandlerOptions struct {
+ CustomHandlers []CustomHandler
+ OptionTransformFn OptionTransformFn
+}
// CustomHandler allows for custom third party http handlers.
type CustomHandler interface {
@@ -52,6 +72,23 @@ type CustomHandler interface {
Handler(handlerOptions HandlerOptions) (http.Handler, error)
}
+// QueryRouter is responsible for routing queries between promql and m3query.
+type QueryRouter interface {
+ Setup(opts QueryRouterOptions)
+ ServeHTTP(w http.ResponseWriter, req *http.Request)
+}
+
+// QueryRouterOptions defines options for QueryRouter.
+type QueryRouterOptions struct {
+ DefaultQueryEngine QueryEngine
+ PromqlHandler func(http.ResponseWriter, *http.Request)
+ M3QueryHandler func(http.ResponseWriter, *http.Request)
+}
+
+// RemoteReadRenderer renders remote read output.
+type RemoteReadRenderer func(io.Writer, []*ts.Series,
+ models.RequestParams, bool)
+
// HandlerOptions represents handler options.
type HandlerOptions interface {
// CreatedAt returns the time the options were created.
@@ -72,6 +109,11 @@ type HandlerOptions interface {
// SetEngine sets the engine.
SetEngine(e executor.Engine) HandlerOptions
+ // PrometheusEngine returns the prometheus engine.
+ PrometheusEngine() *promql.Engine
+ // SetPrometheusEngine sets the prometheus engine.
+ SetPrometheusEngine(e *promql.Engine) HandlerOptions
+
// Clusters returns the clusters.
Clusters() m3.Clusters
// SetClusters sets the clusters.
@@ -137,10 +179,27 @@ type HandlerOptions interface {
// SetNowFn sets the now function.
SetNowFn(f clock.NowFn) HandlerOptions
- // InstrumentOpts returns the instrumentation optoins.
+ // InstrumentOpts returns the instrumentation options.
InstrumentOpts() instrument.Options
// SetInstrumentOpts sets instrumentation options.
SetInstrumentOpts(opts instrument.Options) HandlerOptions
+
+ // DefaultQueryEngine returns the default query engine.
+ DefaultQueryEngine() QueryEngine
+ // SetDefaultQueryEngine returns the default query engine.
+ SetDefaultQueryEngine(value QueryEngine) HandlerOptions
+
+ // QueryRouter is a reference to the router which is responsible for routing
+ // queries between PromQL and M3Query.
+ QueryRouter() QueryRouter
+ // SetQueryRouter sets query router.
+ SetQueryRouter(value QueryRouter) HandlerOptions
+
+ // InstantQueryRouter is a reference to the router which is responsible for
+ // routing instant queries between PromQL and M3Query.
+ InstantQueryRouter() QueryRouter
+ // SetInstantQueryRouter sets query router for instant queries.
+ SetInstantQueryRouter(value QueryRouter) HandlerOptions
}
// HandlerOptions represents handler options.
@@ -148,6 +207,8 @@ type handlerOptions struct {
storage storage.Storage
downsamplerAndWriter ingest.DownsamplerAndWriter
engine executor.Engine
+ prometheusEngine *promql.Engine
+ defaultEngine QueryEngine
clusters m3.Clusters
clusterClient clusterclient.Client
config config.Configuration
@@ -163,6 +224,8 @@ type handlerOptions struct {
placementServiceNames []string
serviceOptionDefaults []handleroptions.ServiceOptionsDefault
nowFn clock.NowFn
+ queryRouter QueryRouter
+ instantQueryRouter QueryRouter
}
// EmptyHandlerOptions returns default handler options.
@@ -178,6 +241,7 @@ func NewHandlerOptions(
downsamplerAndWriter ingest.DownsamplerAndWriter,
tagOptions models.TagOptions,
engine executor.Engine,
+ prometheusEngine *promql.Engine,
m3dbClusters m3.Clusters,
clusterClient clusterclient.Client,
cfg config.Configuration,
@@ -189,31 +253,28 @@ func NewHandlerOptions(
cpuProfileDuration time.Duration,
placementServiceNames []string,
serviceOptionDefaults []handleroptions.ServiceOptionsDefault,
+ queryRouter QueryRouter,
+ instantQueryRouter QueryRouter,
) (HandlerOptions, error) {
- var timeoutOpts = &prometheus.TimeoutOpts{}
- if embeddedDbCfg == nil || embeddedDbCfg.Client.FetchTimeout == nil {
- timeoutOpts.FetchTimeout = defaultTimeout
- } else {
- timeout := *embeddedDbCfg.Client.FetchTimeout
- if timeout <= 0 {
- return nil,
- fmt.Errorf("m3db client fetch timeout should be > 0, is %d", timeout)
- }
-
- timeoutOpts.FetchTimeout = timeout
+ timeout := cfg.Query.TimeoutOrDefault()
+ if embeddedDbCfg != nil &&
+ embeddedDbCfg.Client.FetchTimeout != nil &&
+ *embeddedDbCfg.Client.FetchTimeout > timeout {
+ timeout = *embeddedDbCfg.Client.FetchTimeout
}
return &handlerOptions{
storage: downsamplerAndWriter.Storage(),
downsamplerAndWriter: downsamplerAndWriter,
engine: engine,
+ prometheusEngine: prometheusEngine,
+ defaultEngine: getDefaultQueryEngine(cfg.Query.DefaultEngine),
clusters: m3dbClusters,
clusterClient: clusterClient,
config: cfg,
embeddedDbCfg: embeddedDbCfg,
createdAt: time.Now(),
tagOptions: tagOptions,
- timeoutOpts: timeoutOpts,
enforcer: enforcer,
fetchOptionsBuilder: fetchOptionsBuilder,
queryContextOptions: queryContextOptions,
@@ -222,6 +283,11 @@ func NewHandlerOptions(
placementServiceNames: placementServiceNames,
serviceOptionDefaults: serviceOptionDefaults,
nowFn: time.Now,
+ timeoutOpts: &prometheus.TimeoutOpts{
+ FetchTimeout: timeout,
+ },
+ queryRouter: queryRouter,
+ instantQueryRouter: instantQueryRouter,
}, nil
}
@@ -260,6 +326,16 @@ func (o *handlerOptions) SetEngine(e executor.Engine) HandlerOptions {
return &opts
}
+func (o *handlerOptions) PrometheusEngine() *promql.Engine {
+ return o.prometheusEngine
+}
+
+func (o *handlerOptions) SetPrometheusEngine(e *promql.Engine) HandlerOptions {
+ opts := *o
+ opts.prometheusEngine = e
+ return &opts
+}
+
func (o *handlerOptions) Clusters() m3.Clusters {
return o.clusters
}
@@ -406,3 +482,50 @@ func (o *handlerOptions) SetNowFn(n clock.NowFn) HandlerOptions {
options.nowFn = n
return &options
}
+
+func (o *handlerOptions) DefaultQueryEngine() QueryEngine {
+ return o.defaultEngine
+}
+
+func (o *handlerOptions) SetDefaultQueryEngine(value QueryEngine) HandlerOptions {
+ options := *o
+ options.defaultEngine = value
+ return &options
+}
+
+func getDefaultQueryEngine(cfgEngine string) QueryEngine {
+ engine := PrometheusEngine
+ if strings.ToLower(cfgEngine) == string(M3QueryEngine) {
+ engine = M3QueryEngine
+ }
+ return engine
+}
+
+// IsQueryEngineSet returns true if the given value names a known query engine.
+func IsQueryEngineSet(v string) bool {
+ engine := strings.ToLower(v)
+ return engine == string(PrometheusEngine) || engine == string(M3QueryEngine)
+}
+
+func (o *handlerOptions) QueryRouter() QueryRouter {
+ return o.queryRouter
+}
+
+func (o *handlerOptions) SetQueryRouter(value QueryRouter) HandlerOptions {
+ opts := *o
+ opts.queryRouter = value
+ return &opts
+}
+
+func (o *handlerOptions) InstantQueryRouter() QueryRouter {
+ return o.instantQueryRouter
+}
+
+func (o *handlerOptions) SetInstantQueryRouter(value QueryRouter) HandlerOptions {
+ opts := *o
+ opts.instantQueryRouter = value
+ return &opts
+}
diff --git a/src/query/storage/m3/multi_fetch_tags_result_test.go b/src/query/api/v1/options/handler_test.go
similarity index 58%
rename from src/query/storage/m3/multi_fetch_tags_result_test.go
rename to src/query/api/v1/options/handler_test.go
index 98db0558d1..a8a6ba372e 100644
--- a/src/query/storage/m3/multi_fetch_tags_result_test.go
+++ b/src/query/api/v1/options/handler_test.go
@@ -18,39 +18,50 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package m3
+package options
import (
"testing"
- "github.com/m3db/m3/src/dbnode/client"
- "github.com/m3db/m3/src/query/block"
-
- "github.com/golang/mock/gomock"
- "github.com/stretchr/testify/assert"
+ "github.com/influxdata/influxdb/pkg/testing/assert"
)
-func TestExhaustiveTagMerge(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
+func TestDefaultQueryEngineParse(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected QueryEngine
+ }{
+ {
+ name: "given empty sets to prometheus",
+ input: "",
+ expected: PrometheusEngine,
+ },
+ {
+ name: "given random sets to prometheus",
+ input: "random",
+ expected: PrometheusEngine,
+ },
+ {
+ name: "given prometheus sets to prometheus",
+ input: "prometheus",
+ expected: PrometheusEngine,
+ },
+ {
+ name: "given m3query sets to m3query",
+ input: "m3query",
+ expected: M3QueryEngine,
+ },
+ {
+ name: "given camelcase M3Query sets to m3query",
+ input: "M3Query",
+ expected: M3QueryEngine,
+ },
+ }
- r := NewMultiFetchTagsResult()
- for _, tt := range exhaustTests {
+ for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- for _, ex := range tt.exhaustives {
- it := client.NewMockTaggedIDsIterator(ctrl)
- it.EXPECT().Next().Return(false)
- it.EXPECT().Err().Return(nil)
- it.EXPECT().Finalize().Return()
- meta := block.NewResultMetadata()
- meta.Exhaustive = ex
- r.Add(it, meta, nil)
- }
-
- tagResult, err := r.FinalResult()
- assert.NoError(t, err)
- assert.Equal(t, tt.expected, tagResult.Metadata.Exhaustive)
- assert.NoError(t, r.Close())
+ assert.Equal(t, tt.expected, getDefaultQueryEngine(tt.input))
})
}
}
diff --git a/src/query/block/block_mock.go b/src/query/block/block_mock.go
index f7b0eb039b..33d6e109cd 100644
--- a/src/query/block/block_mock.go
+++ b/src/query/block/block_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/query/block (interfaces: Block,StepIter,Builder,Step,SeriesIter)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/query/block/column.go b/src/query/block/column.go
index d4d3bab9a9..86f406a566 100644
--- a/src/query/block/column.go
+++ b/src/query/block/column.go
@@ -237,6 +237,10 @@ func (cb ColumnBlockBuilder) PopulateColumns(size int) {
for i := range cb.block.columns {
cb.block.columns[i] = column{Values: cols[size*i : size*(i+1)]}
}
+
+ // NB: initialize a clean series meta list with given cap and len,
+ // as row operations are done by arbitrary index.
+ cb.block.seriesMeta = make([]SeriesMeta, size)
}
// SetRow sets a given block row to the given values and metadata.
diff --git a/src/query/block/column_test.go b/src/query/block/column_test.go
index d2f86341af..ee1c9392bc 100644
--- a/src/query/block/column_test.go
+++ b/src/query/block/column_test.go
@@ -22,20 +22,26 @@ package block
import (
"context"
+ "fmt"
"testing"
+ "time"
"github.com/m3db/m3/src/query/cost"
"github.com/m3db/m3/src/query/models"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"github.com/uber-go/tally"
)
-func TestColumnBuilderInfoTypes(t *testing.T) {
- ctx := models.NewQueryContext(context.Background(),
+func makeTestQueryContext() *models.QueryContext {
+ return models.NewQueryContext(context.Background(),
tally.NoopScope, cost.NoopChainedEnforcer(),
models.QueryContextOptions{})
+}
+func TestColumnBuilderInfoTypes(t *testing.T) {
+ ctx := makeTestQueryContext()
builder := NewColumnBlockBuilder(ctx, Metadata{}, []SeriesMeta{})
block := builder.Build()
assert.Equal(t, BlockDecompressed, block.Info().blockType)
@@ -43,3 +49,60 @@ func TestColumnBuilderInfoTypes(t *testing.T) {
block = builder.BuildAsType(BlockScalar)
assert.Equal(t, BlockScalar, block.Info().blockType)
}
+
+func TestSetRow(t *testing.T) {
+ buildMeta := func(i int) SeriesMeta {
+ name := fmt.Sprint(i)
+
+ return SeriesMeta{
+ Name: []byte(name),
+ Tags: models.MustMakeTags("name", name),
+ }
+ }
+
+ size := 10
+ metas := make([]SeriesMeta, size)
+ for i := range metas {
+ metas[i] = buildMeta(i)
+ }
+
+ ctx := makeTestQueryContext()
+ builder := NewColumnBlockBuilder(ctx, Metadata{
+ Bounds: models.Bounds{StepSize: time.Minute, Duration: time.Minute},
+ }, nil)
+
+ require.NoError(t, builder.AddCols(1))
+ builder.PopulateColumns(size)
+ // NB: set the row metas backwards.
+ j := 0
+ for i := size - 1; i >= 0; i-- {
+ err := builder.SetRow(j, []float64{float64(i)}, metas[i])
+ require.NoError(t, err)
+ j++
+ }
+
+ bl := builder.Build()
+ it, err := bl.StepIter()
+ require.NoError(t, err)
+
+ actualMetas := it.SeriesMeta()
+ for i, m := range actualMetas {
+ ex := fmt.Sprint(size - 1 - i)
+ assert.Equal(t, ex, string(m.Name))
+ require.Equal(t, 1, m.Tags.Len())
+ tag, found := m.Tags.Get([]byte("name"))
+ require.True(t, found)
+ assert.Equal(t, ex, string(tag))
+ }
+
+ assert.True(t, it.Next())
+ exVals := make([]float64, size)
+ for i := range exVals {
+ exVals[i] = float64(size - 1 - i)
+ }
+
+ vals := it.Current().Values()
+ assert.Equal(t, exVals, vals)
+ assert.False(t, it.Next())
+ assert.NoError(t, it.Err())
+}
diff --git a/src/query/block/meta.go b/src/query/block/meta.go
index 1fd70e5e29..0272b5528a 100644
--- a/src/query/block/meta.go
+++ b/src/query/block/meta.go
@@ -112,6 +112,35 @@ func combineWarnings(a, b Warnings) Warnings {
return nil
}
+// Equals determines if two result metadatas are equal.
+func (m ResultMetadata) Equals(n ResultMetadata) bool {
+ if m.Exhaustive != n.Exhaustive {
+ return false
+ }
+
+ if m.LocalOnly != n.LocalOnly {
+ return false
+ }
+
+ if len(m.Resolutions) != len(n.Resolutions) {
+ return false
+ }
+
+ for i, mRes := range m.Resolutions {
+ if n.Resolutions[i] != mRes {
+ return false
+ }
+ }
+
+ if len(m.Warnings) != len(n.Warnings) {
+ return false
+ }
+
+ for i, mWarn := range m.Warnings {
+ if !n.Warnings[i].equals(mWarn) {
+ return false
+ }
+ }
+
+ return true
+}
+
// CombineMetadata combines two result metadatas.
func (m ResultMetadata) CombineMetadata(other ResultMetadata) ResultMetadata {
meta := ResultMetadata{
@@ -159,6 +188,25 @@ func (w Warnings) addWarnings(warnings ...Warning) Warnings {
return w
}
+// WarningStrings converts warnings to a slice of strings for presentation.
+func (m ResultMetadata) WarningStrings() []string {
+ size := len(m.Warnings)
+ if !m.Exhaustive {
+ size++
+ }
+
+ strs := make([]string, 0, size)
+ for _, warn := range m.Warnings {
+ strs = append(strs, warn.Header())
+ }
+
+ if !m.Exhaustive {
+ strs = append(strs, "m3db exceeded query limit: results not exhaustive")
+ }
+
+ return strs
+}
+
// Warning is a message that indicates potential partial or incomplete results.
type Warning struct {
// Name is the name of the store originating the warning.
diff --git a/src/query/cost/cost_mock.go b/src/query/cost/cost_mock.go
index b0d92fae56..85abd1379b 100644
--- a/src/query/cost/cost_mock.go
+++ b/src/query/cost/cost_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/m3db/m3/src/query/cost/go
+// Source: github.com/m3db/m3/src/query/cost (interfaces: ChainedEnforcer,ChainedReporter)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -56,46 +56,31 @@ func (m *MockChainedEnforcer) EXPECT() *MockChainedEnforcerMockRecorder {
}
// Add mocks base method
-func (m *MockChainedEnforcer) Add(op cost0.Cost) cost0.Report {
+func (m *MockChainedEnforcer) Add(arg0 cost0.Cost) cost0.Report {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Add", op)
+ ret := m.ctrl.Call(m, "Add", arg0)
ret0, _ := ret[0].(cost0.Report)
return ret0
}
// Add indicates an expected call of Add
-func (mr *MockChainedEnforcerMockRecorder) Add(op interface{}) *gomock.Call {
+func (mr *MockChainedEnforcerMockRecorder) Add(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockChainedEnforcer)(nil).Add), op)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockChainedEnforcer)(nil).Add), arg0)
}
-// State mocks base method
-func (m *MockChainedEnforcer) State() (cost0.Report, cost0.Limit) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "State")
- ret0, _ := ret[0].(cost0.Report)
- ret1, _ := ret[1].(cost0.Limit)
- return ret0, ret1
-}
-
-// State indicates an expected call of State
-func (mr *MockChainedEnforcerMockRecorder) State() *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "State", reflect.TypeOf((*MockChainedEnforcer)(nil).State))
-}
-
-// Limit mocks base method
-func (m *MockChainedEnforcer) Limit() cost0.Limit {
+// Child mocks base method
+func (m *MockChainedEnforcer) Child(arg0 string) ChainedEnforcer {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Limit")
- ret0, _ := ret[0].(cost0.Limit)
+ ret := m.ctrl.Call(m, "Child", arg0)
+ ret0, _ := ret[0].(ChainedEnforcer)
return ret0
}
-// Limit indicates an expected call of Limit
-func (mr *MockChainedEnforcerMockRecorder) Limit() *gomock.Call {
+// Child indicates an expected call of Child
+func (mr *MockChainedEnforcerMockRecorder) Child(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Limit", reflect.TypeOf((*MockChainedEnforcer)(nil).Limit))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Child", reflect.TypeOf((*MockChainedEnforcer)(nil).Child), arg0)
}
// Clone mocks base method
@@ -112,44 +97,59 @@ func (mr *MockChainedEnforcerMockRecorder) Clone() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clone", reflect.TypeOf((*MockChainedEnforcer)(nil).Clone))
}
-// Reporter mocks base method
-func (m *MockChainedEnforcer) Reporter() cost0.EnforcerReporter {
+// Close mocks base method
+func (m *MockChainedEnforcer) Close() {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Reporter")
- ret0, _ := ret[0].(cost0.EnforcerReporter)
+ m.ctrl.Call(m, "Close")
+}
+
+// Close indicates an expected call of Close
+func (mr *MockChainedEnforcerMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockChainedEnforcer)(nil).Close))
+}
+
+// Limit mocks base method
+func (m *MockChainedEnforcer) Limit() cost0.Limit {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Limit")
+ ret0, _ := ret[0].(cost0.Limit)
return ret0
}
-// Reporter indicates an expected call of Reporter
-func (mr *MockChainedEnforcerMockRecorder) Reporter() *gomock.Call {
+// Limit indicates an expected call of Limit
+func (mr *MockChainedEnforcerMockRecorder) Limit() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reporter", reflect.TypeOf((*MockChainedEnforcer)(nil).Reporter))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Limit", reflect.TypeOf((*MockChainedEnforcer)(nil).Limit))
}
-// Child mocks base method
-func (m *MockChainedEnforcer) Child(resourceName string) ChainedEnforcer {
+// Reporter mocks base method
+func (m *MockChainedEnforcer) Reporter() cost0.EnforcerReporter {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "Child", resourceName)
- ret0, _ := ret[0].(ChainedEnforcer)
+ ret := m.ctrl.Call(m, "Reporter")
+ ret0, _ := ret[0].(cost0.EnforcerReporter)
return ret0
}
-// Child indicates an expected call of Child
-func (mr *MockChainedEnforcerMockRecorder) Child(resourceName interface{}) *gomock.Call {
+// Reporter indicates an expected call of Reporter
+func (mr *MockChainedEnforcerMockRecorder) Reporter() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Child", reflect.TypeOf((*MockChainedEnforcer)(nil).Child), resourceName)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reporter", reflect.TypeOf((*MockChainedEnforcer)(nil).Reporter))
}
-// Close mocks base method
-func (m *MockChainedEnforcer) Close() {
+// State mocks base method
+func (m *MockChainedEnforcer) State() (cost0.Report, cost0.Limit) {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "Close")
+ ret := m.ctrl.Call(m, "State")
+ ret0, _ := ret[0].(cost0.Report)
+ ret1, _ := ret[1].(cost0.Limit)
+ return ret0, ret1
}
-// Close indicates an expected call of Close
-func (mr *MockChainedEnforcerMockRecorder) Close() *gomock.Call {
+// State indicates an expected call of State
+func (mr *MockChainedEnforcerMockRecorder) State() *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockChainedEnforcer)(nil).Close))
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "State", reflect.TypeOf((*MockChainedEnforcer)(nil).State))
}
// MockChainedReporter is a mock of ChainedReporter interface
@@ -175,62 +175,62 @@ func (m *MockChainedReporter) EXPECT() *MockChainedReporterMockRecorder {
return m.recorder
}
-// ReportCost mocks base method
-func (m *MockChainedReporter) ReportCost(c cost0.Cost) {
+// OnChildClose mocks base method
+func (m *MockChainedReporter) OnChildClose(arg0 cost0.Cost) {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "ReportCost", c)
+ m.ctrl.Call(m, "OnChildClose", arg0)
}
-// ReportCost indicates an expected call of ReportCost
-func (mr *MockChainedReporterMockRecorder) ReportCost(c interface{}) *gomock.Call {
+// OnChildClose indicates an expected call of OnChildClose
+func (mr *MockChainedReporterMockRecorder) OnChildClose(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportCost", reflect.TypeOf((*MockChainedReporter)(nil).ReportCost), c)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnChildClose", reflect.TypeOf((*MockChainedReporter)(nil).OnChildClose), arg0)
}
-// ReportCurrent mocks base method
-func (m *MockChainedReporter) ReportCurrent(c cost0.Cost) {
+// OnClose mocks base method
+func (m *MockChainedReporter) OnClose(arg0 cost0.Cost) {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "ReportCurrent", c)
+ m.ctrl.Call(m, "OnClose", arg0)
}
-// ReportCurrent indicates an expected call of ReportCurrent
-func (mr *MockChainedReporterMockRecorder) ReportCurrent(c interface{}) *gomock.Call {
+// OnClose indicates an expected call of OnClose
+func (mr *MockChainedReporterMockRecorder) OnClose(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportCurrent", reflect.TypeOf((*MockChainedReporter)(nil).ReportCurrent), c)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnClose", reflect.TypeOf((*MockChainedReporter)(nil).OnClose), arg0)
}
-// ReportOverLimit mocks base method
-func (m *MockChainedReporter) ReportOverLimit(enabled bool) {
+// ReportCost mocks base method
+func (m *MockChainedReporter) ReportCost(arg0 cost0.Cost) {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "ReportOverLimit", enabled)
+ m.ctrl.Call(m, "ReportCost", arg0)
}
-// ReportOverLimit indicates an expected call of ReportOverLimit
-func (mr *MockChainedReporterMockRecorder) ReportOverLimit(enabled interface{}) *gomock.Call {
+// ReportCost indicates an expected call of ReportCost
+func (mr *MockChainedReporterMockRecorder) ReportCost(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportOverLimit", reflect.TypeOf((*MockChainedReporter)(nil).ReportOverLimit), enabled)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportCost", reflect.TypeOf((*MockChainedReporter)(nil).ReportCost), arg0)
}
-// OnChildClose mocks base method
-func (m *MockChainedReporter) OnChildClose(currentCost cost0.Cost) {
+// ReportCurrent mocks base method
+func (m *MockChainedReporter) ReportCurrent(arg0 cost0.Cost) {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "OnChildClose", currentCost)
+ m.ctrl.Call(m, "ReportCurrent", arg0)
}
-// OnChildClose indicates an expected call of OnChildClose
-func (mr *MockChainedReporterMockRecorder) OnChildClose(currentCost interface{}) *gomock.Call {
+// ReportCurrent indicates an expected call of ReportCurrent
+func (mr *MockChainedReporterMockRecorder) ReportCurrent(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnChildClose", reflect.TypeOf((*MockChainedReporter)(nil).OnChildClose), currentCost)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportCurrent", reflect.TypeOf((*MockChainedReporter)(nil).ReportCurrent), arg0)
}
-// OnClose mocks base method
-func (m *MockChainedReporter) OnClose(currentCost cost0.Cost) {
+// ReportOverLimit mocks base method
+func (m *MockChainedReporter) ReportOverLimit(arg0 bool) {
m.ctrl.T.Helper()
- m.ctrl.Call(m, "OnClose", currentCost)
+ m.ctrl.Call(m, "ReportOverLimit", arg0)
}
-// OnClose indicates an expected call of OnClose
-func (mr *MockChainedReporterMockRecorder) OnClose(currentCost interface{}) *gomock.Call {
+// ReportOverLimit indicates an expected call of ReportOverLimit
+func (mr *MockChainedReporterMockRecorder) ReportOverLimit(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnClose", reflect.TypeOf((*MockChainedReporter)(nil).OnClose), currentCost)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportOverLimit", reflect.TypeOf((*MockChainedReporter)(nil).ReportOverLimit), arg0)
}
diff --git a/src/query/executor/engine.go b/src/query/executor/engine.go
index 10ca4f1cbe..584d141388 100644
--- a/src/query/executor/engine.go
+++ b/src/query/executor/engine.go
@@ -24,6 +24,7 @@ import (
"context"
"time"
+ "github.com/m3db/m3/src/query/block"
qcost "github.com/m3db/m3/src/query/cost"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser"
@@ -43,12 +44,6 @@ type QueryOptions struct {
QueryContextOptions models.QueryContextOptions
}
-// Query is the result after execution.
-type Query struct {
- Err error
- Result Result
-}
-
// NewEngine returns a new instance of QueryExecutor.
func NewEngine(
engineOpts EngineOptions,
@@ -124,7 +119,7 @@ func (e *engine) ExecuteExpr(
opts *QueryOptions,
fetchOpts *storage.FetchOptions,
params models.RequestParams,
-) (Result, error) {
+) (block.Block, error) {
perQueryEnforcer := e.opts.GlobalEnforcer().Child(qcost.QueryLevel)
defer perQueryEnforcer.Close()
req := newRequest(e, params, fetchOpts, e.opts.InstrumentOptions())
@@ -147,20 +142,16 @@ func (e *engine) ExecuteExpr(
sp, ctx := opentracing.StartSpanFromContext(ctx, "executing")
defer sp.Finish()
- result := state.resultNode
scope := e.opts.InstrumentOptions().MetricsScope()
queryCtx := models.NewQueryContext(ctx, scope, perQueryEnforcer,
opts.QueryContextOptions)
- go func() {
- if err := state.Execute(queryCtx); err != nil {
- result.abort(err)
- } else {
- result.done()
- }
- }()
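+ // Execute synchronously; on failure, close the sink with the error so any
+ // waiters are released before returning.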
+ if err := state.Execute(queryCtx); err != nil {
+ state.sink.closeWithError(err)
+ return nil, err
+ }
- return result, nil
+ return state.sink.getValue()
}
func (e *engine) Options() EngineOptions {
diff --git a/src/query/executor/engine_test.go b/src/query/executor/engine_test.go
index a779660f2a..8906c613c1 100644
--- a/src/query/executor/engine_test.go
+++ b/src/query/executor/engine_test.go
@@ -27,14 +27,15 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/client"
+ "github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/cost"
qcost "github.com/m3db/m3/src/query/cost"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser/promql"
"github.com/m3db/m3/src/query/storage"
- "github.com/m3db/m3/src/query/storage/mock"
"github.com/m3db/m3/src/query/test/m3"
"github.com/m3db/m3/src/x/instrument"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@@ -56,8 +57,8 @@ func newEngine(
return NewEngine(engineOpts)
}
-func TestEngine_Execute(t *testing.T) {
- ctrl := gomock.NewController(t)
+func TestExecute(t *testing.T) {
+ ctrl := xtest.NewController(t)
store, session := m3.NewStorageAndSession(t, ctrl)
session.EXPECT().FetchTagged(gomock.Any(), gomock.Any(),
gomock.Any()).Return(nil, client.FetchResponseMetadata{Exhaustive: false}, fmt.Errorf("dummy"))
@@ -70,8 +71,8 @@ func TestEngine_Execute(t *testing.T) {
assert.NotNil(t, err)
}
-func TestEngine_ExecuteExpr(t *testing.T) {
- ctrl := gomock.NewController(t)
+func TestExecuteExpr(t *testing.T) {
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
mockEnforcer := cost.NewMockChainedEnforcer(ctrl)
@@ -84,7 +85,12 @@ func TestEngine_ExecuteExpr(t *testing.T) {
models.NewTagOptions(), promql.NewParseOptions())
require.NoError(t, err)
- engine := newEngine(mock.NewMockStorage(), defaultLookbackDuration,
+ store := storage.NewMockStorage(ctrl)
+ store.EXPECT().FetchBlocks(gomock.Any(), gomock.Any(), gomock.Any()).
+ Return(block.Result{
+ Blocks: []block.Block{block.NewMockBlock(ctrl)},
+ }, nil)
+ engine := newEngine(store, defaultLookbackDuration,
mockParent, instrument.NewOptions())
_, err = engine.ExecuteExpr(context.TODO(), parser,
&QueryOptions{}, storage.NewFetchOptions(), models.RequestParams{
diff --git a/src/query/executor/result.go b/src/query/executor/result.go
index af65be0a23..053b785639 100644
--- a/src/query/executor/result.go
+++ b/src/query/executor/result.go
@@ -24,79 +24,74 @@ import (
"sync"
"github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/executor/transform"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser"
-
"github.com/pkg/errors"
)
-const (
- // TODO: Get from config
- channelSize = 100
-)
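+// sink is a terminal transform node that captures the final block of an
+// execution.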
+type sink interface {
+ transform.OpNode
+ closeWithError(err error)
+ getValue() (block.Block, error)
+}
-var (
- errAborted = errors.New("the query has been aborted")
-)
+// resultNode is a sink that collects the single final block of a query.
+type resultNode struct {
+ sync.RWMutex
+ wg sync.WaitGroup
-// Result provides the execution results
-type Result interface {
- abort(err error)
- done()
- ResultChan() chan ResultChan
+ err error
+ completed bool
+ block block.Block
}
-// ResultNode is used to provide the results to the caller from the query execution
-type ResultNode struct {
- mu sync.Mutex
- resultChan chan ResultChan
- aborted bool
+func newResultNode() sink {
+ node := &resultNode{}
+ node.wg.Add(1)
+ return node
}
-// ResultChan has the result from a block
-type ResultChan struct {
- Block block.Block
- Err error
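+// closeWithLock marks the result complete and releases the wait group;
+// callers must hold the write lock.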
+func (r *resultNode) closeWithLock() {
+ r.wg.Done()
+ r.completed = true
}
-func newResultNode() *ResultNode {
- blocks := make(chan ResultChan, channelSize)
- return &ResultNode{resultChan: blocks}
-}
+// Process sets the incoming block and releases the wait group.
+func (r *resultNode) Process(_ *models.QueryContext,
+ _ parser.NodeID, block block.Block) error {
+ r.Lock()
+ defer r.Unlock()
-// Process the block
-func (r *ResultNode) Process(queryCtx *models.QueryContext, ID parser.NodeID, block block.Block) error {
- if r.aborted {
- return errAborted
+ if r.err != nil {
+ return r.err
}
- r.resultChan <- ResultChan{
- Block: block,
+ if r.block != nil {
+ r.err = errors.New("resultNode block already set")
+ return r.err
}
+ r.block = block
+ r.closeWithLock()
return nil
}
-// ResultChan return a channel to stream back resultChan to the client
-func (r *ResultNode) ResultChan() chan ResultChan {
- return r.resultChan
-}
-
-// TODO: Signal error downstream
-func (r *ResultNode) abort(err error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.aborted {
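+// closeWithError records the error and completes the result if it has not
+// already completed.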
+func (r *resultNode) closeWithError(err error) {
+ r.Lock()
+ defer r.Unlock()
+ if r.completed {
return
}
- r.aborted = true
- r.resultChan <- ResultChan{
- Err: err,
- }
- close(r.resultChan)
+ r.err = err
+ r.closeWithLock()
}
-func (r *ResultNode) done() {
- close(r.resultChan)
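+// getValue blocks until the result completes, then returns the collected
+// block or error.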
+func (r *resultNode) getValue() (block.Block, error) {
+ r.wg.Wait()
+ r.RLock()
+ bl, err := r.block, r.err
+ r.RUnlock()
+ return bl, err
}
diff --git a/src/query/executor/state.go b/src/query/executor/state.go
index 79f9d4c044..ba37ed2a69 100644
--- a/src/query/executor/state.go
+++ b/src/query/executor/state.go
@@ -37,10 +37,10 @@ import (
// ExecutionState represents the execution hierarchy.
type ExecutionState struct {
- plan plan.PhysicalPlan
- sources []parser.Source
- resultNode Result
- storage storage.Storage
+ plan plan.PhysicalPlan
+ sources []parser.Source
+ sink sink
+ storage storage.Storage
}
// CreateSource creates a source node.
@@ -126,9 +126,9 @@ func GenerateExecutionState(
return nil, errors.New("empty sources for the execution state")
}
- rNode := newResultNode()
- state.resultNode = rNode
- controller.AddTransform(rNode)
+ sink := newResultNode()
+ state.sink = sink
+ controller.AddTransform(sink)
return state, nil
}
@@ -181,12 +181,12 @@ func (s *ExecutionState) createNode(
// Execute the sources in parallel and return the first error.
func (s *ExecutionState) Execute(queryCtx *models.QueryContext) error {
- requests := make([]execution.Request, len(s.sources))
- for idx, source := range s.sources {
- requests[idx] = sourceRequest{
+ requests := make([]execution.Request, 0, len(s.sources))
+ for _, source := range s.sources {
+ requests = append(requests, sourceRequest{
source: source,
queryCtx: queryCtx,
- }
+ })
}
return execution.ExecuteParallel(queryCtx.Ctx, requests)
@@ -194,8 +194,7 @@ func (s *ExecutionState) Execute(queryCtx *models.QueryContext) error {
// String representation of the state.
func (s *ExecutionState) String() string {
- return fmt.Sprintf("plan: %s\nsources: %s\nresult: %s",
- s.plan, s.sources, s.resultNode)
+ return fmt.Sprintf("plan: %s\nsources: %s\n", s.plan, s.sources)
}
type sourceRequest struct {
diff --git a/src/query/executor/transform/exec.go b/src/query/executor/transform/exec.go
index 5b2890dee8..3812250793 100644
--- a/src/query/executor/transform/exec.go
+++ b/src/query/executor/transform/exec.go
@@ -61,7 +61,7 @@ func ProcessSimpleBlock(
// be closed, as they would free underlying data. The general story in block
// lifecycle should be revisited to remove quirks arising from these edge
// cases (something where blocks are responsible for calling their own
- // downstreams would seem more intuative and allow finer grained lifecycle
+ // downstreams would seem more intuitive and allow finer grained lifecycle
// control).
err = controller.Process(queryCtx, nextBlock)
if nextBlock.Info().Type() != block.BlockLazy {
diff --git a/src/query/executor/transform/exec_test.go b/src/query/executor/transform/exec_test.go
index 58926fdaa3..d9e1335396 100644
--- a/src/query/executor/transform/exec_test.go
+++ b/src/query/executor/transform/exec_test.go
@@ -31,6 +31,7 @@ import (
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser"
"github.com/m3db/m3/src/query/test"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
"github.com/opentracing/opentracing-go"
@@ -51,7 +52,7 @@ func TestProcessSimpleBlock(t *testing.T) {
}
setup := func(t *testing.T) (*testContext, func()) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
controller := &Controller{
ID: parser.NodeID("foo"),
diff --git a/src/query/executor/types.go b/src/query/executor/types.go
index d3de67d952..c5194c2f84 100644
--- a/src/query/executor/types.go
+++ b/src/query/executor/types.go
@@ -24,6 +24,7 @@ import (
"context"
"time"
+ "github.com/m3db/m3/src/query/block"
qcost "github.com/m3db/m3/src/query/cost"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser"
@@ -50,7 +51,7 @@ type Engine interface {
opts *QueryOptions,
fetchOpts *storage.FetchOptions,
params models.RequestParams,
- ) (Result, error)
+ ) (block.Block, error)
// Options returns the currently configured options.
Options() EngineOptions
diff --git a/src/query/executor/types_mock.go b/src/query/executor/types_mock.go
index d2be11a12a..4edb8181dd 100644
--- a/src/query/executor/types_mock.go
+++ b/src/query/executor/types_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/query/executor (interfaces: Engine)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -28,6 +28,7 @@ import (
"context"
"reflect"
+ "github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser"
"github.com/m3db/m3/src/query/storage"
@@ -73,10 +74,10 @@ func (mr *MockEngineMockRecorder) Close() *gomock.Call {
}
// ExecuteExpr mocks base method
-func (m *MockEngine) ExecuteExpr(arg0 context.Context, arg1 parser.Parser, arg2 *QueryOptions, arg3 *storage.FetchOptions, arg4 models.RequestParams) (Result, error) {
+func (m *MockEngine) ExecuteExpr(arg0 context.Context, arg1 parser.Parser, arg2 *QueryOptions, arg3 *storage.FetchOptions, arg4 models.RequestParams) (block.Block, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExecuteExpr", arg0, arg1, arg2, arg3, arg4)
- ret0, _ := ret[0].(Result)
+ ret0, _ := ret[0].(block.Block)
ret1, _ := ret[1].(error)
return ret0, ret1
}
diff --git a/src/query/functions/aggregation/base_test.go b/src/query/functions/aggregation/base_test.go
index 91bbb453cd..a9a7d7f6b7 100644
--- a/src/query/functions/aggregation/base_test.go
+++ b/src/query/functions/aggregation/base_test.go
@@ -79,22 +79,22 @@ func TestFunctionFilteringWithA(t *testing.T) {
require.NoError(t, err)
sink := processAggregationOp(t, op)
expected := [][]float64{
+ // stddev of fifth and sixth series
+ {250, 250, 250, 250, 250},
// stddev of first three series
{5, 7, 12.19289, 16.39105, 20.60744},
// stddev of fourth series
{0, 0, 0, 0, 0},
- // stddev of fifth and sixth series
- {250, 250, 250, 250, 250},
}
expectedMetas := []block.SeriesMeta{
+ {Name: typeBytes, Tags: models.EmptyTags()},
{Name: typeBytes, Tags: test.TagSliceToTags([]models.Tag{{Name: []byte("a"), Value: []byte("1")}})},
{Name: typeBytes, Tags: test.TagSliceToTags([]models.Tag{{Name: []byte("a"), Value: []byte("2")}})},
- {Name: typeBytes, Tags: models.EmptyTags()},
}
expectedMetaTags := models.EmptyTags()
- test.CompareValues(t, sink.Metas, expectedMetas, sink.Values, expected)
+ test.CompareValuesInOrder(t, sink.Metas, expectedMetas, sink.Values, expected)
assert.Equal(t, bounds, sink.Meta.Bounds)
assert.Equal(t, expectedMetaTags.Tags, sink.Meta.Tags.Tags)
}
@@ -106,22 +106,22 @@ func TestFunctionFilteringWithoutA(t *testing.T) {
require.NoError(t, err)
sink := processAggregationOp(t, op)
expected := [][]float64{
- // stddev of first two series
- {0, 0, 2.5, 2.5, 2.5},
// stddev of third, fourth, and fifth series
{36.81787, 77.17225, 118.97712, 161.10728, 203.36065},
// stddev of sixth series
{0, 0, 0, 0, 0},
+ // stddev of first two series
+ {0, 0, 2.5, 2.5, 2.5},
}
expectedMetas := []block.SeriesMeta{
- {Name: typeBytes, Tags: models.EmptyTags()},
{Name: typeBytes, Tags: test.TagSliceToTags([]models.Tag{{Name: []byte("b"), Value: []byte("2")}})},
{Name: typeBytes, Tags: test.TagSliceToTags([]models.Tag{{Name: []byte("c"), Value: []byte("3")}})},
+ {Name: typeBytes, Tags: models.EmptyTags()},
}
expectedMetaTags := test.TagSliceToTags([]models.Tag{{Name: []byte("d"), Value: []byte("4")}})
- test.CompareValues(t, sink.Metas, expectedMetas, sink.Values, expected)
+ test.CompareValuesInOrder(t, sink.Metas, expectedMetas, sink.Values, expected)
assert.Equal(t, bounds, sink.Meta.Bounds)
assert.Equal(t, expectedMetaTags.Tags, sink.Meta.Tags.Tags)
}
@@ -142,7 +142,7 @@ func TestFunctionFilteringWithD(t *testing.T) {
}
expectedMetaTags := test.TagSliceToTags([]models.Tag{{Name: []byte("d"), Value: []byte("4")}})
- test.CompareValues(t, sink.Metas, expectedMetas, sink.Values, expected)
+ test.CompareValuesInOrder(t, sink.Metas, expectedMetas, sink.Values, expected)
assert.Equal(t, bounds, sink.Meta.Bounds)
assert.Equal(t, expectedMetaTags.Tags, sink.Meta.Tags.Tags)
}
@@ -176,7 +176,7 @@ func TestFunctionFilteringWithoutD(t *testing.T) {
}
expectedMetaTags := models.EmptyTags()
- test.CompareValues(t, sink.Metas, expectedMetas, sink.Values, expected)
+ test.CompareValuesInOrder(t, sink.Metas, expectedMetas, sink.Values, expected)
assert.Equal(t, bounds, sink.Meta.Bounds)
assert.Equal(t, expectedMetaTags.Tags, sink.Meta.Tags.Tags)
}
diff --git a/src/query/functions/aggregation/count_values_test.go b/src/query/functions/aggregation/count_values_test.go
index 75f9a1277b..7404cedb7b 100644
--- a/src/query/functions/aggregation/count_values_test.go
+++ b/src/query/functions/aggregation/count_values_test.go
@@ -119,7 +119,7 @@ func TestSimpleProcessCountValuesFunctionUnfiltered(t *testing.T) {
assert.Equal(t, bounds, sink.Meta.Bounds)
ex := test.TagSliceToTags([]models.Tag{{Name: []byte(tagName), Value: []byte("0")}})
assert.Equal(t, ex.Tags, sink.Meta.Tags.Tags)
- test.CompareValues(t, sink.Metas, tagsToSeriesMeta(expectedTags), sink.Values, expected)
+ test.CompareValuesInOrder(t, sink.Metas, tagsToSeriesMeta(expectedTags), sink.Values, expected)
}
func TestSimpleProcessCountValuesFunctionFilteringWithoutA(t *testing.T) {
@@ -143,7 +143,7 @@ func TestSimpleProcessCountValuesFunctionFilteringWithoutA(t *testing.T) {
assert.Equal(t, bounds, sink.Meta.Bounds)
exTags := test.TagSliceToTags([]models.Tag{{Name: []byte(tagName), Value: []byte("0")}})
assert.Equal(t, exTags.Tags, sink.Meta.Tags.Tags)
- test.CompareValues(t, sink.Metas, tagsToSeriesMeta(expectedTags), sink.Values, expected)
+ test.CompareValuesInOrder(t, sink.Metas, tagsToSeriesMeta(expectedTags), sink.Values, expected)
}
func TestCustomProcessCountValuesFunctionFilteringWithoutA(t *testing.T) {
@@ -195,7 +195,7 @@ func TestCustomProcessCountValuesFunctionFilteringWithoutA(t *testing.T) {
require.Equal(t, len(expectedTags), len(expected))
assert.Equal(t, bounds, sink.Meta.Bounds)
assert.Equal(t, models.EmptyTags(), sink.Meta.Tags)
- test.CompareValues(t, sink.Metas, tagsToSeriesMeta(expectedTags), sink.Values, expected)
+ test.CompareValuesInOrder(t, sink.Metas, tagsToSeriesMeta(expectedTags), sink.Values, expected)
}
func TestSimpleProcessCountValuesFunctionFilteringWithA(t *testing.T) {
@@ -213,7 +213,7 @@ func TestSimpleProcessCountValuesFunctionFilteringWithA(t *testing.T) {
assert.Equal(t, bounds, sink.Meta.Bounds)
assert.Equal(t, test.TagSliceToTags([]models.Tag{{Name: []byte(tagName), Value: []byte("0")},
{Name: []byte("a"), Value: []byte("1")}}).Tags, sink.Meta.Tags.Tags)
- test.CompareValues(t, sink.Metas, tagsToSeriesMeta(expectedTags), sink.Values, expected)
+ test.CompareValuesInOrder(t, sink.Metas, tagsToSeriesMeta(expectedTags), sink.Values, expected)
}
func TestProcessCountValuesFunctionFilteringWithoutA(t *testing.T) {
diff --git a/src/query/functions/aggregation/quantile_test.go b/src/query/functions/aggregation/quantile_test.go
index 2943596c4e..a6f9be8ef4 100644
--- a/src/query/functions/aggregation/quantile_test.go
+++ b/src/query/functions/aggregation/quantile_test.go
@@ -142,22 +142,22 @@ func TestQuantileFunctionFilteringWithoutA(t *testing.T) {
require.NoError(t, err)
sink := processAggregationOp(t, op)
expected := [][]float64{
- // 0.6 quantile of first two series
- {0, 6, 5, 6, 7},
// 0.6 quantile of third, fourth, and fifth series
{60, 88, 116, 144, 172},
// stddev of sixth series
{600, 700, 800, 900, 1000},
+ // 0.6 quantile of first two series
+ {0, 6, 5, 6, 7},
}
expectedMetas := []block.SeriesMeta{
- {Name: typeBytesQuantile, Tags: models.EmptyTags()},
{Name: typeBytesQuantile, Tags: test.TagSliceToTags([]models.Tag{{Name: []byte("b"), Value: []byte("2")}})},
{Name: typeBytesQuantile, Tags: test.TagSliceToTags([]models.Tag{{Name: []byte("c"), Value: []byte("3")}})},
+ {Name: typeBytesQuantile, Tags: models.EmptyTags()},
}
expectedMetaTags := test.TagSliceToTags([]models.Tag{{Name: []byte("d"), Value: []byte("4")}})
- test.CompareValues(t, sink.Metas, expectedMetas, sink.Values, expected)
+ test.CompareValuesInOrder(t, sink.Metas, expectedMetas, sink.Values, expected)
assert.Equal(t, bounds, sink.Meta.Bounds)
assert.Equal(t, expectedMetaTags.Tags, sink.Meta.Tags.Tags)
}
diff --git a/src/query/functions/binary/binary.go b/src/query/functions/binary/binary.go
index e6cd237314..14fc70892e 100644
--- a/src/query/functions/binary/binary.go
+++ b/src/query/functions/binary/binary.go
@@ -251,6 +251,9 @@ func intersect(
if rIdx, ok := rightSigs[id]; ok {
takeLeft = append(takeLeft, lIdx)
correspondingRight = append(correspondingRight, rIdx)
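+ // NB: for one-to-one matching on explicit labels, retain only the
+ // matched labels in the output series tags.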
+ if matching.On && matching.Card == CardOneToOne && len(matching.MatchingLabels) > 0 {
+ ls.Tags = ls.Tags.TagsWithKeys(matching.MatchingLabels)
+ }
leftMetas = append(leftMetas, ls)
}
}
diff --git a/src/query/functions/binary/binary_test.go b/src/query/functions/binary/binary_test.go
index 048675d1e5..5dc03b4337 100644
--- a/src/query/functions/binary/binary_test.go
+++ b/src/query/functions/binary/binary_test.go
@@ -1027,3 +1027,94 @@ func TestBinaryFunctionWithDifferentNames(t *testing.T) {
assert.Equal(t, expectedMeta, sink.Meta)
assert.Equal(t, expectedMetas, sink.Metas)
}
+
+func TestOneToOneMatcher(t *testing.T) {
+ now := time.Now()
+
+ meta := func(bounds models.Bounds, name string, m block.ResultMetadata) block.Metadata {
+ return block.Metadata{
+ Bounds: bounds,
+ Tags: models.NewTags(1, models.NewTagOptions()).SetName([]byte(name)),
+ ResultMetadata: m,
+ }
+ }
+
+ var (
+ bounds = models.Bounds{
+ Start: now,
+ Duration: time.Minute * 3,
+ StepSize: time.Minute,
+ }
+
+ lhsResultMeta = block.ResultMetadata{
+ LocalOnly: true,
+ Exhaustive: false,
+ Warnings: []block.Warning{},
+ }
+
+ lhsMeta = meta(bounds, "left", lhsResultMeta)
+ lhsMetas = test.NewSeriesMeta("a", 2)
+ lhs = [][]float64{{1, 2, 3}, {4, 5, 6}}
+ left = test.NewBlockFromValuesWithMetaAndSeriesMeta(
+ lhsMeta, lhsMetas, lhs,
+ )
+
+ rhsResultMeta = block.ResultMetadata{
+ LocalOnly: false,
+ Exhaustive: true,
+ Warnings: []block.Warning{{Name: "foo", Message: "bar"}},
+ }
+
+ rhsMeta = meta(bounds, "right", rhsResultMeta)
+ rhsMetas = test.NewSeriesMeta("a", 3)[1:]
+ rhs = [][]float64{{10, 20, 30}, {40, 50, 60}}
+ right = test.NewBlockFromValuesWithMetaAndSeriesMeta(
+ rhsMeta, rhsMetas, rhs,
+ )
+
+ expected = [][]float64{{41, 52, 63}, {14, 25, 36}}
+ )
+
+ op, err := NewOp(
+ PlusType,
+ NodeParams{
+ LNode: parser.NodeID(0),
+ RNode: parser.NodeID(1),
+ VectorMatcherBuilder: oneToOneVectorMatchingBuilder,
+ },
+ )
+ require.NoError(t, err)
+
+ c, sink := executor.NewControllerWithSink(parser.NodeID(2))
+ node := op.(baseOp).Node(c, transform.Options{})
+
+ err = node.Process(models.NoopQueryContext(), parser.NodeID(0), left)
+ require.NoError(t, err)
+
+ err = node.Process(models.NoopQueryContext(), parser.NodeID(1), right)
+ require.NoError(t, err)
+
+ test.EqualsWithNans(t, expected, sink.Values)
+
+ expectedMetas := []block.SeriesMeta{
+ {
+ Name: []byte("a0"),
+ Tags: models.EmptyTags(),
+ },
+ {
+ Name: []byte("a1"),
+ Tags: models.NewTags(1, models.NewTagOptions()).AddTag(toTag("a1", "a1")),
+ },
+ }
+
+ assert.Equal(t, expectedMetas, sink.Metas)
+}
+
+func oneToOneVectorMatchingBuilder(_, _ block.Block) VectorMatching {
+ return VectorMatching{
+ Set: true,
+ Card: CardOneToOne,
+ On: true,
+ MatchingLabels: [][]byte{[]byte("a1")},
+ }
+}
diff --git a/src/query/functions/fetch_test.go b/src/query/functions/fetch_test.go
index e7783aa74d..0e6ae9a1fa 100644
--- a/src/query/functions/fetch_test.go
+++ b/src/query/functions/fetch_test.go
@@ -32,6 +32,7 @@ import (
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/storage/mock"
"github.com/m3db/m3/src/query/test"
"github.com/m3db/m3/src/query/test/executor"
@@ -135,7 +136,7 @@ func TestFetchWithRestrictFetch(t *testing.T) {
tally.NoopScope, cost.NoopChainedEnforcer(),
models.QueryContextOptions{
RestrictFetchType: &models.RestrictFetchTypeQueryContextOptions{
- MetricsType: uint(storage.AggregatedMetricsType),
+ MetricsType: uint(storagemetadata.AggregatedMetricsType),
StoragePolicy: policy.MustParseStoragePolicy("10s:42d"),
},
})
@@ -148,6 +149,7 @@ func TestFetchWithRestrictFetch(t *testing.T) {
fetchOpts := mockStorage.LastFetchOptions()
restrictByType := fetchOpts.RestrictQueryOptions.GetRestrictByType()
require.NotNil(t, restrictByType)
- assert.Equal(t, storage.AggregatedMetricsType, storage.MetricsType(restrictByType.MetricsType))
+ assert.Equal(t, storagemetadata.AggregatedMetricsType,
+ storagemetadata.MetricsType(restrictByType.MetricsType))
assert.Equal(t, "10s:42d", restrictByType.StoragePolicy.String())
}
diff --git a/src/query/functions/linear/histogram_quantile.go b/src/query/functions/linear/histogram_quantile.go
index e8990af506..6316c4eb60 100644
--- a/src/query/functions/linear/histogram_quantile.go
+++ b/src/query/functions/linear/histogram_quantile.go
@@ -133,13 +133,30 @@ func (b indexedBuckets) Less(i, j int) bool {
type bucketedSeries map[string]indexedBuckets
-func gatherSeriesToBuckets(metas []block.SeriesMeta) bucketedSeries {
+type validSeriesBuckets []indexedBuckets
+
+func (b validSeriesBuckets) Len() int { return len(b) }
+func (b validSeriesBuckets) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
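+// Less orders empty bucket lists last, and otherwise sorts by the index of
+// the first bucket.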
+func (b validSeriesBuckets) Less(i, j int) bool {
+ if len(b[i].buckets) == 0 {
+ return false
+ }
+
+ if len(b[j].buckets) == 0 {
+ return true
+ }
+
+ // An arbitrarily chosen sort that guarantees deterministic results.
+ return b[i].buckets[0].idx < b[j].buckets[0].idx
+}
+
+func gatherSeriesToBuckets(metas []block.SeriesMeta) validSeriesBuckets {
bucketsForID := make(bucketedSeries, initIndexBucketLength)
for i, meta := range metas {
tags := meta.Tags
value, found := tags.Bucket()
if !found {
- // This series does not have a bucket tag; drop it from the output.
+ // this series does not have a bucket tag; drop it from the output.
continue
}
@@ -151,43 +168,49 @@ func gatherSeriesToBuckets(metas []block.SeriesMeta) bucketedSeries {
excludeTags := [][]byte{tags.Opts.MetricName(), tags.Opts.BucketName()}
tagsWithoutKeys := tags.TagsWithoutKeys(excludeTags)
- id := tagsWithoutKeys.ID()
+ id := string(tagsWithoutKeys.ID())
newBucket := indexedBucket{
upperBound: bound,
idx: i,
}
- if buckets, found := bucketsForID[string(id)]; !found {
- // Add a single indexed bucket for this ID with the current index only.
+ if buckets, found := bucketsForID[id]; !found {
+ // add a single indexed bucket for this ID with the current index only.
newBuckets := make([]indexedBucket, 0, initIndexBucketLength)
newBuckets = append(newBuckets, newBucket)
- bucketsForID[string(id)] = indexedBuckets{
+ bucketsForID[id] = indexedBuckets{
buckets: newBuckets,
tags: tagsWithoutKeys,
}
} else {
buckets.buckets = append(buckets.buckets, newBucket)
- bucketsForID[string(id)] = buckets
+ bucketsForID[id] = buckets
}
}
- return bucketsForID
+ return sanitizeBuckets(bucketsForID)
}
-// sanitize sorts the bucket maps by upper bound, dropping any series which
-// have less than two buckets, or any that do not have an upper bound of +Inf
+// sanitizeBuckets sorts each series' buckets by upper bound, drops any series
+// with fewer than two buckets or without a +Inf upper bound, and returns the
+// surviving buckets in a deterministic order.
-func sanitizeBuckets(bucketMap bucketedSeries) {
- for k, buckets := range bucketMap {
+func sanitizeBuckets(bucketMap bucketedSeries) validSeriesBuckets {
+ validSeriesBuckets := make(validSeriesBuckets, 0, len(bucketMap))
+ for _, buckets := range bucketMap {
if len(buckets.buckets) < 2 {
- delete(bucketMap, k)
+ continue
}
sort.Sort(buckets)
maxBound := buckets.buckets[len(buckets.buckets)-1].upperBound
if !math.IsInf(maxBound, 1) {
- delete(bucketMap, k)
+ continue
}
+
+ validSeriesBuckets = append(validSeriesBuckets, buckets)
}
+
+ sort.Sort(validSeriesBuckets)
+ return validSeriesBuckets
}
func bucketQuantile(q float64, buckets []bucketValue) float64 {
@@ -257,34 +280,30 @@ func (n *histogramQuantileNode) ProcessBlock(
meta := b.Meta()
seriesMetas := utils.FlattenMetadata(meta, stepIter.SeriesMeta())
- bucketedSeries := gatherSeriesToBuckets(seriesMetas)
+ seriesBuckets := gatherSeriesToBuckets(seriesMetas)
q := n.op.q
if q < 0 || q > 1 {
- return processInvalidQuantile(queryCtx, q, bucketedSeries, meta, stepIter, n.controller)
+ return processInvalidQuantile(queryCtx, q, seriesBuckets, meta, stepIter, n.controller)
}
- return processValidQuantile(queryCtx, q, bucketedSeries, meta, stepIter, n.controller)
+ return processValidQuantile(queryCtx, q, seriesBuckets, meta, stepIter, n.controller)
}
func setupBuilder(
queryCtx *models.QueryContext,
- bucketedSeries bucketedSeries,
+ seriesBuckets validSeriesBuckets,
meta block.Metadata,
stepIter block.StepIter,
controller *transform.Controller,
) (block.Builder, error) {
- metas := make([]block.SeriesMeta, len(bucketedSeries))
- idx := 0
- for _, v := range bucketedSeries {
- metas[idx] = block.SeriesMeta{
+ metas := make([]block.SeriesMeta, 0, len(seriesBuckets))
+ for _, v := range seriesBuckets {
+ metas = append(metas, block.SeriesMeta{
Tags: v.tags,
- }
-
- idx++
+ })
}
- meta.Tags, metas = utils.DedupeMetadata(metas, meta.Tags.Opts)
builder, err := controller.BlockBuilder(queryCtx, meta, metas)
if err != nil {
return nil, err
@@ -297,17 +316,29 @@ func setupBuilder(
return builder, nil
}
+// Enforce monotonicity for binary search to work.
+// See https://github.com/prometheus/prometheus/commit/896f951e6846ce252d9d19fd4707a4110ceda5ee
+func ensureMonotonic(bucketValues []bucketValue) {
+ max := math.Inf(-1)
+ for i := range bucketValues {
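+ // NB: comparisons against NaN are always false, so NaN values fall
+ // through both cases and are preserved as-is.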
+ switch {
+ case bucketValues[i].value >= max:
+ max = bucketValues[i].value
+ case bucketValues[i].value < max:
+ bucketValues[i].value = max
+ }
+ }
+}
+
func processValidQuantile(
queryCtx *models.QueryContext,
q float64,
- bucketedSeries bucketedSeries,
+ seriesBuckets validSeriesBuckets,
meta block.Metadata,
stepIter block.StepIter,
controller *transform.Controller,
) (block.Block, error) {
- sanitizeBuckets(bucketedSeries)
-
- builder, err := setupBuilder(queryCtx, bucketedSeries, meta, stepIter, controller)
+ builder, err := setupBuilder(queryCtx, seriesBuckets, meta, stepIter, controller)
if err != nil {
return nil, err
}
@@ -317,9 +348,8 @@ func processValidQuantile(
values := step.Values()
bucketValues := make([]bucketValue, 0, initIndexBucketLength)
- aggregatedValues := make([]float64, len(bucketedSeries))
- idx := 0
- for _, b := range bucketedSeries {
+ aggregatedValues := make([]float64, 0, len(seriesBuckets))
+ for _, b := range seriesBuckets {
buckets := b.buckets
// clear previous bucket values.
bucketValues = bucketValues[:0]
@@ -336,8 +366,9 @@ func processValidQuantile(
}
}
- aggregatedValues[idx] = bucketQuantile(q, bucketValues)
- idx++
+ ensureMonotonic(bucketValues)
+
+ aggregatedValues = append(aggregatedValues, bucketQuantile(q, bucketValues))
}
if err := builder.AppendValues(index, aggregatedValues); err != nil {
@@ -355,12 +386,12 @@ func processValidQuantile(
func processInvalidQuantile(
queryCtx *models.QueryContext,
q float64,
- bucketedSeries bucketedSeries,
+ seriesBuckets validSeriesBuckets,
meta block.Metadata,
stepIter block.StepIter,
controller *transform.Controller,
) (block.Block, error) {
- builder, err := setupBuilder(queryCtx, bucketedSeries, meta, stepIter, controller)
+ builder, err := setupBuilder(queryCtx, seriesBuckets, meta, stepIter, controller)
if err != nil {
return nil, err
}
@@ -373,7 +404,7 @@ func processInvalidQuantile(
}
setValue := math.Inf(sign)
- outValues := make([]float64, len(bucketedSeries))
+ outValues := make([]float64, len(seriesBuckets))
util.Memset(outValues, setValue)
for index := 0; stepIter.Next(); index++ {
if err := builder.AppendValues(index, outValues); err != nil {
diff --git a/src/query/functions/linear/histogram_quantile_test.go b/src/query/functions/linear/histogram_quantile_test.go
index 99a3e0695c..70a648e780 100644
--- a/src/query/functions/linear/histogram_quantile_test.go
+++ b/src/query/functions/linear/histogram_quantile_test.go
@@ -92,7 +92,7 @@ func TestGatherSeriesToBuckets(t *testing.T) {
},
}
- assert.Equal(t, expected, actual)
+ assert.Equal(t, sanitizeBuckets(expected), actual)
}
func TestSanitizeBuckets(t *testing.T) {
@@ -132,8 +132,8 @@ func TestSanitizeBuckets(t *testing.T) {
},
}
- actual := bucketedSeries{
- `{bar="baz"}`: indexedBuckets{
+ expected := validSeriesBuckets{
+ indexedBuckets{
buckets: []indexedBucket{
{upperBound: 1, idx: 0},
{upperBound: 2, idx: 3},
@@ -143,8 +143,73 @@ func TestSanitizeBuckets(t *testing.T) {
},
}
- sanitizeBuckets(bucketed)
- assert.Equal(t, actual, bucketed)
+ assert.Equal(t, expected, sanitizeBuckets(bucketed))
+}
+
+func TestEnsureMonotonic(t *testing.T) {
+ tests := []struct {
+ name string
+ data []bucketValue
+ want []bucketValue
+ }{
+ {
+ "empty",
+ []bucketValue{},
+ []bucketValue{},
+ },
+ {
+ "one",
+ []bucketValue{{upperBound: 1, value: 5}},
+ []bucketValue{{upperBound: 1, value: 5}},
+ },
+ {
+ "two monotonic",
+ []bucketValue{{upperBound: 1, value: 5}, {upperBound: 2, value: 6}},
+ []bucketValue{{upperBound: 1, value: 5}, {upperBound: 2, value: 6}},
+ },
+ {
+ "two nonmonotonic",
+ []bucketValue{{upperBound: 1, value: 5}, {upperBound: 2, value: 4}},
+ []bucketValue{{upperBound: 1, value: 5}, {upperBound: 2, value: 5}},
+ },
+ {
+ "three monotonic",
+ []bucketValue{{upperBound: 1, value: 5}, {upperBound: 2, value: 6}, {upperBound: 3, value: 7}},
+ []bucketValue{{upperBound: 1, value: 5}, {upperBound: 2, value: 6}, {upperBound: 3, value: 7}},
+ },
+ {
+ "three nonmonotonic",
+ []bucketValue{{upperBound: 1, value: 5}, {upperBound: 2, value: 3}, {upperBound: 3, value: 4}},
+ []bucketValue{{upperBound: 1, value: 5}, {upperBound: 2, value: 5}, {upperBound: 3, value: 5}},
+ },
+ {
+ "four nonmonotonic",
+ []bucketValue{{upperBound: 1, value: 5}, {upperBound: 2, value: 3}, {upperBound: 3, value: 6}, {upperBound: 4, value: 3}},
+ []bucketValue{{upperBound: 1, value: 5}, {upperBound: 2, value: 5}, {upperBound: 3, value: 6}, {upperBound: 4, value: 6}},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ensureMonotonic(tt.data)
+ assert.Equal(t, tt.want, tt.data)
+ })
+ }
+}
+
+func TestEnsureMonotonicPreserveNaN(t *testing.T) {
+ data := []bucketValue{
+ {upperBound: 1, value: 5},
+ {upperBound: 2, value: 3},
+ {upperBound: 3, value: math.NaN()},
+ {upperBound: 4, value: 0},
+ }
+ ensureMonotonic(data)
+ assert.Equal(t, data[0], bucketValue{upperBound: 1, value: 5})
+ assert.Equal(t, data[1], bucketValue{upperBound: 2, value: 5})
+ assert.Equal(t, data[2].upperBound, float64(3))
+ assert.True(t, math.IsNaN(data[2].value))
+ assert.Equal(t, data[3], bucketValue{upperBound: 4, value: 5})
}
func TestBucketQuantile(t *testing.T) {
@@ -295,3 +360,83 @@ func TestQuantileFunctionForInvalidQValues(t *testing.T) {
actual = testQuantileFunctionWithQ(t, 0.8)
test.EqualsWithNansWithDelta(t, [][]float64{{15.6, 20, math.NaN(), 2, math.NaN()}}, actual, 0.00001)
}
+
+func testWithMultipleBuckets(t *testing.T, q float64) [][]float64 {
+ args := make([]interface{}, 0, 1)
+ args = append(args, q)
+ op, err := NewHistogramQuantileOp(args, HistogramQuantileType)
+ require.NoError(t, err)
+
+ name := []byte("name")
+ bucket := []byte("bucket")
+ tagOpts := models.NewTagOptions().
+ SetIDSchemeType(models.TypeQuoted).
+ SetMetricName(name).
+ SetBucketName(bucket)
+
+ tags := models.NewTags(3, tagOpts).SetName([]byte("foo")).AddTag(models.Tag{
+ Name: []byte("bar"),
+ Value: []byte("baz"),
+ })
+
+ tagsTwo := models.NewTags(3, tagOpts).SetName([]byte("qux")).AddTag(models.Tag{
+ Name: []byte("quaz"),
+ Value: []byte("quail"),
+ })
+
+ seriesMetas := []block.SeriesMeta{
+ {Tags: tags.Clone().SetBucket([]byte("1"))},
+ {Tags: tags.Clone().SetBucket([]byte("2"))},
+ {Tags: tags.Clone().SetBucket([]byte("5"))},
+ {Tags: tags.Clone().SetBucket([]byte("10"))},
+ {Tags: tags.Clone().SetBucket([]byte("20"))},
+ {Tags: tags.Clone().SetBucket([]byte("Inf"))},
+ {Tags: tagsTwo.Clone().SetBucket([]byte("1"))},
+ {Tags: tagsTwo.Clone().SetBucket([]byte("2"))},
+ {Tags: tagsTwo.Clone().SetBucket([]byte("5"))},
+ {Tags: tagsTwo.Clone().SetBucket([]byte("10"))},
+ {Tags: tagsTwo.Clone().SetBucket([]byte("20"))},
+ {Tags: tagsTwo.Clone().SetBucket([]byte("Inf"))},
+ }
+
+ v := [][]float64{
+ {1, 1, 11, math.NaN(), math.NaN()},
+ {2, 2, 12, 13, math.NaN()},
+ {5, 5, 15, math.NaN(), math.NaN()},
+ {10, 10, 20, math.NaN(), math.NaN()},
+ {15, 15, 25, math.NaN(), math.NaN()},
+ {16, 19, math.NaN(), 71, 1},
+ {21, 31, 411, math.NaN(), math.NaN()},
+ {22, 32, 412, 513, math.NaN()},
+ {25, 35, 415, math.NaN(), math.NaN()},
+ {210, 310, 420, math.NaN(), math.NaN()},
+ {215, 315, 425, math.NaN(), math.NaN()},
+ {216, 319, math.NaN(), 571, 601},
+ }
+
+ bounds := models.Bounds{
+ Start: time.Now(),
+ Duration: time.Minute * 5,
+ StepSize: time.Minute,
+ }
+
+ bl := test.NewBlockFromValuesWithSeriesMeta(bounds, seriesMetas, v)
+ c, sink := executor.NewControllerWithSink(parser.NodeID(1))
+ node := op.(histogramQuantileOp).Node(c, transform.Options{})
+ err = node.Process(models.NoopQueryContext(), parser.NodeID(0), bl)
+ require.NoError(t, err)
+
+ return sink.Values
+}
+
+func TestQuantileFunctionForMultipleBuckets(t *testing.T) {
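+ // NB: run the query repeatedly to guard against nondeterministic series
+ // ordering between runs.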
+ for i := 0; i < 100; i++ {
+ actual := testWithMultipleBuckets(t, 0.8)
+ expected := [][]float64{
+ {15.6, 20, math.NaN(), 2, math.NaN()},
+ {8.99459, 9.00363, math.NaN(), 1.78089, math.NaN()},
+ }
+
+ test.EqualsWithNansWithDelta(t, expected, actual, 0.00001)
+ }
+}
diff --git a/src/query/functions/scalar/scalar_test.go b/src/query/functions/scalar/scalar_test.go
index 4e6e9e482f..4466741123 100644
--- a/src/query/functions/scalar/scalar_test.go
+++ b/src/query/functions/scalar/scalar_test.go
@@ -23,6 +23,7 @@ package scalar
import (
"testing"
+ "github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/executor/transform"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser"
@@ -56,6 +57,7 @@ func TestScalar(t *testing.T) {
err = node.Execute(models.NoopQueryContext())
require.NoError(t, err)
require.Equal(t, 1, len(sink.Values))
+ require.Equal(t, block.BlockScalar, sink.Info.BaseType())
vals := sink.Values[0]
assert.Equal(t, bounds.Steps(), len(vals))
diff --git a/src/query/functions/temporal/aggregation.go b/src/query/functions/temporal/aggregation.go
index e6fbbd725d..7a108db6f6 100644
--- a/src/query/functions/temporal/aggregation.go
+++ b/src/query/functions/temporal/aggregation.go
@@ -76,12 +76,10 @@ type aggProcessor struct {
func (a aggProcessor) initialize(
_ time.Duration,
- controller *transform.Controller,
opts transform.Options,
) processor {
return &aggNode{
- controller: controller,
- aggFunc: a.aggFunc,
+ aggFunc: a.aggFunc,
}
}
@@ -137,9 +135,8 @@ func NewAggOp(args []interface{}, optype string) (transform.Params, error) {
}
type aggNode struct {
- controller *transform.Controller
- values []float64
- aggFunc func([]float64) float64
+ values []float64
+ aggFunc func([]float64) float64
}
func (a *aggNode) process(datapoints ts.Datapoints, _ iterationBounds) float64 {
diff --git a/src/query/functions/temporal/base.go b/src/query/functions/temporal/base.go
index 5a3176d831..915744372c 100644
--- a/src/query/functions/temporal/base.go
+++ b/src/query/functions/temporal/base.go
@@ -50,11 +50,7 @@ type iterationBounds struct {
// makeProcessor is a way to create a transform.
type makeProcessor interface {
// initialize initializes the processor.
- initialize(
- duration time.Duration,
- controller *transform.Controller,
- opts transform.Options,
- ) processor
+ initialize(duration time.Duration, opts transform.Options) processor
}
// processor is implemented by the underlying transforms.
@@ -97,7 +93,7 @@ func (o baseOp) Node(
return &baseNode{
controller: controller,
op: o,
- processor: o.processorFn.initialize(o.duration, controller, opts),
+ makeProcessor: o.processorFn,
transformOpts: opts,
}
}
@@ -109,7 +105,7 @@ type baseNode struct {
// https://github.com/m3db/m3/issues/1430
controller controller
op baseOp
- processor processor
+ makeProcessor makeProcessor
transformOpts transform.Options
}
@@ -127,47 +123,27 @@ func (c *baseNode) Process(
return fmt.Errorf("bound duration cannot be 0, bounds: %v", bounds)
}
- seriesIter, err := b.SeriesIter()
- if err != nil {
- return err
- }
-
- // rename series to exclude their __name__ tag as part of function processing.
- resultSeriesMeta := make([]block.SeriesMeta, 0, len(seriesIter.SeriesMeta()))
- for _, m := range seriesIter.SeriesMeta() {
- tags := m.Tags.WithoutName()
- resultSeriesMeta = append(resultSeriesMeta, block.SeriesMeta{
- Name: tags.ID(),
- Tags: tags,
- })
- }
-
- builder, err := c.controller.BlockBuilder(queryCtx, meta, resultSeriesMeta)
- if err != nil {
- return err
- }
-
- steps := bounds.Steps()
- if err := builder.AddCols(steps); err != nil {
- return err
- }
-
m := blockMeta{
end: xtime.ToUnixNano(bounds.Start),
- seriesMeta: resultSeriesMeta,
+ queryCtx: queryCtx,
aggDuration: xtime.UnixNano(c.op.duration),
stepSize: xtime.UnixNano(bounds.StepSize),
- steps: steps,
+ steps: bounds.Steps(),
}
concurrency := runtime.NumCPU()
+ var builder block.Builder
batches, err := b.MultiSeriesIter(concurrency)
if err != nil {
// NB: If the unconsolidated block does not support multi series iteration,
// fallback to processing series one by one.
- singleProcess(ctx, seriesIter, builder, m, c.processor)
+ builder, err = c.singleProcess(ctx, b, m)
} else {
- batchProcess(ctx, batches, builder, m, c.processor)
+ builder, err = c.batchProcess(ctx, b, batches, m)
+ }
+
+ if err != nil {
+ return err
}
// NB: safe to close the block here.
@@ -184,33 +160,47 @@ type blockMeta struct {
end xtime.UnixNano
aggDuration xtime.UnixNano
stepSize xtime.UnixNano
+ queryCtx *models.QueryContext
steps int
- seriesMeta []block.SeriesMeta
}
-func batchProcess(
+func (c *baseNode) batchProcess(
ctx context.Context,
+ b block.Block,
iterBatches []block.SeriesIterBatch,
- builder block.Builder,
m blockMeta,
- p processor,
-) error {
+) (block.Builder, error) {
var (
- metas = m.seriesMeta
-
mu sync.Mutex
wg sync.WaitGroup
multiErr xerrors.MultiError
idx int
)
- builder.PopulateColumns(len(metas))
+ meta := b.Meta()
+ builder, err := c.controller.BlockBuilder(m.queryCtx, meta, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ err = builder.AddCols(m.steps)
+ if err != nil {
+ return nil, err
+ }
+
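+	// Pre-size the builder with the total series count across all batches.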
+ numSeries := 0
+ for _, b := range iterBatches {
+ numSeries += b.Size
+ }
+
+ builder.PopulateColumns(numSeries)
for _, batch := range iterBatches {
wg.Add(1)
// capture loop variables
loopIndex := idx
batch := batch
idx = idx + batch.Size
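+		// Create a fresh processor per batch so concurrent goroutines do not share processor state.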
+ p := c.makeProcessor.initialize(c.op.duration, c.transformOpts)
go func() {
err := parallelProcess(ctx, loopIndex, batch.Iter, builder, m, p, &mu)
if err != nil {
@@ -225,7 +215,7 @@ func batchProcess(
}
wg.Wait()
- return multiErr.FinalError()
+ return builder, multiErr.FinalError()
}
func parallelProcess(
@@ -248,14 +238,20 @@ func parallelProcess(
// Simulate as if we did all the decoding up front so we can visualize
// how much decoding takes relative to the entire processing of the function.
- _, sp, _ := xcontext.StartSampledTraceSpan(ctx, tracepoint.TemporalDecodeParallel, opentracing.StartTime(start))
+ _, sp, _ := xcontext.StartSampledTraceSpan(ctx,
+ tracepoint.TemporalDecodeParallel, opentracing.StartTime(start))
sp.FinishWithOptions(opentracing.FinishOptions{
FinishTime: start.Add(decodeDuration),
})
}()
values := make([]float64, 0, blockMeta.steps)
- for iter.Next() {
+ metas := iter.SeriesMeta()
+ for i := 0; iter.Next(); i++ {
+ if i >= len(metas) {
+ return fmt.Errorf("invalid series meta index: %d, max %d", i, len(metas))
+ }
+
var (
newVal float64
init = 0
@@ -266,12 +262,17 @@ func parallelProcess(
series = iter.Current()
datapoints = series.Datapoints()
stats = series.Stats()
+ seriesMeta = metas[i]
)
if stats.Enabled {
decodeDuration += stats.DecodeDuration
}
+ // rename series to exclude their __name__ tag as
+ // part of function processing.
+ seriesMeta.Tags = seriesMeta.Tags.WithoutName()
+ seriesMeta.Name = seriesMeta.Tags.ID()
values = values[:0]
for i := 0; i < blockMeta.steps; i++ {
iterBounds := iterationBounds{
@@ -293,9 +294,10 @@ func parallelProcess(
}
mu.Lock()
+
// NB: this sets the values internally, so no need to worry about keeping
// a reference to underlying `values`.
- err := builder.SetRow(idx, values, blockMeta.seriesMeta[idx])
+ err := builder.SetRow(idx, values, seriesMeta)
mu.Unlock()
idx++
if err != nil {
@@ -306,29 +308,56 @@ func parallelProcess(
return iter.Err()
}
-func singleProcess(
+func (c *baseNode) singleProcess(
ctx context.Context,
- seriesIter block.SeriesIter,
- builder block.Builder,
+ b block.Block,
m blockMeta,
- p processor,
-) error {
+) (block.Builder, error) {
var (
start = time.Now()
decodeDuration time.Duration
)
+
defer func() {
if decodeDuration == 0 {
return // Do not record this span if instrumentation is not turned on.
}
// Simulate as if we did all the decoding up front so we can visualize
// how much decoding takes relative to the entire processing of the function.
- _, sp, _ := xcontext.StartSampledTraceSpan(ctx, tracepoint.TemporalDecodeSingle, opentracing.StartTime(start))
+ _, sp, _ := xcontext.StartSampledTraceSpan(ctx,
+ tracepoint.TemporalDecodeSingle, opentracing.StartTime(start))
sp.FinishWithOptions(opentracing.FinishOptions{
FinishTime: start.Add(decodeDuration),
})
}()
+ seriesIter, err := b.SeriesIter()
+ if err != nil {
+ return nil, err
+ }
+
+ // rename series to exclude their __name__ tag as part of function processing.
+ resultSeriesMeta := make([]block.SeriesMeta, 0, len(seriesIter.SeriesMeta()))
+ for _, m := range seriesIter.SeriesMeta() {
+ tags := m.Tags.WithoutName()
+ resultSeriesMeta = append(resultSeriesMeta, block.SeriesMeta{
+ Name: tags.ID(),
+ Tags: tags,
+ })
+ }
+
+ meta := b.Meta()
+ builder, err := c.controller.BlockBuilder(m.queryCtx, meta, resultSeriesMeta)
+ if err != nil {
+ return nil, err
+ }
+
+ err = builder.AddCols(m.steps)
+ if err != nil {
+ return nil, err
+ }
+
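+	// Sequential path: a single processor instance suffices here.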
+ p := c.makeProcessor.initialize(c.op.duration, c.transformOpts)
for seriesIter.Next() {
var (
newVal float64
@@ -361,7 +390,7 @@ func singleProcess(
}
if err := builder.AppendValue(i, newVal); err != nil {
- return err
+ return nil, err
}
start += step
@@ -369,7 +398,7 @@ func singleProcess(
}
}
- return seriesIter.Err()
+ return builder, seriesIter.Err()
}
// getIndices returns the index of the points on the left and the right of the
diff --git a/src/query/functions/temporal/base_test.go b/src/query/functions/temporal/base_test.go
index 489a2ecfa3..6e38d0af97 100644
--- a/src/query/functions/temporal/base_test.go
+++ b/src/query/functions/temporal/base_test.go
@@ -21,6 +21,7 @@
package temporal
import (
+ "fmt"
"math"
"testing"
"time"
@@ -33,8 +34,10 @@ import (
"github.com/m3db/m3/src/query/test/executor"
"github.com/m3db/m3/src/query/test/transformtest"
"github.com/m3db/m3/src/query/ts"
+ xtest "github.com/m3db/m3/src/x/test"
xtime "github.com/m3db/m3/src/x/time"
+ "github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -52,66 +55,73 @@ type opGenerator func(t *testing.T, tc testCase) transform.Params
func testTemporalFunc(t *testing.T, opGen opGenerator, tests []testCase) {
for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- values, bounds := test.GenerateValuesAndBounds(tt.vals, nil)
- boundStart := bounds.Start
-
- seriesMetas := []block.SeriesMeta{
- {
- Name: []byte("s1"),
- Tags: models.EmptyTags().AddTags([]models.Tag{{
- Name: []byte("t1"),
- Value: []byte("v1"),
- }}).SetName([]byte("foobar")),
- },
- {
- Name: []byte("s2"),
- Tags: models.EmptyTags().AddTags([]models.Tag{{
- Name: []byte("t1"),
- Value: []byte("v2"),
- }}).SetName([]byte("foobar")),
- },
+ for _, runBatched := range []bool{true, false} {
+ name := tt.name + "_unbatched"
+ if runBatched {
+ name = tt.name + "_batched"
}
+ t.Run(name, func(t *testing.T) {
+ values, bounds := test.GenerateValuesAndBounds(tt.vals, nil)
+ boundStart := bounds.Start
+
+ seriesMetas := []block.SeriesMeta{
+ {
+ Name: []byte("s1"),
+ Tags: models.EmptyTags().AddTags([]models.Tag{{
+ Name: []byte("t1"),
+ Value: []byte("v1"),
+ }}).SetName([]byte("foobar")),
+ },
+ {
+ Name: []byte("s2"),
+ Tags: models.EmptyTags().AddTags([]models.Tag{{
+ Name: []byte("t1"),
+ Value: []byte("v2"),
+ }}).SetName([]byte("foobar")),
+ },
+ }
- bl := test.NewUnconsolidatedBlockFromDatapointsWithMeta(models.Bounds{
- Start: bounds.Start.Add(-2 * bounds.Duration),
- Duration: bounds.Duration * 2,
- StepSize: bounds.StepSize,
- }, seriesMetas, values)
-
- c, sink := executor.NewControllerWithSink(parser.NodeID(1))
- baseOp := opGen(t, tt)
- node := baseOp.Node(c, transformtest.Options(t, transform.OptionsParams{
- TimeSpec: transform.TimeSpec{
- Start: boundStart.Add(-2 * bounds.Duration),
- End: bounds.End(),
- Step: time.Second,
- },
- }))
-
- err := node.Process(models.NoopQueryContext(), parser.NodeID(0), bl)
- require.NoError(t, err)
-
- test.EqualsWithNansWithDelta(t, tt.expected, sink.Values, 0.0001)
- // Name should be dropped from series tags.
- expectedSeriesMetas := []block.SeriesMeta{
- block.SeriesMeta{
+ bl := test.NewUnconsolidatedBlockFromDatapointsWithMeta(models.Bounds{
+ Start: bounds.Start.Add(-2 * bounds.Duration),
+ Duration: bounds.Duration * 2,
+ StepSize: bounds.StepSize,
+ }, seriesMetas, values, runBatched)
+
+ c, sink := executor.NewControllerWithSink(parser.NodeID(1))
+ baseOp := opGen(t, tt)
+ node := baseOp.Node(c, transformtest.Options(t, transform.OptionsParams{
+ TimeSpec: transform.TimeSpec{
+ Start: boundStart.Add(-2 * bounds.Duration),
+ End: bounds.End(),
+ Step: time.Second,
+ },
+ }))
+
+ err := node.Process(models.NoopQueryContext(), parser.NodeID(0), bl)
+ require.NoError(t, err)
+
+ test.EqualsWithNansWithDelta(t, tt.expected, sink.Values, 0.0001)
+ metaOne := block.SeriesMeta{
Name: []byte("t1=v1,"),
Tags: models.EmptyTags().AddTags([]models.Tag{{
Name: []byte("t1"),
Value: []byte("v1"),
}}),
- },
- block.SeriesMeta{
+ }
+
+ metaTwo := block.SeriesMeta{
Name: []byte("t1=v2,"),
Tags: models.EmptyTags().AddTags([]models.Tag{{
Name: []byte("t1"),
Value: []byte("v2"),
- }})},
- }
+ }})}
- assert.Equal(t, expectedSeriesMetas, sink.Metas)
- })
+ // NB: name should be dropped from series tags, and the name
+ // should be the updated ID.
+ expectedSeriesMetas := []block.SeriesMeta{metaOne, metaTwo}
+ require.Equal(t, expectedSeriesMetas, sink.Metas)
+ })
+ }
}
}
@@ -143,3 +153,151 @@ func TestGetIndicesError(t *testing.T) {
require.Equal(t, 10, r)
require.False(t, ok)
}
+
+var _ block.SeriesIter = (*dummySeriesIter)(nil)
+
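+// dummySeriesIter is a minimal in-memory block.SeriesIter used to drive TestParallelProcess.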
+type dummySeriesIter struct {
+ metas []block.SeriesMeta
+ vals []float64
+ idx int
+}
+
+func (it *dummySeriesIter) SeriesMeta() []block.SeriesMeta {
+ return it.metas
+}
+
+func (it *dummySeriesIter) SeriesCount() int {
+ return len(it.metas)
+}
+
+func (it *dummySeriesIter) Current() block.UnconsolidatedSeries {
+ return block.NewUnconsolidatedSeries(
+ ts.Datapoints{ts.Datapoint{Value: it.vals[it.idx]}},
+ it.metas[it.idx],
+ block.UnconsolidatedSeriesStats{},
+ )
+}
+
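+// Next advances the iterator; idx starts at -1, so the first call moves to the first series.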
+func (it *dummySeriesIter) Next() bool {
+ if it.idx >= len(it.metas)-1 {
+ return false
+ }
+
+ it.idx++
+ return true
+}
+
+func (it *dummySeriesIter) Err() error {
+ return nil
+}
+
+func (it *dummySeriesIter) Close() {
+	// no-op
+}
+
+func TestParallelProcess(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ tagName := "tag"
+ c, sink := executor.NewControllerWithSink(parser.NodeID(1))
+ aggProcess := aggProcessor{
+ aggFunc: func(fs []float64) float64 {
+ require.Equal(t, 1, len(fs))
+ return fs[0]
+ },
+ }
+
+ node := baseNode{
+ controller: c,
+ op: baseOp{duration: time.Minute},
+ makeProcessor: aggProcess,
+ transformOpts: transform.Options{},
+ }
+
+ stepSize := time.Minute
+ bl := block.NewMockBlock(ctrl)
+ bl.EXPECT().Meta().Return(block.Metadata{
+ Bounds: models.Bounds{
+ StepSize: stepSize,
+ Duration: stepSize,
+ }}).AnyTimes()
+
+ numSeries := 10
+ seriesMetas := make([]block.SeriesMeta, 0, numSeries)
+ vals := make([]float64, 0, numSeries)
+ for i := 0; i < numSeries; i++ {
+ number := fmt.Sprint(i)
+ name := []byte(fmt.Sprintf("%d_should_not_appear_after_func_applied", i))
+ meta := block.SeriesMeta{
+ Name: []byte(number),
+ Tags: models.MustMakeTags(tagName, number).SetName(name),
+ }
+
+ seriesMetas = append(seriesMetas, meta)
+ vals = append(vals, float64(i))
+ }
+
+ fullIter := &dummySeriesIter{
+ idx: -1,
+ vals: vals,
+ metas: seriesMetas,
+ }
+
+ bl.EXPECT().SeriesIter().Return(fullIter, nil).MaxTimes(1)
+
+ numBatches := 3
+ blockMetas := make([][]block.SeriesMeta, 0, numBatches)
+ blockVals := make([][]float64, 0, numBatches)
+ for i := 0; i < numBatches; i++ {
+ l := numSeries/numBatches + 1
+ blockMetas = append(blockMetas, make([]block.SeriesMeta, 0, l))
+ blockVals = append(blockVals, make([]float64, 0, l))
+ }
+
+ for i, meta := range seriesMetas {
+ idx := i % numBatches
+ blockMetas[idx] = append(blockMetas[idx], meta)
+ blockVals[idx] = append(blockVals[idx], float64(i))
+ }
+
+ batches := make([]block.SeriesIterBatch, 0, numBatches)
+ for i := 0; i < numBatches; i++ {
+ iter := &dummySeriesIter{
+ idx: -1,
+ vals: blockVals[i],
+ metas: blockMetas[i],
+ }
+
+ batches = append(batches, block.SeriesIterBatch{
+ Iter: iter,
+ Size: len(blockVals[i]),
+ })
+ }
+
+ bl.EXPECT().MultiSeriesIter(gomock.Any()).Return(batches, nil).MaxTimes(1)
+ bl.EXPECT().Close().Times(1)
+
+ err := node.Process(models.NoopQueryContext(), parser.NodeID(0), bl)
+ require.NoError(t, err)
+
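+	// Series were distributed to batches round-robin (i % numBatches), so the sink
+	// receives results grouped batch by batch.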
+ expected := []float64{
+ 0, 3, 6, 9,
+ 1, 4, 7,
+ 2, 5, 8,
+ }
+
+ for i, v := range sink.Values {
+ assert.Equal(t, expected[i], v[0])
+ }
+
+ for i, m := range sink.Metas {
+ expected := fmt.Sprint(expected[i])
+ expectedName := fmt.Sprintf("tag=%s,", expected)
+ assert.Equal(t, expectedName, string(m.Name))
+ require.Equal(t, 1, m.Tags.Len())
+ tag, found := m.Tags.Get([]byte(tagName))
+ require.True(t, found)
+ assert.Equal(t, expected, string(tag))
+ }
+}
diff --git a/src/query/functions/temporal/functions.go b/src/query/functions/temporal/functions.go
index c58e197e56..12ddde4669 100644
--- a/src/query/functions/temporal/functions.go
+++ b/src/query/functions/temporal/functions.go
@@ -49,11 +49,9 @@ type functionProcessor struct {
func (f functionProcessor) initialize(
_ time.Duration,
- controller *transform.Controller,
- opts transform.Options,
+ _ transform.Options,
) processor {
- return &functionNode{
- controller: controller,
+ return &functionNode{
comparisonFunc: f.compFunc,
}
}
@@ -84,8 +82,7 @@ func NewFunctionOp(args []interface{}, optype string) (transform.Params, error)
return newBaseOp(duration, optype, f)
}
-type functionNode struct {
- controller *transform.Controller
+type functionNode struct {
comparisonFunc comparisonFunc
}
diff --git a/src/query/functions/temporal/linear_regression.go b/src/query/functions/temporal/linear_regression.go
index 29f857f6b1..2af1317c75 100644
--- a/src/query/functions/temporal/linear_regression.go
+++ b/src/query/functions/temporal/linear_regression.go
@@ -49,11 +49,9 @@ type linearRegressionProcessor struct {
func (l linearRegressionProcessor) initialize(
_ time.Duration,
- controller *transform.Controller,
opts transform.Options,
) processor {
return &linearRegressionNode{
- controller: controller,
timeSpec: opts.TimeSpec(),
fn: l.fn,
isDeriv: l.isDeriv,
@@ -121,7 +119,6 @@ func NewLinearRegressionOp(
}
type linearRegressionNode struct {
- controller *transform.Controller
timeSpec transform.TimeSpec
fn linearRegFn
isDeriv bool
diff --git a/src/query/functions/temporal/rate.go b/src/query/functions/temporal/rate.go
index ac4b3f7bd0..6e5b990b09 100644
--- a/src/query/functions/temporal/rate.go
+++ b/src/query/functions/temporal/rate.go
@@ -49,44 +49,53 @@ const (
IncreaseType = "increase"
)
-type rateProcessor struct {
- isRate, isCounter bool
- rateFn rateFn
+// RateProcessor holds the configuration for rate-style temporal processors.
+type RateProcessor struct {
+ IsRate, IsCounter bool
+ RateFn RateFn
}
-func (r rateProcessor) initialize(
+func (r RateProcessor) initialize(
duration time.Duration,
- controller *transform.Controller,
- opts transform.Options,
+ _ transform.Options,
) processor {
return &rateNode{
- controller: controller,
- isRate: r.isRate,
- isCounter: r.isCounter,
- rateFn: r.rateFn,
- duration: duration,
+ isRate: r.IsRate,
+ isCounter: r.IsCounter,
+ rateFn: r.RateFn,
+ duration: duration,
}
}
-// NewRateOp creates a new base temporal transform for rate functions
-func NewRateOp(args []interface{}, optype string) (transform.Params, error) {
+// NewRateOpWithProcessor creates a new base temporal transform for
+// the given rate processor.
+func NewRateOpWithProcessor(
+ args []interface{},
+ opType string,
+ rateProcessor RateProcessor,
+) (transform.Params, error) {
if len(args) != 1 {
return emptyOp,
- fmt.Errorf("invalid number of args for %s: %d", optype, len(args))
+ fmt.Errorf("invalid number of args for %s: %d", opType, len(args))
}
duration, ok := args[0].(time.Duration)
if !ok {
return emptyOp,
- fmt.Errorf("unable to cast to scalar argument: %v for %s", args[0], optype)
+ fmt.Errorf("unable to cast to scalar argument: %v for %s", args[0], opType)
}
+ return newBaseOp(duration, opType, rateProcessor)
+}
+
+// NewRateOp creates a new base temporal transform for rate functions.
+func NewRateOp(args []interface{}, opType string) (transform.Params, error) {
var (
isRate, isCounter bool
rateFn = standardRateFunc
)
- switch optype {
+ switch opType {
case IRateType:
isRate = true
rateFn = irateFunc
@@ -99,19 +108,20 @@ func NewRateOp(args []interface{}, optype string) (transform.Params, error) {
isCounter = true
case DeltaType:
default:
- return nil, fmt.Errorf("unknown rate type: %s", optype)
+ return nil, fmt.Errorf("unknown rate type: %s", opType)
}
- r := rateProcessor{
- isRate: isRate,
- isCounter: isCounter,
- rateFn: rateFn,
+ r := RateProcessor{
+ IsRate: isRate,
+ IsCounter: isCounter,
+ RateFn: rateFn,
}
- return newBaseOp(duration, optype, r)
+ return NewRateOpWithProcessor(args, opType, r)
}
-type rateFn func(
+// RateFn is a function that calculates rate over the given set of datapoints.
+type RateFn func(
datapoints ts.Datapoints,
isRate bool,
isCounter bool,
@@ -121,10 +131,9 @@ type rateFn func(
) float64
type rateNode struct {
- controller *transform.Controller
isRate, isCounter bool
duration time.Duration
- rateFn rateFn
+ rateFn RateFn
}
func (r *rateNode) process(datapoints ts.Datapoints, bounds iterationBounds) float64 {
diff --git a/src/query/functions/utils/group.go b/src/query/functions/utils/group.go
index cc83959158..76e335dbd2 100644
--- a/src/query/functions/utils/group.go
+++ b/src/query/functions/utils/group.go
@@ -21,18 +21,16 @@
package utils
import (
+ "bytes"
+ "sort"
+
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
)
-type withKeysID func(tags models.Tags, matchingTags [][]byte) uint64
-
-func includeKeysID(tags models.Tags, matchingTags [][]byte) uint64 {
- return tags.TagsWithKeys(matchingTags).HashedID()
-}
-
-func excludeKeysID(tags models.Tags, matchingTags [][]byte) uint64 {
- return tags.TagsWithoutKeys(matchingTags).HashedID()
+type group struct {
+ buckets []int
+ tags models.Tags
}
type withKeysTags func(tags models.Tags, matchingTags [][]byte) models.Tags
@@ -42,7 +40,7 @@ func includeKeysTags(tags models.Tags, matchingTags [][]byte) models.Tags {
}
func excludeKeysTags(tags models.Tags, matchingTags [][]byte) models.Tags {
- return tags.TagsWithoutKeys(matchingTags)
+ return tags.TagsWithoutKeys(matchingTags).WithoutName()
}
// GroupSeries groups series by tags.
@@ -55,26 +53,19 @@ func GroupSeries(
opName []byte,
metas []block.SeriesMeta,
) ([][]int, []block.SeriesMeta) {
- var idFunc withKeysID
var tagsFunc withKeysTags
if without {
- idFunc = excludeKeysID
tagsFunc = excludeKeysTags
} else {
- idFunc = includeKeysID
tagsFunc = includeKeysTags
}
- type tagMatch struct {
- buckets []int
- tags models.Tags
- }
-
- tagMap := make(map[uint64]*tagMatch)
+ groups := make(map[uint64]*group)
for i, meta := range metas {
+ tags := tagsFunc(meta.Tags, matchingTags)
// NB(arnikola): Get the ID of the series with relevant tags
- id := idFunc(meta.Tags, matchingTags)
- if val, ok := tagMap[id]; ok {
+ id := tags.HashedID()
+ if val, ok := groups[id]; ok {
// If ID has been seen, the corresponding grouped
// series for this index already exists; add the
// current index to the bucket for that
@@ -84,24 +75,60 @@ func GroupSeries(
// If ID has not been seen, create a grouped series
// with the appropriate tags, and add the current index
// to the bucket for that grouped series
- tagMap[id] = &tagMatch{
+ groups[id] = &group{
buckets: []int{i},
- tags: tagsFunc(meta.Tags, matchingTags),
+ tags: tags,
}
}
}
- groupedBuckets := make([][]int, len(tagMap))
- groupedMetas := make([]block.SeriesMeta, len(tagMap))
- i := 0
- for _, v := range tagMap {
- groupedBuckets[i] = v.buckets
+ sortedGroups := sortGroups(groups)
+
+ groupedBuckets := make([][]int, len(groups))
+ groupedMetas := make([]block.SeriesMeta, len(groups))
+
+ for i, group := range sortedGroups {
+ groupedBuckets[i] = group.buckets
groupedMetas[i] = block.SeriesMeta{
- Tags: v.tags,
+ Tags: group.tags,
Name: opName,
}
- i++
}
return groupedBuckets, groupedMetas
}
+
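+// sortGroups returns the groups in a deterministic order, sorted by their tag sets.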
+func sortGroups(groups map[uint64]*group) []*group {
+ result := make([]*group, 0, len(groups))
+
+ for _, group := range groups {
+ result = append(result, group)
+ }
+
+ sort.Slice(result, func(i int, j int) bool {
+ return compareTagSets(result[i].tags, result[j].tags) < 0
+ })
+
+ return result
+}
+
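+// compareTagSets orders two tag sets lexicographically by tag name, then value;
+// it assumes the tags within each set are sorted by name.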
+func compareTagSets(a, b models.Tags) int {
+ l := a.Len()
+
+ if b.Len() < l {
+ l = b.Len()
+ }
+
+ for i := 0; i < l; i++ {
+ byName := bytes.Compare(a.Tags[i].Name, b.Tags[i].Name)
+ if byName != 0 {
+ return byName
+ }
+ byValue := bytes.Compare(a.Tags[i].Value, b.Tags[i].Value)
+ if byValue != 0 {
+ return byValue
+ }
+ }
+ // If all tags so far were in common, the set with fewer tags comes first.
+ return a.Len() - b.Len()
+}
diff --git a/src/query/functions/utils/group_test.go b/src/query/functions/utils/group_test.go
index 2d14dd8ab0..ac662998b7 100644
--- a/src/query/functions/utils/group_test.go
+++ b/src/query/functions/utils/group_test.go
@@ -50,13 +50,13 @@ var collectTest = []struct {
}),
[][]int{{0, 1, 2, 3, 4, 5}},
[]models.Tags{models.EmptyTags()},
- [][]int{{0}, {1}, {2}, {3}, {4}, {5}},
+ [][]int{{0}, {3}, {1}, {4}, {2}, {5}},
multiTagsFromMaps([]map[string]string{
{"a": "1"},
- {"a": "1", "b": "2", "c": "4"},
- {"b": "2"},
{"a": "1", "b": "2", "c": "3"},
+ {"a": "1", "b": "2", "c": "4"},
{"a": "1", "b": "2", "d": "3"},
+ {"b": "2"},
{"c": "d"},
}),
},
@@ -73,13 +73,13 @@ var collectTest = []struct {
}),
[][]int{{0, 1, 2, 3, 4, 5}},
multiTagsFromMaps([]map[string]string{{}}),
- [][]int{{0}, {1}, {2}, {3}, {4}, {5}},
+ [][]int{{0}, {3}, {1}, {4}, {2}, {5}},
multiTagsFromMaps([]map[string]string{
{"a": "1"},
- {"a": "1", "b": "2", "c": "4"},
- {"b": "2"},
{"a": "1", "b": "2", "c": "3"},
+ {"a": "1", "b": "2", "c": "4"},
{"a": "1", "b": "2", "d": "3"},
+ {"b": "2"},
{"c": "d"},
}),
},
@@ -94,17 +94,17 @@ var collectTest = []struct {
{"a": "1", "b": "2", "d": "3"},
{"c": "d"},
}),
- [][]int{{0, 1, 3, 4}, {2, 5}},
+ [][]int{{2, 5}, {0, 1, 3, 4}},
multiTagsFromMaps([]map[string]string{
- {"a": "1"},
{},
+ {"a": "1"},
}),
- [][]int{{0}, {1}, {2}, {3}, {4}, {5}},
+ [][]int{{0}, {2}, {3}, {1}, {4}, {5}},
multiTagsFromMaps([]map[string]string{
{},
- {"b": "2", "c": "4"},
{"b": "2"},
{"b": "2", "c": "3"},
+ {"b": "2", "c": "4"},
{"b": "2", "d": "3"},
{"c": "d"},
}),
@@ -120,17 +120,17 @@ var collectTest = []struct {
{"a": "1", "b": "2", "d": "3"},
{"c": "d"},
}),
- [][]int{{0, 1, 3, 4}, {2, 5}},
+ [][]int{{2, 5}, {0, 1, 3, 4}},
multiTagsFromMaps([]map[string]string{
- {"a": "1"},
{},
+ {"a": "1"},
}),
- [][]int{{0}, {1}, {2}, {3}, {4}, {5}},
+ [][]int{{0}, {2}, {3}, {1}, {4}, {5}},
multiTagsFromMaps([]map[string]string{
{},
- {"b": "2", "c": "4"},
{"b": "2"},
{"b": "2", "c": "3"},
+ {"b": "2", "c": "4"},
{"b": "2", "d": "3"},
{"c": "d"},
}),
@@ -152,11 +152,11 @@ var collectTest = []struct {
{"a": "2"},
{"a": "d"},
}),
- [][]int{{0, 2, 5}, {1}, {3}, {4}},
+ [][]int{{0, 2, 5}, {3}, {1}, {4}},
multiTagsFromMaps([]map[string]string{
{},
- {"b": "2", "c": "4"},
{"b": "2", "c": "3"},
+ {"b": "2", "c": "4"},
{"b": "2", "d": "3"},
}),
},
@@ -171,18 +171,18 @@ var collectTest = []struct {
{"a": "1", "b": "2", "d": "3"},
{"c": "3"},
}),
- [][]int{{0}, {1, 3, 4}, {2}, {5}},
+ [][]int{{5}, {0}, {1, 3, 4}, {2}},
multiTagsFromMaps([]map[string]string{
+ {},
{"a": "1"},
{"a": "1", "b": "2"},
{"b": "2"},
- {},
}),
- [][]int{{0, 2}, {1}, {3, 5}, {4}},
+ [][]int{{0, 2}, {3, 5}, {1}, {4}},
multiTagsFromMaps([]map[string]string{
{},
- {"c": "4"},
{"c": "3"},
+ {"c": "4"},
{"d": "3"},
}),
},
@@ -197,11 +197,11 @@ var collectTest = []struct {
{"b": "2"},
{"c": "3"},
}),
- [][]int{{0, 1, 2}, {3}, {4, 5}},
+ [][]int{{4, 5}, {0, 1, 2}, {3}},
multiTagsFromMaps([]map[string]string{
+ {},
{"a": "1"},
{"a": "2"},
- {},
}),
[][]int{{0, 1}, {2, 3, 4}, {5}},
multiTagsFromMaps([]map[string]string{
@@ -230,6 +230,32 @@ var collectTest = []struct {
{"c": "3", "d": "6"},
}),
},
+ {
+ "metrics name",
+ []string{"a"},
+ multiTagsFromMaps([]map[string]string{
+ {"__name__": "foo", "a": "1"},
+ {"__name__": "foo", "a": "1", "b": "2", "c": "4"},
+ {"__name__": "foo", "b": "2"},
+ {"__name__": "foo", "a": "1", "b": "2", "c": "3"},
+ {"__name__": "foo", "a": "1", "b": "2", "d": "3"},
+ {"__name__": "foo", "c": "d"},
+ }),
+ [][]int{{2, 5}, {0, 1, 3, 4}},
+ multiTagsFromMaps([]map[string]string{
+ {},
+ {"a": "1"},
+ }),
+ [][]int{{0}, {2}, {3}, {1}, {4}, {5}},
+ multiTagsFromMaps([]map[string]string{
+ {},
+ {"b": "2"},
+ {"b": "2", "c": "3"},
+ {"b": "2", "c": "4"},
+ {"b": "2", "d": "3"},
+ {"c": "d"},
+ }),
+ },
}
func testCollect(t *testing.T, without bool) {
@@ -267,7 +293,7 @@ func testCollect(t *testing.T, without bool) {
}
}
- test.CompareLists(t, collected, expectedMetas, buckets, expectedIndicies)
+ test.CompareListsInOrder(t, collected, expectedMetas, buckets, expectedIndicies)
})
}
}
diff --git a/src/query/generated-source-files.mk b/src/query/generated-source-files.mk
new file mode 100644
index 0000000000..b2d2b83f78
--- /dev/null
+++ b/src/query/generated-source-files.mk
@@ -0,0 +1,35 @@
+SELF_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
+include $(SELF_DIR)/../../.ci/common.mk
+
+gopath_prefix := $(GOPATH)/src
+query_package := github.com/m3db/m3/src/query
+query_package_path := $(gopath_prefix)/$(query_package)
+consolidators_package := $(query_package)/storage/m3/consolidators
+consolidators_package_path := $(gopath_prefix)/$(consolidators_package)
+m3x_package := github.com/m3db/m3/src/x
+m3x_package_path := $(gopath_prefix)/$(m3x_package)
+m3db_package := github.com/m3db/m3
+m3db_package_path := $(gopath_prefix)/$(m3db_package)
+
+
+# Generation rule for all generated types
+.PHONY: genny-all
+genny-all: genny-map-all
+
+# Map generation rule for all generated maps
+.PHONY: genny-map-all
+genny-map-all: \
+ genny-map-multi-fetch-result
+
+# Map generation rule for query/storage/m3/consolidators/multiFetchResultMap
+.PHONY: genny-map-multi-fetch-result
+genny-map-multi-fetch-result:
+ cd $(m3x_package_path) && make hashmap-gen \
+ pkg=consolidators \
+ key_type=models.Tags \
+ value_type=multiResultSeries \
+ rename_nogen_key=true \
+ target_package=$(consolidators_package) \
+ rename_type_prefix=fetchResult
+ # Rename generated map file
+ mv -f $(consolidators_package_path)/map_gen.go $(consolidators_package_path)/fetch_result_map_gen.go
diff --git a/src/query/generated/assets/openapi/assets.go b/src/query/generated/assets/openapi/assets.go
index b093bbf7ac..8cd445798c 100644
--- a/src/query/generated/assets/openapi/assets.go
+++ b/src/query/generated/assets/openapi/assets.go
@@ -1,6 +1,6 @@
// Code generated by "esc -modtime 12345 -prefix openapi/ -pkg openapi -ignore .go -o openapi/assets.go ."; DO NOT EDIT.
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -239,46 +239,46 @@ d8dUBmQZxiF/+uI1I7E8TMF6pCyWxDpY7WPA8pmKZsl6ouawOaOS+Sj+BQAA//8by2IcfAIAAA==
"/spec.yml": {
local: "openapi/spec.yml",
- size: 23034,
+ size: 23026,
modtime: 12345,
compressed: `
H4sIAAAAAAAC/+xc32/jNvJ/918x1X4fvgUuUTbZ6wF+c+I0ayCbNZygwLU4oLQ4ktlKpJakkvUW978f
KMmyZCn6YTtO1pVfEovDIWfmw/kMKcnv4O7zw/UQZhGH3wPyJwJRCvWJh/zkS4Ry+TswF5YigqSRL8FZ
EO6hAi1AL5gCl/n4w0A9Ec9DOQTr/PTMGjDuiuEAQDPt4xCsTxfjS2sAQFE5koWaCT4EawSUKS3ZPNJI
QbMAQaFkqIASTeZEIUSKcQ8+XTzc/wquL4j+6QM4IgglKsUEP4V/iwgcwsFlnIKINARCIpC5+deMCkTD
-bwutw6FtBxd0fuoxvYjmp0zYwYX9n/9/tulHEBIEh99umP4YzRNJNbTtVMoRQdzLDi5+PDW2PaJUiV3v
-T8+MEwAcwTVxtPEEACdB4orLMdwI4fkIN1JEoRW3RtIfgpWNYRrUqReLxUO5QkaB/e6H5K8Z2PTzmYNc
-YWGAUUicBcJt0gTnyVRKI5SssOe+mNsBURqlfTu5ur67v7YGC6G06SaUjvX/6/zsvTUwsZkSvRiCZZOQ
-2Y/vrYEmnhoOTtZ2ji/hjgSoQuJgOfhXgrvMi2QS3/Fl3C+WVdaGlqlPHAyQ6xZawpVsUcvI8yR6RAvZ
-XluuzzNax5cwTpFaVlZoPnliFMGNuGNalTVQzgIDjB0Wx8QahEQvlImkrVA+MgdVEpnML0mUPUzxBJB4
-HNLPSZXPzUdFQUDkcgjWDeqyrxMhEaIkZm4TOgQra79BvZJwBFdRPOVsPBKGPnPibvYfSvCVaCgFjZxW
-ohJVKLjCnCHnZ2frL5tetXItsQ9JXhbg/yS6Q7De2RRdxlnsbfsuZ84sHXCt6MPZhz2Pd4McJXOupRRy
-reCfe7erPE5olmsFPFqAY0QpEL6BjwZ4jCh9WXiERJIANcqccLr85oIu115jvHSp7MZ6cIwoneGXCJV+
-U+A8OxJwPpfW7L+yfyfj/yaKKfqocUsgj+POnbGcdHs1OOecsIFqQwzrSxK/REwiHYKWEWaX9TI0Wkw5
-xb2D4jfxW0ydMohNPjx6jyeFb6ySrPCoJf+TqkKpTP16gRtFUvWSyJqPg/2nOXPKCfYtsHJN3FJWZlxp
-wh1Mtl3YOoLHQNDTnDGvQdD1+Dkegm5DuzVITWm3c5JJ+o18/whSTR0XHpY7HCEkZdzsXruQyNW6WznW
-mxJ1LJNX1NPNG6KbHSPc81HPR2+Fj3aEcoGwtslXPXO9BHOR7Ni1C3E9d8BbIVBHWyPPawp/YIR6zjok
-Z+0U3Oxc08S2I28VY92TV09eeyOvnTBdoK62OWva89ahTuts02O469nPxAxLfPZti1216Xs8ycpY02er
-V8CxxPj/naE8S/RkN2UyHma80/4x1XM8yE4N6sH9isdibbP1jvvNUjrfZs95tHm90nn9OjjoOmif7Xdc
-CgU6qJDss3+P+gMdLbVN/jvt10qpv/Oera/ne9DvEfTtM/1OuC/k+bJgHeD7XN/Dfm/b2L9W+80OTxg2
-P+pQ2sy6UgSdtrOv/Mzh2ivf1yOHfcW+NbD3c890s27vl0C/BF6pkum8AvZx46V8P7Et7mMrpj34e/B3
-r2dWb0XajkSiWx7K519QK2J59bYbqqSMeWJ6AYFQGkRshQKKLol8jbQay6v5XMXT+e4r9XHBnNeo0zdn
-8DdEdoxJey6EVlqSMMwCvj3Ox0tOAuYQ31+CeEQpGU2OYQqjgLNaDhRYUtgr4ILiiY+P6GfNhXvMz6yH
-WPQe9WV+gONZH7F5lbYddpU8P4/jepKk4a2f55E/Qy0ZPiZop7llkMN6cRGs1sc/gLlA+LIN0G8OCvS/
-H8RqX27jQoMrIk5NwMwXtX4g8TuAd67F9K14FzdRmdaaYv4HOql9oTSg1GyNhDjf1ZenaWWzlqp/Jfdz
-mL6un5va57yKVvPKltg1J3MfaWmOcyF8JBnEXT9Si5ayT5JpVA/iSgQB07fCa+rgmC9R26lIDAmTrYU1
-cuOcz628PNsQz/IXJ6FaCN1yVMYpfm034iQnarrPKifcKqaZrVOUTNBxWhg0wG/uC+fPe/YN28pHrovy
-50hHsluXKVG6y5xMwrv+GjK5bHL3hvjI1SjvhB45DirV2hmTUtBaeR3bQaKLm6tev++EA48pnXdbfUaZ
-pfKFoWcFJa1zXfILHiXb8h3Nh1Aaz4L405KaTimw/KxlhwknpzK1oava1ncYYeNtgebTg5WLVr/cszk1
-xjV6mGNDVxi7k5aL88KUO8xzderyQpGbpOpzKdwUUj8TRwvZZCOPgvsFkVQ1CTIVyzWvRSfSpqh8YBXc
-XOvgnz6sx/rETLnQPFhAvsbTukc9oXXDrZzUJWy0IZEyJfx4VcQ/69Qg/E3wplrlCZm30E1OQ05Dwbhu
-UKaqo0qkJPnNocagGWGxiwuKG/1tPtlvSNXPNBRStwld9/rw+47gy7ivcBtnn75s4Z7DGh6jdjcLNwKn
-NNHYQDZJOtK581ElIungpMl/ad68I1yobROn0eG6W6tYz73gtvw8kUdB0ngC1uRu8jAZ3U5+ndzdWKuL
-o19Gk9vR5e11duX2evRLKlHxNtJeiHSrtLaxMrLdmJAOtipbck81vTkrWhN7VZWTKyPMIO1Kidpyqfgw
-TAdv+UgeGfcm2Z2r7m6rXm6EU0aJfotg2jpHvziy8oc5XbZxa/nKgFTeidlmc3TXTBvxxXqRfJpLc5gv
-HOJb+SuOHym9RSH9ouutcFJRWYI/cz7RwGqXK7k8R+8JZh9Fgq3L4lxaBV43m4hfQ3Q00vv4l2YN0uIa
-RE1RfhSR3IYnP4r9lqGEUolK7VjvvGJBu38XV98Z3SYltD2oqXi0oPMBw4aOmrsKHSx5JH6EO7Pe/wIA
-AP//JDPkT/pZAAA=
+bwutQzW0bSocdRpc0PkpE/Z//r/y8o8gJAgOv90w/TGar6U8phfR/NQRgW1k7eDix1Nj0yNKldjz/vTM
+GA/gCK6Jo40HADgJEhdcjuFGCM9HuJEiCq24NZL+EKxsDNOgTr1YLB7KFTIK7Hc/JH/NwKafzxzkCgsD
+jELiLBBukyY4T6ZSGqFkhT33xdwOiNIo7dvJ1fXd/bU1WAilTTehdKz/X+dn762BicmU6MUQLJuEzH58
+bw008dRwcLK2c3wJdyRAFRIHy0G/EtxlXiSTuI4v436xrLI2tEx94mCAXLfQEq5ki1pGnifRI1rI9tpy
+fZ7ROr6EcYrQsrJC88kTowhuxB3TqqyBchYYYOywOCbWICR6oUwkbYXykTmokshkfkmi7GGKJ4DE45B+
+Tqp8bj4qCgIil0OwblCXfZ0IiRAlMXOb0CFYWfsN6pWEI7iK4iln45Ew9JkTd7P/UIKvREMpaOS0EpWo
+QsEV5gw5Pztbf9n0qpVriX1I8rIA/yfRHYL1zqboMs5ib9t3OXNm6YBrRR/OPux5vBvkKJlzLaWQawX/
+3Ltd5XFCs1wr4NECHCNKgfANfDTAY0Tpy8IjJJIEqFHmhNPlNxd0ufYa46VLZTfWg2NE6Qy/RKj0mwLn
+2ZGA87m0Zv+V/TsZ/zdRTNFHjVsCeRx37ozlpNurwTnnhA1UG2JYX5L4JWIS6RC0jDC7rJeh0WLKKO4d
+FL+J32LqlEFs8uHRezwpfGOVZIVHLfmfVBVKZerXC9wokqqXRNZ8HOw/zZlTTrBvgZVr4payMuNKE+5g
+st3C1hE8BoKe5ox5DYKux8/xEHQb2q1Bakq7nZNM0m/k+0eQauq48LDc4QghKeNm99qFRK7W3cqx3pSo
+Y5m8op5u3hDd7Bjhno96PnorfLQjlAuEtU2+6pnrJZiLZMeuXYjruQPeCoE62hp5XlP4AyPUc9YhOWun
+4Gbnmia2HXmrGOuevHry2ht57YTpAnW1zVnTnrcOdVpnmx7DXc9+JmZY4rNvW+yqTd/jSVbGmj5bvQKO
+Jcb/7wzlWaInuymT8TDjnfaPqZ7jQXZqUA/uVzwWa5utd9xvltL5NnvOo83rlc7r18FB10H7bL/jUijQ
+QYVkn/171B/oaKlt8t9pv1ZK/Z33bH0934N+j6Bvn+l3wn0hz5cF6wDf5/oe9nvbxv612m92eMKw+VGH
+0mbWlSLotJ195WcO1175vh457Cv2rYG9n3umm3V7vwT6JfBKlUznFbCPGy/l+4ltcR9bMe3B34O/ez2z
+ehvSdiQS3fJQPv+CWhHLq7fdUCVlzBPTCwiE0iBiKxRQdEnka6TVWF7N5yqezndfqY8L5rxGnb45g78h
+smNM2nMhtNKShGEW8O1xPl5yEjCH+P4SxCNKyWhyDFMYBZzVcqDAksJeARcUT3x8RD9rLtxjfmY9xKL3
+qC/zAxzP+ojNq7TtsKvk+Xkc15MkDW/9PI/8GWrJ8DFBO80tgxzWi4tgtT7+AcwFwpdtgH5zUKD//SBW
++3IbFxpcEXFqAma+qPUDid8BvHMtpm/Fu7iJyrTWFPM/0EntC6UBpWZrJMT5rr48TSubtVT9K7mfw/R1
+/dzUPudVtJpXtsSuOZn7SEtznAvhI8kg7vqRWrSUfZJMo3oQVyIImL4VXlMHx3yJ2k5FYkiYbC2skRvn
+fG7l5dmGeJa/OAnVQuiWozJO8Wu7ESc5UdN9VjnhVjHNbJ2iZIKO08KgAX5zXzh/3rNv2FY+cl2UP0c6
+kt26TInSXeZkEt7115DJZZO7N8RHrkZ5J/TIcVCp1s6YlILWyuvYDhJd3Fz1+n0nHHhM6bzb6jPKLJUv
+DD0rKGmd65Jf8CjZlu9oPoTSeBbEn5bUdEqB5WctO0w4OZWpDV3Vtr7DCBtvCzSfHqxctPrlns2pMa7R
+wxwbusLYnbRcnBem3GGeq1OXF4rcJFWfS+GmkPqZOFrIJht5FNwviKSqSZCpWK55LTqRNkXlA6vg5loH
+//RhPdYnZsqF5sEC8jWe1j3qCa0bbuWkLmGjDYmUKeHHqyL+WacG4W+CN9UqT8i8hW5yGnIaCsZ1gzJV
+HVUiJclvDjUGzQiLXVxQ3Ohv88l+Q6p+pqGQuk3outeH33cEX8Z9hds4+/RlC/cc1vAYtbtZuBE4pYnG
+BrJJ0pHOnY8qEUkHJ03+S/PmHeFCbZs4jQ7X3VrFeu4Ft+XniTwKksYTsCZ3k4fJ6Hby6+TuxlpdHP0y
+mtyOLm+vsyu316NfUomKt5H2QqRbpbWNlZHtxoR0sFXZknuq6c1Z0ZrYq6qcXBlhBmlXStSWS8WHYTp4
+y0fyyLg3ye5cdXdb9XIjnDJK9FsE09Y5+sWRlT/M6bKNW8tXBqTyTsw2m6O7ZtqIL9aL5NNcmsN84RDf
+yl9x/EjpLQrpF11vhZOKyhL8mfOJBla7XMnlOXpPMPsoEmxdFufSKvC62UT8GqKjkd7HvzBrkBbXIGqK
+8qOI5DY8+VHstwwllEpUasd65xUL2v27uPrO6DYpoe1BTcWjBZ0PGDZ01NxV6GDJI/Ej3Jn1/hcAAP//
+GyXHNPJZAAA=
`,
},
diff --git a/src/query/generated/assets/openapi/spec.yml b/src/query/generated/assets/openapi/spec.yml
index 9710666514..dfa3446a0c 100644
--- a/src/query/generated/assets/openapi/spec.yml
+++ b/src/query/generated/assets/openapi/spec.yml
@@ -2,7 +2,7 @@
swagger: "2.0"
info:
title: "M3DB"
- description: "A distributed time series database using M3TSZ float64 compression. You can find out more about M3DB at [http://m3db.github.io/m3/](http://m3db.github.io/m3/) or on [GitHub](https://github.com/m3db/m3)."
+ description: "A distributed time series database using M3TSZ float64 compression. You can find out more about M3DB at [https://docs.m3db.io/](https://docs.m3db.io/) or on [GitHub](https://github.com/m3db/m3)."
version: "1.0.0"
contact:
name: "M3BD Google Group"
diff --git a/src/query/generated/mocks/generate.go b/src/query/generated/mocks/generate.go
index 9f0a606f42..95393fd6e6 100644
--- a/src/query/generated/mocks/generate.go
+++ b/src/query/generated/mocks/generate.go
@@ -27,11 +27,12 @@
//go:generate sh -c "mockgen -package=ingest -destination=$GOPATH/src/$PACKAGE/src/cmd/services/m3coordinator/ingest/write_mock.go $PACKAGE/src/cmd/services/m3coordinator/ingest DownsamplerAndWriter"
//go:generate sh -c "mockgen -package=transform -destination=$GOPATH/src/$PACKAGE/src/query/executor/transform/types_mock.go $PACKAGE/src/query/executor/transform OpNode"
//go:generate sh -c "mockgen -package=executor -destination=$GOPATH/src/$PACKAGE/src/query/executor/types_mock.go $PACKAGE/src/query/executor Engine"
+//go:generate sh -c "mockgen -package=cost -destination=$GOPATH/src/github.com/m3db/m3/src/query/cost/cost_mock.go $PACKAGE/src/query/cost ChainedEnforcer,ChainedReporter"
+//go:generate sh -c "mockgen -package=storage -destination=$GOPATH/src/$PACKAGE/src/query/graphite/storage/storage_mock.go $PACKAGE/src/query/graphite/storage Storage"
// mockgen rules for generating mocks for unexported interfaces (file mode).
//go:generate sh -c "mockgen -package=m3ql -destination=$GOPATH/src/github.com/m3db/m3/src/query/parser/m3ql/types_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/query/parser/m3ql/types.go"
//go:generate sh -c "mockgen -package=transform -destination=$GOPATH/src/github.com/m3db/m3/src/query/executor/transform/exec_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/query/executor/transform/exec.go"
//go:generate sh -c "mockgen -package=temporal -destination=$GOPATH/src/github.com/m3db/m3/src/query/functions/temporal/dependencies_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/query/functions/temporal/dependencies.go" controller
-//go:generate sh -c "mockgen -package=cost -destination=$GOPATH/src/github.com/m3db/m3/src/query/cost/cost_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/query/cost/cost.go"
package mocks
diff --git a/src/query/generated/proto/admin/database.pb.go b/src/query/generated/proto/admin/database.pb.go
index 7eeba7974c..1cae5048e9 100644
--- a/src/query/generated/proto/admin/database.pb.go
+++ b/src/query/generated/proto/admin/database.pb.go
@@ -37,6 +37,7 @@
DatabaseCreateResponse
NamespaceGetResponse
NamespaceAddRequest
+ NamespaceUpdateRequest
NamespaceSchemaAddRequest
NamespaceSchemaAddResponse
NamespaceSchemaResetRequest
@@ -50,6 +51,7 @@
TopicGetResponse
TopicInitRequest
TopicAddRequest
+ TopicUpdateRequest
*/
package admin
diff --git a/src/query/generated/proto/admin/namespace.pb.go b/src/query/generated/proto/admin/namespace.pb.go
index 37aff6b163..f0df71845e 100644
--- a/src/query/generated/proto/admin/namespace.pb.go
+++ b/src/query/generated/proto/admin/namespace.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/m3db/m3/src/query/generated/proto/admin/namespace.proto
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -75,6 +75,30 @@ func (m *NamespaceAddRequest) GetOptions() *namespace1.NamespaceOptions {
return nil
}
+type NamespaceUpdateRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Options *namespace1.NamespaceOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *NamespaceUpdateRequest) Reset() { *m = NamespaceUpdateRequest{} }
+func (m *NamespaceUpdateRequest) String() string { return proto.CompactTextString(m) }
+func (*NamespaceUpdateRequest) ProtoMessage() {}
+func (*NamespaceUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{2} }
+
+func (m *NamespaceUpdateRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *NamespaceUpdateRequest) GetOptions() *namespace1.NamespaceOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
type NamespaceSchemaAddRequest struct {
// Name is the namespace name.
// Add schema to non-existent namespace will get 404.
@@ -98,7 +122,7 @@ func (m *NamespaceSchemaAddRequest) Reset() { *m = NamespaceSchemaAddReq
func (m *NamespaceSchemaAddRequest) String() string { return proto.CompactTextString(m) }
func (*NamespaceSchemaAddRequest) ProtoMessage() {}
func (*NamespaceSchemaAddRequest) Descriptor() ([]byte, []int) {
- return fileDescriptorNamespace, []int{2}
+ return fileDescriptorNamespace, []int{3}
}
func (m *NamespaceSchemaAddRequest) GetName() string {
@@ -137,7 +161,7 @@ func (m *NamespaceSchemaAddResponse) Reset() { *m = NamespaceSchemaAddRe
func (m *NamespaceSchemaAddResponse) String() string { return proto.CompactTextString(m) }
func (*NamespaceSchemaAddResponse) ProtoMessage() {}
func (*NamespaceSchemaAddResponse) Descriptor() ([]byte, []int) {
- return fileDescriptorNamespace, []int{3}
+ return fileDescriptorNamespace, []int{4}
}
func (m *NamespaceSchemaAddResponse) GetDeployID() string {
@@ -157,7 +181,7 @@ func (m *NamespaceSchemaResetRequest) Reset() { *m = NamespaceSchemaRese
func (m *NamespaceSchemaResetRequest) String() string { return proto.CompactTextString(m) }
func (*NamespaceSchemaResetRequest) ProtoMessage() {}
func (*NamespaceSchemaResetRequest) Descriptor() ([]byte, []int) {
- return fileDescriptorNamespace, []int{4}
+ return fileDescriptorNamespace, []int{5}
}
func (m *NamespaceSchemaResetRequest) GetName() string {
@@ -174,12 +198,13 @@ func (m *NamespaceSchemaResetResponse) Reset() { *m = NamespaceSchemaRes
func (m *NamespaceSchemaResetResponse) String() string { return proto.CompactTextString(m) }
func (*NamespaceSchemaResetResponse) ProtoMessage() {}
func (*NamespaceSchemaResetResponse) Descriptor() ([]byte, []int) {
- return fileDescriptorNamespace, []int{5}
+ return fileDescriptorNamespace, []int{6}
}
func init() {
proto.RegisterType((*NamespaceGetResponse)(nil), "admin.NamespaceGetResponse")
proto.RegisterType((*NamespaceAddRequest)(nil), "admin.NamespaceAddRequest")
+ proto.RegisterType((*NamespaceUpdateRequest)(nil), "admin.NamespaceUpdateRequest")
proto.RegisterType((*NamespaceSchemaAddRequest)(nil), "admin.NamespaceSchemaAddRequest")
proto.RegisterType((*NamespaceSchemaAddResponse)(nil), "admin.NamespaceSchemaAddResponse")
proto.RegisterType((*NamespaceSchemaResetRequest)(nil), "admin.NamespaceSchemaResetRequest")
@@ -247,6 +272,40 @@ func (m *NamespaceAddRequest) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *NamespaceUpdateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NamespaceUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if m.Options != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintNamespace(dAtA, i, uint64(m.Options.Size()))
+ n3, err := m.Options.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
+ return i, nil
+}
+
func (m *NamespaceSchemaAddRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -399,6 +458,20 @@ func (m *NamespaceAddRequest) Size() (n int) {
return n
}
+func (m *NamespaceUpdateRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovNamespace(uint64(l))
+ }
+ if m.Options != nil {
+ l = m.Options.Size()
+ n += 1 + l + sovNamespace(uint64(l))
+ }
+ return n
+}
+
func (m *NamespaceSchemaAddRequest) Size() (n int) {
var l int
_ = l
@@ -659,6 +732,118 @@ func (m *NamespaceAddRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *NamespaceUpdateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNamespace
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamespaceUpdateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamespaceUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNamespace
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthNamespace
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowNamespace
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthNamespace
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Options == nil {
+ m.Options = &namespace1.NamespaceOptions{}
+ }
+ if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipNamespace(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthNamespace
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *NamespaceSchemaAddRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -1232,29 +1417,30 @@ func init() {
}
var fileDescriptorNamespace = []byte{
- // 381 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0xdf, 0x8a, 0xda, 0x40,
- 0x14, 0xc6, 0x1b, 0xff, 0x54, 0x3d, 0x52, 0x90, 0xd1, 0x8b, 0x34, 0x4a, 0x90, 0x5c, 0x79, 0x95,
- 0xa1, 0x4a, 0x41, 0xda, 0xab, 0x4a, 0x8b, 0xb4, 0xd0, 0x3f, 0x4c, 0x5f, 0xa0, 0x49, 0xe6, 0x10,
- 0x43, 0x4d, 0x26, 0x66, 0x92, 0x85, 0xbc, 0xc5, 0x3e, 0xd6, 0x5e, 0xee, 0x23, 0x2c, 0xee, 0x0b,
- 0xec, 0x23, 0x2c, 0x19, 0x93, 0xb8, 0xeb, 0x2a, 0x7b, 0x37, 0xe7, 0x7c, 0xe7, 0xfb, 0x7d, 0x33,
- 0x67, 0x60, 0xe5, 0x07, 0xe9, 0x26, 0x73, 0x6d, 0x4f, 0x84, 0x34, 0x5c, 0x70, 0x97, 0x86, 0x0b,
- 0x2a, 0x13, 0x8f, 0xee, 0x32, 0x4c, 0x72, 0xea, 0x63, 0x84, 0x89, 0x93, 0x22, 0xa7, 0x71, 0x22,
- 0x52, 0x41, 0x1d, 0x1e, 0x06, 0x11, 0x8d, 0x9c, 0x10, 0x65, 0xec, 0x78, 0x68, 0xab, 0x2e, 0x69,
- 0xab, 0xb6, 0xb1, 0xbe, 0x80, 0xe2, 0x6e, 0x24, 0x38, 0xbe, 0x60, 0xd5, 0x94, 0x53, 0x9e, 0xb5,
- 0x86, 0xd1, 0xaf, 0xaa, 0xb5, 0xc6, 0x94, 0xa1, 0x8c, 0x45, 0x24, 0x91, 0x50, 0xe8, 0x26, 0xe8,
- 0x07, 0x32, 0x4d, 0x72, 0x5d, 0x9b, 0x6a, 0xb3, 0xfe, 0x7c, 0x68, 0x1f, 0xbd, 0xac, 0x94, 0x58,
- 0x3d, 0x64, 0xfd, 0x83, 0x61, 0x0d, 0xfa, 0xc2, 0x39, 0xc3, 0x5d, 0x86, 0x32, 0x25, 0x04, 0x5a,
- 0x85, 0x4d, 0x31, 0x7a, 0x4c, 0x9d, 0xc9, 0x47, 0xe8, 0x88, 0x38, 0x0d, 0x44, 0x24, 0xf5, 0x86,
- 0x42, 0x8f, 0x9f, 0xa0, 0x6b, 0xc8, 0xef, 0xc3, 0x08, 0xab, 0x66, 0xad, 0x07, 0x0d, 0xde, 0xd7,
- 0xea, 0x5f, 0x6f, 0x83, 0xa1, 0xf3, 0x4a, 0x90, 0x0e, 0x9d, 0x50, 0xfa, 0x85, 0x47, 0x05, 0xf5,
- 0x58, 0x55, 0x92, 0x09, 0xf4, 0xd4, 0xfb, 0x95, 0xd6, 0x54, 0xda, 0xb1, 0x41, 0x7e, 0x40, 0x57,
- 0x15, 0x3f, 0x9d, 0x58, 0x6f, 0x4d, 0x9b, 0xb3, 0xfe, 0xdc, 0xb6, 0xd5, 0xde, 0xed, 0x8b, 0xf9,
- 0xf6, 0x9f, 0xd2, 0xf0, 0x2d, 0x52, 0x7b, 0xa9, 0xfc, 0xc6, 0x67, 0x78, 0xf7, 0x4c, 0x22, 0x03,
- 0x68, 0xfe, 0xc7, 0xbc, 0xbc, 0x67, 0x71, 0x24, 0x23, 0x68, 0x5f, 0x39, 0xdb, 0xac, 0xba, 0xe4,
- 0xa1, 0xf8, 0xd4, 0x58, 0x6a, 0xd6, 0x12, 0x8c, 0x73, 0x89, 0xe5, 0x1f, 0x19, 0xd0, 0xe5, 0x18,
- 0x6f, 0x45, 0xfe, 0xfd, 0x6b, 0x89, 0xab, 0x6b, 0xeb, 0x03, 0x8c, 0x4f, 0x9c, 0x0c, 0x65, 0xf1,
- 0xbf, 0x17, 0xb7, 0x65, 0x99, 0x30, 0x39, 0x6f, 0x39, 0xc4, 0xad, 0x06, 0x37, 0x7b, 0x53, 0xbb,
- 0xdd, 0x9b, 0xda, 0xdd, 0xde, 0xd4, 0xae, 0xef, 0xcd, 0x37, 0xee, 0x5b, 0xf5, 0xca, 0xc5, 0x63,
- 0x00, 0x00, 0x00, 0xff, 0xff, 0xe3, 0xa6, 0x90, 0x2e, 0xd9, 0x02, 0x00, 0x00,
+ // 393 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xcf, 0xaa, 0xd3, 0x40,
+ 0x14, 0xc6, 0xcd, 0xed, 0xbd, 0xde, 0xf6, 0x14, 0xa1, 0x4c, 0x8b, 0xc4, 0xb4, 0x84, 0x92, 0x55,
+ 0x57, 0x19, 0x6c, 0x11, 0x8a, 0xae, 0x2c, 0x4a, 0x51, 0xf0, 0x0f, 0x23, 0xee, 0x9d, 0x64, 0x0e,
+ 0x69, 0xb0, 0xc9, 0xa4, 0x99, 0x89, 0x90, 0xb7, 0xf0, 0xb1, 0x5c, 0xfa, 0x08, 0x52, 0x5f, 0xc0,
+ 0x47, 0x90, 0x4c, 0x93, 0x54, 0x6b, 0x8b, 0xab, 0xbb, 0xcb, 0x39, 0xdf, 0xf9, 0x7e, 0xdf, 0xcc,
+ 0xc9, 0xc0, 0x2a, 0x8a, 0xf5, 0xa6, 0x08, 0xfc, 0x50, 0x26, 0x34, 0x59, 0x88, 0x80, 0x26, 0x0b,
+ 0xaa, 0xf2, 0x90, 0xee, 0x0a, 0xcc, 0x4b, 0x1a, 0x61, 0x8a, 0x39, 0xd7, 0x28, 0x68, 0x96, 0x4b,
+ 0x2d, 0x29, 0x17, 0x49, 0x9c, 0xd2, 0x94, 0x27, 0xa8, 0x32, 0x1e, 0xa2, 0x6f, 0xba, 0xe4, 0xc6,
+ 0xb4, 0x9d, 0xf5, 0x05, 0x94, 0x08, 0x52, 0x29, 0xf0, 0x1f, 0x56, 0x4b, 0x39, 0xe5, 0x79, 0x6b,
+ 0x18, 0xbd, 0x6d, 0x5a, 0x6b, 0xd4, 0x0c, 0x55, 0x26, 0x53, 0x85, 0x84, 0x42, 0x37, 0xc7, 0x28,
+ 0x56, 0x3a, 0x2f, 0x6d, 0x6b, 0x6a, 0xcd, 0xfa, 0xf3, 0xa1, 0x7f, 0xf4, 0xb2, 0x5a, 0x62, 0xed,
+ 0x90, 0xf7, 0x09, 0x86, 0x2d, 0xe8, 0xb9, 0x10, 0x0c, 0x77, 0x05, 0x2a, 0x4d, 0x08, 0x5c, 0x57,
+ 0x36, 0xc3, 0xe8, 0x31, 0xf3, 0x4d, 0x9e, 0xc0, 0xad, 0xcc, 0x74, 0x2c, 0x53, 0x65, 0x5f, 0x19,
+ 0xf4, 0xf8, 0x0f, 0x74, 0x0b, 0x79, 0x77, 0x18, 0x61, 0xcd, 0xac, 0x17, 0xc2, 0xc3, 0x56, 0xfc,
+ 0x98, 0x09, 0xae, 0xf1, 0x0e, 0x42, 0x7e, 0x59, 0xf0, 0xa8, 0x55, 0x3f, 0x84, 0x1b, 0x4c, 0xf8,
+ 0x7f, 0x6e, 0x63, 0xc3, 0x6d, 0xa2, 0xa2, 0xca, 0x63, 0x82, 0x7a, 0xac, 0x29, 0xc9, 0x04, 0x7a,
+ 0x66, 0xc9, 0x46, 0xeb, 0x18, 0xed, 0xd8, 0x20, 0xaf, 0xa1, 0x6b, 0x8a, 0x37, 0x3c, 0xb3, 0xaf,
+ 0xa7, 0x9d, 0x59, 0x7f, 0xee, 0xfb, 0xe6, 0xe7, 0xfa, 0x17, 0xf3, 0xfd, 0xf7, 0xb5, 0xe1, 0x65,
+ 0x6a, 0x96, 0xdf, 0xf8, 0x9d, 0x67, 0xf0, 0xe0, 0x2f, 0x89, 0x0c, 0xa0, 0xf3, 0x19, 0xcb, 0xfa,
+ 0x9c, 0xd5, 0x27, 0x19, 0xc1, 0xcd, 0x17, 0xbe, 0x2d, 0x9a, 0x43, 0x1e, 0x8a, 0xa7, 0x57, 0x4b,
+ 0xcb, 0x5b, 0x82, 0x73, 0x2e, 0xb1, 0x7e, 0x08, 0x0e, 0x74, 0x05, 0x66, 0x5b, 0x59, 0xbe, 0x7a,
+ 0x51, 0xe3, 0xda, 0xda, 0x7b, 0x0c, 0xe3, 0x13, 0x27, 0x43, 0x55, 0x3d, 0xa2, 0x8b, 0xdb, 0xf2,
+ 0x5c, 0x98, 0x9c, 0xb7, 0x1c, 0xe2, 0x56, 0x83, 0x6f, 0x7b, 0xd7, 0xfa, 0xbe, 0x77, 0xad, 0x1f,
+ 0x7b, 0xd7, 0xfa, 0xfa, 0xd3, 0xbd, 0x17, 0xdc, 0x37, 0xb7, 0x5c, 0xfc, 0x0e, 0x00, 0x00, 0xff,
+ 0xff, 0xc9, 0xd3, 0x22, 0xa4, 0x3e, 0x03, 0x00, 0x00,
}
diff --git a/src/query/generated/proto/admin/namespace.proto b/src/query/generated/proto/admin/namespace.proto
index d9c59c5b43..44bb1921e3 100644
--- a/src/query/generated/proto/admin/namespace.proto
+++ b/src/query/generated/proto/admin/namespace.proto
@@ -13,6 +13,11 @@ message NamespaceAddRequest {
namespace.NamespaceOptions options = 2;
}
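+// Request to update the options of an existing namespace, identified by name.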
+message NamespaceUpdateRequest {
+ string name = 1;
+ namespace.NamespaceOptions options = 2;
+}
+
message NamespaceSchemaAddRequest {
// Name is the namespace name.
// Add schema to non-existent namespace will get 404.
diff --git a/src/query/generated/proto/admin/topic.pb.go b/src/query/generated/proto/admin/topic.pb.go
index 7180b3fb9a..6448029cc6 100644
--- a/src/query/generated/proto/admin/topic.pb.go
+++ b/src/query/generated/proto/admin/topic.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/m3db/m3/src/query/generated/proto/admin/topic.proto
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -91,10 +91,37 @@ func (m *TopicAddRequest) GetConsumerService() *topicpb.ConsumerService {
return nil
}
+// Request to update a topic and set its consumer services to a new value.
+// The update is applied with CheckAndSet semantics using the version passed.
+type TopicUpdateRequest struct {
+ ConsumerServices []*topicpb.ConsumerService `protobuf:"bytes,1,rep,name=consumer_services,json=consumerServices" json:"consumer_services,omitempty"`
+ Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (m *TopicUpdateRequest) Reset() { *m = TopicUpdateRequest{} }
+func (m *TopicUpdateRequest) String() string { return proto.CompactTextString(m) }
+func (*TopicUpdateRequest) ProtoMessage() {}
+func (*TopicUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorTopic, []int{3} }
+
+func (m *TopicUpdateRequest) GetConsumerServices() []*topicpb.ConsumerService {
+ if m != nil {
+ return m.ConsumerServices
+ }
+ return nil
+}
+
+func (m *TopicUpdateRequest) GetVersion() uint32 {
+ if m != nil {
+ return m.Version
+ }
+ return 0
+}
+
func init() {
proto.RegisterType((*TopicGetResponse)(nil), "admin.TopicGetResponse")
proto.RegisterType((*TopicInitRequest)(nil), "admin.TopicInitRequest")
proto.RegisterType((*TopicAddRequest)(nil), "admin.TopicAddRequest")
+ proto.RegisterType((*TopicUpdateRequest)(nil), "admin.TopicUpdateRequest")
}
func (m *TopicGetResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -180,6 +207,41 @@ func (m *TopicAddRequest) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
+func (m *TopicUpdateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TopicUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ConsumerServices) > 0 {
+ for _, msg := range m.ConsumerServices {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintTopic(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.Version != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintTopic(dAtA, i, uint64(m.Version))
+ }
+ return i, nil
+}
+
func encodeVarintTopic(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -221,6 +283,21 @@ func (m *TopicAddRequest) Size() (n int) {
return n
}
+func (m *TopicUpdateRequest) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.ConsumerServices) > 0 {
+ for _, e := range m.ConsumerServices {
+ l = e.Size()
+ n += 1 + l + sovTopic(uint64(l))
+ }
+ }
+ if m.Version != 0 {
+ n += 1 + sovTopic(uint64(m.Version))
+ }
+ return n
+}
+
func sovTopic(x uint64) (n int) {
for {
n++
@@ -488,6 +565,106 @@ func (m *TopicAddRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *TopicUpdateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTopic
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TopicUpdateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TopicUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsumerServices", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTopic
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTopic
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConsumerServices = append(m.ConsumerServices, &topicpb.ConsumerService{})
+ if err := m.ConsumerServices[len(m.ConsumerServices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ m.Version = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTopic
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Version |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTopic(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthTopic
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipTopic(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
@@ -598,23 +775,24 @@ func init() {
}
var fileDescriptorTopic = []byte{
- // 274 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0xc1, 0x4a, 0xc3, 0x40,
- 0x10, 0x86, 0x5d, 0xa1, 0x0a, 0x2b, 0x6d, 0x43, 0x4e, 0xc1, 0x43, 0x28, 0xc5, 0x43, 0x4e, 0x59,
- 0x30, 0x57, 0x11, 0xb4, 0x07, 0xf1, 0x24, 0x6c, 0xc5, 0x6b, 0x48, 0x76, 0xa7, 0xe9, 0x1e, 0x76,
- 0x37, 0xdd, 0xd9, 0x14, 0x7c, 0x0b, 0x1f, 0xcb, 0xa3, 0x8f, 0x20, 0xf1, 0x45, 0xc4, 0x4d, 0x2a,
- 0x8a, 0x78, 0x9c, 0x6f, 0xfe, 0xff, 0x63, 0x18, 0x7a, 0xdd, 0x28, 0xbf, 0xed, 0xea, 0x5c, 0x58,
- 0xcd, 0x74, 0x21, 0x6b, 0xa6, 0x0b, 0x86, 0x4e, 0xb0, 0x5d, 0x07, 0xee, 0x99, 0x35, 0x60, 0xc0,
- 0x55, 0x1e, 0x24, 0x6b, 0x9d, 0xf5, 0x96, 0x55, 0x52, 0x2b, 0xc3, 0xbc, 0x6d, 0x95, 0xc8, 0x03,
- 0x89, 0x27, 0x01, 0x9d, 0xff, 0xa7, 0xd1, 0xd8, 0xfc, 0x91, 0x84, 0x7a, 0x5b, 0xff, 0xd4, 0x2c,
- 0x39, 0x8d, 0x1e, 0xbf, 0xc6, 0x3b, 0xf0, 0x1c, 0xb0, 0xb5, 0x06, 0x21, 0xbe, 0xa0, 0x93, 0x10,
- 0x49, 0xc8, 0x82, 0x64, 0x67, 0x97, 0xb3, 0x7c, 0x2c, 0xe6, 0x21, 0xc9, 0x87, 0x65, 0x9c, 0xd0,
- 0xd3, 0x3d, 0x38, 0x54, 0xd6, 0x24, 0xc7, 0x0b, 0x92, 0x4d, 0xf9, 0x61, 0x5c, 0x5e, 0x8d, 0xce,
- 0x7b, 0xa3, 0x3c, 0x87, 0x5d, 0x07, 0xe8, 0xe3, 0x8c, 0x46, 0xa6, 0xd3, 0x35, 0xb8, 0xd2, 0x6e,
- 0x4a, 0xdc, 0x56, 0x4e, 0x62, 0xd0, 0x4f, 0xf9, 0x6c, 0xe0, 0x0f, 0x9b, 0x75, 0xa0, 0xcb, 0x27,
- 0x3a, 0x0f, 0xed, 0x1b, 0x29, 0x0f, 0xe5, 0x15, 0x8d, 0x84, 0x35, 0xd8, 0x69, 0x70, 0x25, 0x82,
- 0xdb, 0x2b, 0x01, 0xe3, 0x6d, 0xc9, 0xf7, 0x6d, 0xab, 0x31, 0xb0, 0x1e, 0xf6, 0x7c, 0x2e, 0x7e,
- 0x83, 0xdb, 0xe8, 0xb5, 0x4f, 0xc9, 0x5b, 0x9f, 0x92, 0xf7, 0x3e, 0x25, 0x2f, 0x1f, 0xe9, 0x51,
- 0x7d, 0x12, 0x5e, 0x50, 0x7c, 0x06, 0x00, 0x00, 0xff, 0xff, 0x30, 0x35, 0xc2, 0x23, 0x8b, 0x01,
- 0x00, 0x00,
+ // 303 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0xcd, 0x4a, 0xf3, 0x40,
+ 0x14, 0x86, 0xbf, 0xf9, 0xa4, 0x0a, 0x23, 0x6d, 0x63, 0x56, 0xc1, 0x45, 0x28, 0xc1, 0x45, 0x57,
+ 0x19, 0xb0, 0x5b, 0x11, 0xb4, 0x88, 0xb8, 0x12, 0xa6, 0xea, 0xb6, 0x24, 0x33, 0xa7, 0xed, 0x2c,
+ 0x66, 0x26, 0x9d, 0x9f, 0x82, 0x77, 0xe1, 0x65, 0xb9, 0xf4, 0x12, 0x24, 0xde, 0x88, 0x74, 0x92,
+ 0x88, 0x56, 0xea, 0xf2, 0x3c, 0xe7, 0xbc, 0x0f, 0x2f, 0x07, 0x5f, 0x2e, 0x85, 0x5b, 0xf9, 0x32,
+ 0x67, 0x5a, 0x12, 0x39, 0xe1, 0x25, 0x91, 0x13, 0x62, 0x0d, 0x23, 0x6b, 0x0f, 0xe6, 0x99, 0x2c,
+ 0x41, 0x81, 0x29, 0x1c, 0x70, 0x52, 0x19, 0xed, 0x34, 0x29, 0xb8, 0x14, 0x8a, 0x38, 0x5d, 0x09,
+ 0x96, 0x07, 0x12, 0xf7, 0x02, 0x3a, 0xdd, 0xa7, 0x91, 0x76, 0xf9, 0x4b, 0x12, 0xe2, 0x55, 0xf9,
+ 0x5d, 0x93, 0x51, 0x1c, 0x3d, 0x6c, 0xc7, 0x5b, 0x70, 0x14, 0x6c, 0xa5, 0x95, 0x85, 0xf8, 0x0c,
+ 0xf7, 0xc2, 0x49, 0x82, 0x46, 0x68, 0x7c, 0x7c, 0x3e, 0xc8, 0xdb, 0x60, 0x1e, 0x2e, 0x69, 0xb3,
+ 0x8c, 0x13, 0x7c, 0xb4, 0x01, 0x63, 0x85, 0x56, 0xc9, 0xff, 0x11, 0x1a, 0xf7, 0x69, 0x37, 0x66,
+ 0x17, 0xad, 0xf3, 0x4e, 0x09, 0x47, 0x61, 0xed, 0xc1, 0xba, 0x78, 0x8c, 0x23, 0xe5, 0x65, 0x09,
+ 0x66, 0xae, 0x17, 0x73, 0xbb, 0x2a, 0x0c, 0xb7, 0x41, 0xdf, 0xa7, 0x83, 0x86, 0xdf, 0x2f, 0x66,
+ 0x81, 0x66, 0x4f, 0x78, 0x18, 0xd2, 0x57, 0x9c, 0x77, 0xe1, 0x29, 0x8e, 0x98, 0x56, 0xd6, 0x4b,
+ 0x30, 0x73, 0x0b, 0x66, 0x23, 0x18, 0xb4, 0xdd, 0x92, 0xaf, 0x6e, 0xd3, 0xf6, 0x60, 0xd6, 0xec,
+ 0xe9, 0x90, 0xfd, 0x04, 0x99, 0xc7, 0x71, 0xf0, 0x3e, 0x56, 0xbc, 0x70, 0xd0, 0xa9, 0x6f, 0xf0,
+ 0xc9, 0xae, 0x7a, 0x5b, 0xec, 0xe0, 0x4f, 0x77, 0xb4, 0xe3, 0xb6, 0xfb, 0x9f, 0x71, 0x1d, 0xbd,
+ 0xd6, 0x29, 0x7a, 0xab, 0x53, 0xf4, 0x5e, 0xa7, 0xe8, 0xe5, 0x23, 0xfd, 0x57, 0x1e, 0x86, 0xcf,
+ 0x4f, 0x3e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x38, 0x38, 0xc7, 0xf0, 0x02, 0x02, 0x00, 0x00,
}
diff --git a/src/query/generated/proto/admin/topic.proto b/src/query/generated/proto/admin/topic.proto
index b5963d0b90..0983cf7eaa 100644
--- a/src/query/generated/proto/admin/topic.proto
+++ b/src/query/generated/proto/admin/topic.proto
@@ -15,3 +15,10 @@ message TopicInitRequest {
message TopicAddRequest {
topicpb.ConsumerService consumer_service = 1;
}
+
+// Request to update a topic and set its consumer services to a new value.
+// CheckAndSet is used based on the version passed.
+message TopicUpdateRequest {
+ repeated topicpb.ConsumerService consumer_services = 1;
+ uint32 version = 2;
+}
diff --git a/src/query/generated/proto/prompb/types.pb.go b/src/query/generated/proto/prompb/types.pb.go
index e804250c84..542008f7dc 100644
--- a/src/query/generated/proto/prompb/types.pb.go
+++ b/src/query/generated/proto/prompb/types.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/m3db/m3/src/query/generated/proto/prompb/types.proto
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -37,6 +37,51 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
+type Type int32
+
+const (
+ Type_GAUGE Type = 0
+ Type_COUNTER Type = 1
+ Type_TIMER Type = 2
+)
+
+var Type_name = map[int32]string{
+ 0: "GAUGE",
+ 1: "COUNTER",
+ 2: "TIMER",
+}
+var Type_value = map[string]int32{
+ "GAUGE": 0,
+ "COUNTER": 1,
+ "TIMER": 2,
+}
+
+func (x Type) String() string {
+ return proto.EnumName(Type_name, int32(x))
+}
+func (Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} }
+
+type Source int32
+
+const (
+ Source_PROMETHEUS Source = 0
+ Source_GRAPHITE Source = 1
+)
+
+var Source_name = map[int32]string{
+ 0: "PROMETHEUS",
+ 1: "GRAPHITE",
+}
+var Source_value = map[string]int32{
+ "PROMETHEUS": 0,
+ "GRAPHITE": 1,
+}
+
+func (x Source) String() string {
+ return proto.EnumName(Source_name, int32(x))
+}
+func (Source) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} }
+
type LabelMatcher_Type int32
const (
@@ -91,6 +136,10 @@ func (m *Sample) GetTimestamp() int64 {
type TimeSeries struct {
Labels []Label `protobuf:"bytes,1,rep,name=labels" json:"labels"`
Samples []Sample `protobuf:"bytes,2,rep,name=samples" json:"samples"`
+ // NB: These are custom fields that M3 uses. They start at 101 so that they
+ // should never clash with prometheus fields.
+ Type Type `protobuf:"varint,101,opt,name=type,proto3,enum=m3prometheus.Type" json:"type,omitempty"`
+ Source Source `protobuf:"varint,102,opt,name=source,proto3,enum=m3prometheus.Source" json:"source,omitempty"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
@@ -112,6 +161,20 @@ func (m *TimeSeries) GetSamples() []Sample {
return nil
}
+func (m *TimeSeries) GetType() Type {
+ if m != nil {
+ return m.Type
+ }
+ return Type_GAUGE
+}
+
+func (m *TimeSeries) GetSource() Source {
+ if m != nil {
+ return m.Source
+ }
+ return Source_PROMETHEUS
+}
+
type Label struct {
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
@@ -191,6 +254,8 @@ func init() {
proto.RegisterType((*Label)(nil), "m3prometheus.Label")
proto.RegisterType((*Labels)(nil), "m3prometheus.Labels")
proto.RegisterType((*LabelMatcher)(nil), "m3prometheus.LabelMatcher")
+ proto.RegisterEnum("m3prometheus.Type", Type_name, Type_value)
+ proto.RegisterEnum("m3prometheus.Source", Source_name, Source_value)
proto.RegisterEnum("m3prometheus.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value)
}
func (m *Sample) Marshal() (dAtA []byte, err error) {
@@ -261,6 +326,20 @@ func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) {
i += n
}
}
+ if m.Type != 0 {
+ dAtA[i] = 0xa8
+ i++
+ dAtA[i] = 0x6
+ i++
+ i = encodeVarintTypes(dAtA, i, uint64(m.Type))
+ }
+ if m.Source != 0 {
+ dAtA[i] = 0xb0
+ i++
+ dAtA[i] = 0x6
+ i++
+ i = encodeVarintTypes(dAtA, i, uint64(m.Source))
+ }
return i, nil
}
@@ -395,6 +474,12 @@ func (m *TimeSeries) Size() (n int) {
n += 1 + l + sovTypes(uint64(l))
}
}
+ if m.Type != 0 {
+ n += 2 + sovTypes(uint64(m.Type))
+ }
+ if m.Source != 0 {
+ n += 2 + sovTypes(uint64(m.Source))
+ }
return n
}
@@ -625,6 +710,44 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 101:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= (Type(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 102:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ }
+ m.Source = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Source |= (Source(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
@@ -1080,29 +1203,35 @@ func init() {
}
var fileDescriptorTypes = []byte{
- // 369 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0xc1, 0x6a, 0xdb, 0x40,
- 0x10, 0xf5, 0x4a, 0xb2, 0x4c, 0xa7, 0xa6, 0x88, 0xad, 0x0f, 0xa2, 0x14, 0xd9, 0xe8, 0xa4, 0x4b,
- 0x25, 0x6c, 0xf5, 0xd6, 0x42, 0xc1, 0xa0, 0x9b, 0x1b, 0xb0, 0xec, 0x53, 0x6e, 0x92, 0x3d, 0x91,
- 0x05, 0x5a, 0x4b, 0xd1, 0xae, 0x02, 0xfe, 0x8b, 0xdc, 0xf2, 0x4b, 0x3e, 0xe6, 0x0b, 0x42, 0x70,
- 0x7e, 0x24, 0x68, 0xd7, 0x89, 0x1d, 0xf0, 0x25, 0x97, 0x65, 0xe6, 0xed, 0x7b, 0x6f, 0xde, 0x32,
- 0x0b, 0xff, 0xb2, 0x5c, 0x6c, 0x9a, 0xd4, 0x5f, 0x95, 0x2c, 0x60, 0xe1, 0x3a, 0x0d, 0x58, 0x18,
- 0xf0, 0x7a, 0x15, 0xdc, 0x36, 0x58, 0xef, 0x82, 0x0c, 0xb7, 0x58, 0x27, 0x02, 0xd7, 0x41, 0x55,
- 0x97, 0xa2, 0x6c, 0x4f, 0x56, 0xa5, 0x81, 0xd8, 0x55, 0xc8, 0x7d, 0x09, 0xd1, 0x3e, 0x0b, 0x5b,
- 0x14, 0xc5, 0x06, 0x1b, 0xfe, 0xe3, 0xd7, 0x99, 0x5d, 0x56, 0x66, 0xa5, 0xd2, 0xa5, 0xcd, 0x8d,
- 0xec, 0x94, 0x49, 0x5b, 0x29, 0xb1, 0xfb, 0x17, 0xcc, 0x45, 0xc2, 0xaa, 0x02, 0xe9, 0x00, 0xba,
- 0x77, 0x49, 0xd1, 0xa0, 0x4d, 0x46, 0xc4, 0x23, 0xb1, 0x6a, 0xe8, 0x4f, 0xf8, 0x22, 0x72, 0x86,
- 0x5c, 0x24, 0xac, 0xb2, 0xb5, 0x11, 0xf1, 0xf4, 0xf8, 0x04, 0xb8, 0x0d, 0xc0, 0x32, 0x67, 0xb8,
- 0xc0, 0x3a, 0x47, 0x4e, 0xc7, 0x60, 0x16, 0x49, 0x8a, 0x05, 0xb7, 0xc9, 0x48, 0xf7, 0xbe, 0x4e,
- 0xbe, 0xfb, 0xe7, 0xc9, 0xfc, 0x59, 0x7b, 0x37, 0x35, 0xf6, 0x4f, 0xc3, 0x4e, 0x7c, 0x24, 0xd2,
- 0xdf, 0xd0, 0xe3, 0x72, 0x3c, 0xb7, 0x35, 0xa9, 0x19, 0x7c, 0xd4, 0xa8, 0x6c, 0x47, 0xd1, 0x1b,
- 0xd5, 0x1d, 0x43, 0x57, 0x9a, 0x51, 0x0a, 0xc6, 0x36, 0x61, 0x2a, 0x72, 0x3f, 0x96, 0xf5, 0xe9,
- 0x1d, 0x9a, 0x04, 0x55, 0xe3, 0xfe, 0x01, 0x73, 0xa6, 0x46, 0x7e, 0x3e, 0xa5, 0xfb, 0x40, 0xa0,
- 0x2f, 0xf1, 0xff, 0x89, 0x58, 0x6d, 0xb0, 0xa6, 0x21, 0x18, 0xed, 0x06, 0xe4, 0xdc, 0x6f, 0x93,
- 0xe1, 0x05, 0x87, 0x23, 0xd3, 0x5f, 0xee, 0x2a, 0x8c, 0x25, 0xf9, 0x3d, 0xac, 0x76, 0x29, 0xac,
- 0x7e, 0x1e, 0xd6, 0x03, 0xa3, 0xd5, 0x51, 0x13, 0xb4, 0x68, 0x6e, 0x75, 0x68, 0x0f, 0xf4, 0xab,
- 0x68, 0x6e, 0x91, 0x16, 0x88, 0x23, 0x4b, 0x93, 0x40, 0x1c, 0x59, 0xfa, 0xd4, 0xde, 0x1f, 0x1c,
- 0xf2, 0x78, 0x70, 0xc8, 0xf3, 0xc1, 0x21, 0xf7, 0x2f, 0x4e, 0xe7, 0xda, 0x54, 0x3f, 0x24, 0x35,
- 0xe5, 0x7e, 0xc3, 0xd7, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0x54, 0xd9, 0x83, 0x5f, 0x02, 0x00,
- 0x00,
+ // 467 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x4f, 0x6b, 0xdb, 0x4e,
+ 0x10, 0xd5, 0x4a, 0xb6, 0xfc, 0xcb, 0xc4, 0x84, 0x65, 0x7f, 0x39, 0x88, 0x52, 0x1c, 0xa3, 0x43,
+ 0x50, 0x43, 0x6b, 0x91, 0xa8, 0xb7, 0x16, 0x4a, 0x52, 0x16, 0x27, 0x10, 0xe7, 0xcf, 0x5a, 0xbe,
+ 0xf4, 0x26, 0x39, 0x13, 0xdb, 0xe0, 0x8d, 0x54, 0xad, 0x54, 0xf0, 0xb7, 0xe8, 0xad, 0x5f, 0x29,
+ 0xd0, 0x4b, 0x3f, 0x41, 0x29, 0xee, 0x17, 0x29, 0xda, 0x55, 0x88, 0x53, 0x72, 0xe9, 0x45, 0xec,
+ 0xbc, 0x79, 0x6f, 0xf4, 0xe6, 0x31, 0xf0, 0x61, 0xb6, 0x28, 0xe7, 0x55, 0x3a, 0x98, 0x66, 0x32,
+ 0x94, 0xd1, 0x4d, 0x1a, 0xca, 0x28, 0x54, 0xc5, 0x34, 0xfc, 0x5c, 0x61, 0xb1, 0x0a, 0x67, 0x78,
+ 0x87, 0x45, 0x52, 0xe2, 0x4d, 0x98, 0x17, 0x59, 0x99, 0xd5, 0x5f, 0x99, 0xa7, 0x61, 0xb9, 0xca,
+ 0x51, 0x0d, 0x34, 0xc4, 0xba, 0x32, 0xaa, 0x51, 0x2c, 0xe7, 0x58, 0xa9, 0x17, 0x6f, 0x36, 0xc6,
+ 0xcd, 0xb2, 0x59, 0x66, 0x74, 0x69, 0x75, 0xab, 0x2b, 0x33, 0xa4, 0x7e, 0x19, 0xb1, 0xff, 0x1e,
+ 0xdc, 0x71, 0x22, 0xf3, 0x25, 0xb2, 0x5d, 0x68, 0x7f, 0x49, 0x96, 0x15, 0x7a, 0xa4, 0x4f, 0x02,
+ 0x22, 0x4c, 0xc1, 0x5e, 0xc2, 0x56, 0xb9, 0x90, 0xa8, 0xca, 0x44, 0xe6, 0x9e, 0xdd, 0x27, 0x81,
+ 0x23, 0x1e, 0x01, 0xff, 0x3b, 0x01, 0x88, 0x17, 0x12, 0xc7, 0x58, 0x2c, 0x50, 0xb1, 0x43, 0x70,
+ 0x97, 0x49, 0x8a, 0x4b, 0xe5, 0x91, 0xbe, 0x13, 0x6c, 0x1f, 0xfd, 0x3f, 0xd8, 0xb4, 0x36, 0x38,
+ 0xaf, 0x7b, 0x27, 0xad, 0xfb, 0x9f, 0x7b, 0x96, 0x68, 0x88, 0xec, 0x2d, 0x74, 0x94, 0xfe, 0xbf,
+ 0xf2, 0x6c, 0xad, 0xd9, 0x7d, 0xaa, 0x31, 0xe6, 0x1a, 0xd1, 0x03, 0x95, 0xed, 0x43, 0xab, 0x4e,
+ 0xc0, 0xc3, 0x3e, 0x09, 0x76, 0x8e, 0xd8, 0x53, 0x49, 0xbc, 0xca, 0x51, 0xe8, 0x3e, 0x7b, 0x0d,
+ 0xae, 0xca, 0xaa, 0x62, 0x8a, 0xde, 0xad, 0x66, 0xfe, 0x3d, 0x5c, 0xf7, 0x44, 0xc3, 0xf1, 0x0f,
+ 0xa1, 0xad, 0x2d, 0x32, 0x06, 0xad, 0xbb, 0x44, 0x9a, 0x24, 0xba, 0x42, 0xbf, 0x1f, 0xe3, 0xb1,
+ 0x35, 0x68, 0x0a, 0xff, 0x1d, 0xb8, 0xe7, 0x66, 0x91, 0x7f, 0xdf, 0xdd, 0xff, 0x46, 0xa0, 0xab,
+ 0xf1, 0x51, 0x52, 0x4e, 0xe7, 0x58, 0xb0, 0xa8, 0x59, 0x8b, 0x68, 0xb3, 0x7b, 0xcf, 0x4c, 0x68,
+ 0x98, 0x9b, 0x3b, 0x3e, 0x98, 0xb5, 0x9f, 0x33, 0xeb, 0x6c, 0x9a, 0x0d, 0xa0, 0x55, 0xeb, 0x98,
+ 0x0b, 0x36, 0xbf, 0xa6, 0x16, 0xeb, 0x80, 0x73, 0xc1, 0xaf, 0x29, 0xa9, 0x01, 0xc1, 0xa9, 0xad,
+ 0x01, 0xc1, 0xa9, 0x73, 0xf0, 0xaa, 0x61, 0x6e, 0x41, 0x7b, 0x78, 0x3c, 0x19, 0x72, 0x6a, 0xb1,
+ 0x6d, 0xe8, 0x7c, 0xbc, 0x9c, 0x5c, 0xc4, 0x5c, 0x50, 0x52, 0xe3, 0xf1, 0xd9, 0x88, 0x0b, 0x6a,
+ 0x1f, 0xec, 0x83, 0x6b, 0x62, 0x64, 0x3b, 0x00, 0x57, 0xe2, 0x72, 0xc4, 0xe3, 0x53, 0x3e, 0x19,
+ 0x53, 0x8b, 0x75, 0xe1, 0xbf, 0xa1, 0x38, 0xbe, 0x3a, 0x3d, 0x8b, 0x39, 0x25, 0x27, 0xde, 0xfd,
+ 0xba, 0x47, 0x7e, 0xac, 0x7b, 0xe4, 0xd7, 0xba, 0x47, 0xbe, 0xfe, 0xee, 0x59, 0x9f, 0x5c, 0x73,
+ 0xcb, 0xa9, 0xab, 0x2f, 0x31, 0xfa, 0x13, 0x00, 0x00, 0xff, 0xff, 0xe2, 0xad, 0x94, 0xb6, 0x09,
+ 0x03, 0x00, 0x00,
}
diff --git a/src/query/generated/proto/prompb/types.proto b/src/query/generated/proto/prompb/types.proto
index a716cb13c1..68cdf32d7e 100644
--- a/src/query/generated/proto/prompb/types.proto
+++ b/src/query/generated/proto/prompb/types.proto
@@ -12,8 +12,12 @@ message Sample {
}
message TimeSeries {
- repeated Label labels = 1 [(gogoproto.nullable) = false];;
- repeated Sample samples = 2 [(gogoproto.nullable) = false];;
+ repeated Label labels = 1 [(gogoproto.nullable) = false];
+ repeated Sample samples = 2 [(gogoproto.nullable) = false];
+ // NB: These are custom fields that M3 uses. They start at 101 so that they
+ // should never clash with prometheus fields.
+ Type type = 101;
+ Source source = 102;
}
message Label {
@@ -37,3 +41,14 @@ message LabelMatcher {
bytes name = 2;
bytes value = 3;
}
+
+enum Type {
+ GAUGE = 0;
+ COUNTER = 1;
+ TIMER = 2;
+}
+
+enum Source {
+ PROMETHEUS = 0;
+ GRAPHITE = 1;
+}
\ No newline at end of file
diff --git a/src/query/generated/proto/rpcpb/query.pb.go b/src/query/generated/proto/rpcpb/query.pb.go
index f544759dbe..fc91cf5ccf 100644
--- a/src/query/generated/proto/rpcpb/query.pb.go
+++ b/src/query/generated/proto/rpcpb/query.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/m3db/m3/src/query/generated/proto/rpcpb/query.proto
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -858,6 +858,7 @@ type M3Segment struct {
Tail []byte `protobuf:"bytes,2,opt,name=tail,proto3" json:"tail,omitempty"`
StartTime int64 `protobuf:"varint,3,opt,name=startTime,proto3" json:"startTime,omitempty"`
BlockSize int64 `protobuf:"varint,4,opt,name=blockSize,proto3" json:"blockSize,omitempty"`
+ Checksum uint32 `protobuf:"varint,5,opt,name=checksum,proto3" json:"checksum,omitempty"`
}
func (m *M3Segment) Reset() { *m = M3Segment{} }
@@ -893,6 +894,13 @@ func (m *M3Segment) GetBlockSize() int64 {
return 0
}
+func (m *M3Segment) GetChecksum() uint32 {
+ if m != nil {
+ return m.Checksum
+ }
+ return 0
+}
+
type SearchRequest struct {
// Types that are valid to be assigned to Matchers:
// *SearchRequest_TagMatchers
@@ -2626,6 +2634,11 @@ func (m *M3Segment) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintQuery(dAtA, i, uint64(m.BlockSize))
}
+ if m.Checksum != 0 {
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintQuery(dAtA, i, uint64(m.Checksum))
+ }
return i, nil
}
@@ -3534,6 +3547,9 @@ func (m *M3Segment) Size() (n int) {
if m.BlockSize != 0 {
n += 1 + sovQuery(uint64(m.BlockSize))
}
+ if m.Checksum != 0 {
+ n += 1 + sovQuery(uint64(m.Checksum))
+ }
return n
}
@@ -5976,6 +5992,25 @@ func (m *M3Segment) Unmarshal(dAtA []byte) error {
break
}
}
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType)
+ }
+ m.Checksum = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Checksum |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
@@ -7769,108 +7804,109 @@ func init() {
}
var fileDescriptorQuery = []byte{
- // 1644 bytes of a gzipped FileDescriptorProto
+ // 1662 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0xdb, 0x72, 0x1b, 0x49,
0x19, 0xd6, 0x68, 0xac, 0xd3, 0xaf, 0x83, 0xe5, 0xb6, 0xd9, 0xc8, 0x26, 0x18, 0xd5, 0x00, 0x8b,
0xf1, 0x06, 0x2b, 0x91, 0xb3, 0x2c, 0x4b, 0x15, 0x07, 0xd9, 0x52, 0x6c, 0xd7, 0xda, 0x92, 0xb7,
- 0x35, 0x26, 0x81, 0x82, 0x32, 0xad, 0x51, 0x67, 0x3c, 0x65, 0xcd, 0x61, 0x67, 0x46, 0x4b, 0x9c,
- 0xe2, 0x21, 0x28, 0x8a, 0x27, 0x80, 0x82, 0x27, 0xc8, 0x23, 0x70, 0xc1, 0x25, 0x8f, 0x40, 0x05,
- 0x2e, 0x78, 0x0c, 0xaa, 0x7b, 0x7a, 0x4e, 0x9a, 0x71, 0x25, 0x95, 0xbb, 0xf9, 0xcf, 0xfd, 0xff,
- 0xfd, 0xf5, 0xd7, 0x2d, 0xc1, 0xcf, 0x74, 0xc3, 0xbf, 0x59, 0xce, 0x0e, 0x34, 0xdb, 0xec, 0x99,
- 0x87, 0xf3, 0x59, 0xcf, 0x3c, 0xec, 0x79, 0xae, 0xd6, 0xfb, 0x6a, 0x49, 0xdd, 0xbb, 0x9e, 0x4e,
- 0x2d, 0xea, 0x12, 0x9f, 0xce, 0x7b, 0x8e, 0x6b, 0xfb, 0x76, 0xcf, 0x75, 0x34, 0x67, 0x16, 0xd8,
- 0x0e, 0xb8, 0x06, 0xc9, 0xae, 0xa3, 0xed, 0x0c, 0xef, 0x49, 0x62, 0x52, 0xdf, 0x35, 0x34, 0x2f,
- 0x93, 0xc6, 0xb1, 0x17, 0x86, 0x76, 0xe7, 0xcc, 0xc4, 0x47, 0x90, 0x4a, 0x59, 0x87, 0xe6, 0x29,
- 0x25, 0x0b, 0xff, 0x06, 0xd3, 0xaf, 0x96, 0xd4, 0xf3, 0x95, 0x97, 0xd0, 0x0a, 0x15, 0x9e, 0x63,
- 0x5b, 0x1e, 0x45, 0x1f, 0x43, 0x6b, 0xe9, 0xf8, 0x86, 0x49, 0x87, 0x4b, 0x97, 0xf8, 0x86, 0x6d,
- 0x75, 0xa4, 0xae, 0xb4, 0x57, 0xc3, 0x2b, 0x5a, 0xf4, 0x08, 0x36, 0x02, 0xcd, 0x98, 0x58, 0xb6,
- 0x47, 0x35, 0xdb, 0x9a, 0x7b, 0x9d, 0x62, 0x57, 0xda, 0x93, 0x71, 0xd6, 0xa0, 0xfc, 0x4d, 0x82,
- 0xc6, 0x33, 0xea, 0x6b, 0x61, 0x61, 0xb4, 0x05, 0x25, 0xcf, 0x27, 0xae, 0xcf, 0xb3, 0xcb, 0x38,
- 0x10, 0x50, 0x1b, 0x64, 0x6a, 0xcd, 0x45, 0x1a, 0xf6, 0x89, 0x9e, 0x42, 0xdd, 0x27, 0xfa, 0x05,
- 0xf1, 0xb5, 0x1b, 0xea, 0x7a, 0x1d, 0xb9, 0x2b, 0xed, 0xd5, 0xfb, 0xed, 0x03, 0xd7, 0xd1, 0x0e,
- 0xd4, 0x58, 0x7f, 0x5a, 0xc0, 0x49, 0x37, 0xf4, 0x09, 0x54, 0x6c, 0x87, 0x2d, 0xd3, 0xeb, 0xac,
- 0xf1, 0x88, 0x0d, 0x1e, 0xc1, 0x57, 0x30, 0x09, 0x0c, 0x38, 0xf4, 0x38, 0x02, 0xa8, 0x9a, 0x22,
- 0x50, 0xf9, 0x05, 0xd4, 0x13, 0x69, 0xd1, 0x93, 0x74, 0x75, 0xa9, 0x2b, 0xef, 0xd5, 0xfb, 0xeb,
- 0x2b, 0xd5, 0x53, 0xa5, 0x95, 0xdf, 0x00, 0xc4, 0x26, 0x84, 0x60, 0xcd, 0x22, 0x26, 0xe5, 0x5d,
- 0x36, 0x30, 0xff, 0x66, 0xad, 0x7f, 0x4d, 0x16, 0x4b, 0xca, 0xdb, 0x6c, 0xe0, 0x40, 0x40, 0xdf,
- 0x85, 0x35, 0xff, 0xce, 0xa1, 0xbc, 0xc3, 0x96, 0xe8, 0x50, 0x64, 0x51, 0xef, 0x1c, 0x8a, 0xb9,
- 0x55, 0xf9, 0x6f, 0x51, 0xcc, 0x51, 0x74, 0xc1, 0x92, 0x2d, 0x0c, 0xd3, 0x88, 0xe6, 0xc8, 0x05,
- 0xf4, 0x29, 0x54, 0x5d, 0xea, 0x31, 0x64, 0xf8, 0xbc, 0x4a, 0xbd, 0xbf, 0xcd, 0x13, 0x62, 0xa1,
- 0xfc, 0x92, 0xc1, 0x2b, 0x1c, 0x44, 0xe4, 0x8a, 0xf6, 0xa1, 0xbd, 0xb0, 0xed, 0xdb, 0x19, 0xd1,
- 0x6e, 0xa3, 0xdd, 0x97, 0x79, 0xde, 0x8c, 0x1e, 0x7d, 0x0a, 0x8d, 0xa5, 0x45, 0x74, 0xdd, 0xa5,
- 0x3a, 0x83, 0x1d, 0x9f, 0x73, 0x2b, 0x9c, 0x33, 0xb1, 0xec, 0xa5, 0x1f, 0xe4, 0xc7, 0x29, 0x37,
- 0xf4, 0x04, 0x20, 0x11, 0x54, 0xba, 0x2f, 0x28, 0xe1, 0x84, 0x8e, 0x61, 0x33, 0x96, 0x98, 0xdd,
- 0x34, 0x5e, 0xd3, 0x79, 0xa7, 0x7c, 0x5f, 0x6c, 0x9e, 0x37, 0x83, 0xab, 0x61, 0x69, 0x8b, 0xe5,
- 0x9c, 0x62, 0xea, 0xd9, 0x8b, 0x25, 0xef, 0xad, 0xd2, 0x95, 0xf6, 0xaa, 0x38, 0x6b, 0x50, 0xfe,
- 0x22, 0xc1, 0x56, 0xde, 0xac, 0xd0, 0x10, 0x36, 0xdc, 0xa4, 0x5e, 0x0d, 0xb7, 0xac, 0xde, 0xff,
- 0x28, 0x3b, 0x61, 0xbe, 0x71, 0xd9, 0x80, 0x6c, 0x16, 0xa2, 0x87, 0x40, 0xcd, 0xcb, 0x42, 0x74,
- 0x0f, 0x67, 0x03, 0x94, 0x3f, 0x4b, 0xb0, 0x91, 0x29, 0x87, 0xfa, 0x50, 0x17, 0x9c, 0xc0, 0xd7,
- 0x26, 0x25, 0xe1, 0x14, 0xeb, 0x71, 0xd2, 0x09, 0x7d, 0x01, 0x5b, 0x42, 0x9c, 0xfa, 0xb6, 0x4b,
- 0x74, 0x7a, 0xc9, 0x49, 0x43, 0x40, 0xe7, 0xc1, 0x41, 0x48, 0x26, 0x07, 0x29, 0x33, 0xce, 0x0d,
- 0x52, 0x9e, 0xaf, 0xae, 0x8a, 0xe8, 0x1e, 0x7a, 0x94, 0x00, 0xa4, 0x94, 0x7f, 0x86, 0x13, 0x38,
- 0xe4, 0xe4, 0xe0, 0x1a, 0x4e, 0xa7, 0xd8, 0x95, 0xd9, 0x09, 0xe1, 0x82, 0xf2, 0x5b, 0x68, 0x0a,
- 0x0a, 0x11, 0x54, 0xf5, 0x1d, 0x28, 0x7b, 0xd4, 0x35, 0x68, 0x78, 0x30, 0xeb, 0x3c, 0xe5, 0x94,
- 0xab, 0xb0, 0x30, 0xa1, 0xef, 0xc3, 0x9a, 0x49, 0x7d, 0x22, 0x7a, 0xd9, 0x0c, 0xc7, 0xbb, 0x5c,
- 0xf8, 0x17, 0xd4, 0x27, 0x73, 0xe2, 0x13, 0xcc, 0x1d, 0x94, 0x37, 0x12, 0x94, 0xa7, 0xe9, 0x18,
- 0x29, 0x11, 0x13, 0x98, 0xd2, 0x31, 0xe8, 0xa7, 0xd0, 0x98, 0x53, 0xcd, 0x36, 0x1d, 0x97, 0x7a,
- 0x1e, 0x9d, 0x47, 0x03, 0x63, 0x01, 0xc3, 0x84, 0x21, 0x08, 0x3e, 0x2d, 0xe0, 0x94, 0x3b, 0xfa,
- 0x1c, 0x20, 0x11, 0x2c, 0x27, 0x82, 0x2f, 0x0e, 0x8f, 0xb3, 0xc1, 0x09, 0xe7, 0xa3, 0x8a, 0x20,
- 0x11, 0xe5, 0x05, 0xb4, 0xd2, 0x4b, 0x43, 0x2d, 0x28, 0x1a, 0x73, 0xc1, 0x38, 0x45, 0x63, 0x8e,
- 0x1e, 0x42, 0x8d, 0xb3, 0xab, 0x6a, 0x98, 0x54, 0x50, 0x6b, 0xac, 0x40, 0x1d, 0xa8, 0x50, 0x6b,
- 0xce, 0x6d, 0xc1, 0x51, 0x0f, 0x45, 0x65, 0x06, 0x28, 0xdb, 0x03, 0x3a, 0x00, 0x60, 0x55, 0x1c,
- 0xdb, 0xb0, 0xfc, 0x70, 0xf0, 0xad, 0xa0, 0xe1, 0x50, 0x8d, 0x13, 0x1e, 0xe8, 0x21, 0xac, 0xf9,
- 0x0c, 0xde, 0x45, 0xee, 0x59, 0x0d, 0x77, 0x1d, 0x73, 0xad, 0xf2, 0x73, 0xa8, 0x45, 0x61, 0x6c,
- 0xa1, 0xec, 0xde, 0xf0, 0x7c, 0x62, 0x3a, 0x82, 0xcf, 0x62, 0x45, 0x9a, 0x36, 0x25, 0x41, 0x9b,
- 0x4a, 0x0f, 0x64, 0x95, 0xe8, 0xef, 0xcf, 0xb3, 0xca, 0x2b, 0x40, 0xd9, 0xe1, 0xb2, 0x5b, 0x2f,
- 0xee, 0x94, 0x1f, 0xc7, 0x20, 0xd3, 0x8a, 0x16, 0xfd, 0x84, 0xe1, 0xd8, 0x59, 0x18, 0x1a, 0x09,
- 0x3b, 0xda, 0xcd, 0xec, 0xd7, 0x2f, 0x59, 0x1d, 0x0f, 0x07, 0x6e, 0x38, 0xf2, 0x57, 0x4e, 0x61,
- 0xfb, 0x5e, 0x37, 0xf4, 0x09, 0x54, 0x3d, 0xaa, 0x9b, 0x34, 0x1e, 0xea, 0xba, 0x48, 0x3c, 0x15,
- 0x6a, 0x1c, 0x39, 0x28, 0xbf, 0x03, 0x88, 0xf5, 0xe8, 0x63, 0x28, 0x9b, 0xd4, 0xd5, 0xe9, 0x5c,
- 0xe0, 0xb5, 0x95, 0x0e, 0xc4, 0xc2, 0x8a, 0xf6, 0xa1, 0xba, 0xb4, 0x84, 0x67, 0x31, 0xb1, 0x6f,
- 0xb1, 0x67, 0x64, 0x57, 0x6c, 0xa8, 0x45, 0x6a, 0x36, 0xdc, 0x1b, 0x4a, 0x42, 0x48, 0xf1, 0x6f,
- 0xa6, 0xf3, 0x89, 0xb1, 0x10, 0xb3, 0xe5, 0xdf, 0x69, 0xa0, 0xc9, 0xab, 0x40, 0x7b, 0x08, 0xb5,
- 0xd9, 0xc2, 0xd6, 0x6e, 0xa7, 0xc6, 0x6b, 0xca, 0xc9, 0x4e, 0xc6, 0xb1, 0x42, 0xf9, 0xbb, 0x04,
- 0xcd, 0x29, 0x25, 0x6e, 0xfc, 0x42, 0x78, 0xba, 0x7a, 0xf7, 0xbe, 0xd7, 0xcd, 0x1f, 0xbd, 0x2b,
- 0x8a, 0x39, 0xef, 0x0a, 0x39, 0x7e, 0x57, 0x7c, 0xf0, 0x0b, 0xe1, 0x04, 0x9a, 0x17, 0x87, 0x2a,
- 0xd1, 0x2f, 0x5d, 0xdb, 0xa1, 0xae, 0x7f, 0x97, 0x39, 0x6e, 0x59, 0x28, 0x15, 0xf3, 0xa0, 0xa4,
- 0x8c, 0x60, 0x3d, 0x99, 0x88, 0xa1, 0xb0, 0x0f, 0xe0, 0x44, 0x92, 0x80, 0x01, 0x12, 0x7b, 0x94,
- 0x28, 0x89, 0x13, 0x5e, 0xca, 0x67, 0xfc, 0xc5, 0x12, 0xad, 0xa6, 0x0d, 0xf2, 0x2d, 0xbd, 0x13,
- 0xcb, 0x61, 0x9f, 0xe8, 0x23, 0x28, 0x73, 0xe4, 0x87, 0xeb, 0x10, 0x92, 0x32, 0x80, 0x66, 0xba,
- 0xfa, 0xe3, 0x9c, 0xea, 0xd1, 0xbc, 0x73, 0x6b, 0xbf, 0x91, 0x18, 0xf9, 0x04, 0x9b, 0x26, 0x38,
- 0xf9, 0xc7, 0x2b, 0x8c, 0x18, 0x6c, 0x1b, 0x5a, 0x49, 0x93, 0x47, 0x86, 0x3f, 0x4a, 0x91, 0x61,
- 0xc0, 0xa4, 0x5b, 0x99, 0xe6, 0x33, 0x4c, 0x18, 0x91, 0xb5, 0xfc, 0x0e, 0x82, 0x8f, 0x29, 0xf3,
- 0x1f, 0x12, 0xec, 0xb0, 0x73, 0xb8, 0xa0, 0x3e, 0xe5, 0x97, 0x6b, 0x80, 0xb8, 0xf0, 0x8e, 0xff,
- 0x81, 0x78, 0x89, 0x05, 0x57, 0xe7, 0x37, 0x78, 0xc2, 0xa4, 0x7b, 0xfc, 0x1c, 0x63, 0x7b, 0xfd,
- 0xd2, 0x58, 0xf8, 0xd4, 0x1d, 0x13, 0x93, 0xaa, 0x21, 0xcd, 0x35, 0xf0, 0x8a, 0x36, 0x46, 0xa5,
- 0x9c, 0x83, 0xca, 0xb5, 0x5c, 0x54, 0x96, 0xde, 0x85, 0x4a, 0xe5, 0x4f, 0x12, 0x6c, 0xe6, 0xb4,
- 0xf1, 0x81, 0x07, 0xe7, 0xf3, 0xb8, 0x74, 0x30, 0xfb, 0x6f, 0x67, 0x1a, 0x4f, 0xcf, 0x29, 0xff,
- 0x78, 0x74, 0xa1, 0xaa, 0x12, 0x9d, 0x35, 0xce, 0xbb, 0x66, 0x44, 0x1c, 0x60, 0xa9, 0x81, 0x03,
- 0x41, 0x79, 0xca, 0x3d, 0x38, 0xfb, 0xbd, 0x03, 0xad, 0x72, 0x02, 0xad, 0x7d, 0xa8, 0x85, 0x51,
- 0x1e, 0xfa, 0x5e, 0xe4, 0x14, 0xa0, 0xb4, 0x19, 0x36, 0xc7, 0xed, 0x51, 0xcc, 0x5f, 0x25, 0xd8,
- 0x4a, 0xaf, 0x5f, 0x80, 0x74, 0x1f, 0x2a, 0x73, 0xfa, 0x92, 0x2c, 0x17, 0x7e, 0x8a, 0x32, 0xa3,
- 0x02, 0xa7, 0x05, 0x1c, 0x3a, 0xa0, 0x1f, 0x42, 0x8d, 0xaf, 0x7b, 0x62, 0x2d, 0xc2, 0x07, 0x51,
- 0x54, 0x8e, 0xb7, 0x79, 0x5a, 0xc0, 0xb1, 0xc7, 0x07, 0xa0, 0xf1, 0x0f, 0xd0, 0x4a, 0x3b, 0xa0,
- 0x5d, 0x00, 0xfa, 0xea, 0x86, 0x2c, 0x3d, 0xdf, 0xf8, 0x3a, 0x80, 0x61, 0x15, 0x27, 0x34, 0x68,
- 0x0f, 0xaa, 0xbf, 0x27, 0xae, 0x65, 0x58, 0xd1, 0xb5, 0xda, 0xe0, 0x75, 0x9e, 0x07, 0x4a, 0x1c,
- 0x59, 0x51, 0x17, 0xea, 0x6e, 0xf4, 0xaa, 0x65, 0xbf, 0x9e, 0xe4, 0x3d, 0x19, 0x27, 0x55, 0xca,
- 0x67, 0x50, 0x11, 0x61, 0xb9, 0x77, 0x68, 0x07, 0x2a, 0x26, 0xf5, 0x3c, 0xa2, 0x87, 0xb7, 0x68,
- 0x28, 0xee, 0x53, 0xa8, 0x27, 0x7e, 0x9e, 0xa0, 0x1a, 0x94, 0x46, 0x5f, 0x5e, 0x0d, 0xce, 0xdb,
- 0x05, 0xd4, 0x80, 0xea, 0x78, 0xa2, 0x06, 0x92, 0x84, 0x00, 0xca, 0x78, 0x74, 0x32, 0x7a, 0x71,
- 0xd9, 0x2e, 0xa2, 0x26, 0xd4, 0xc6, 0x13, 0x55, 0x88, 0x32, 0x33, 0x8d, 0x5e, 0x9c, 0x4d, 0xd5,
- 0x69, 0x7b, 0x4d, 0x98, 0x84, 0x58, 0x42, 0x15, 0x90, 0x07, 0xe7, 0xe7, 0xed, 0xf2, 0xbe, 0x06,
- 0xf5, 0xc4, 0xb3, 0x15, 0x75, 0x60, 0xeb, 0x6a, 0xfc, 0xc5, 0x78, 0xf2, 0x7c, 0x7c, 0x7d, 0x31,
- 0x52, 0xf1, 0xd9, 0xf1, 0xf4, 0x5a, 0xfd, 0xd5, 0xe5, 0xa8, 0x5d, 0x40, 0xdf, 0x82, 0xed, 0xab,
- 0xf1, 0xe0, 0xe4, 0x04, 0x8f, 0x4e, 0x06, 0xea, 0x68, 0x98, 0x36, 0x4b, 0xe8, 0x9b, 0xf0, 0xe0,
- 0x3e, 0x63, 0x71, 0xff, 0x0c, 0x1a, 0xc9, 0x5f, 0x10, 0x08, 0x41, 0x6b, 0x38, 0x7a, 0x36, 0xb8,
- 0x3a, 0x57, 0xaf, 0x27, 0x97, 0xea, 0xd9, 0x64, 0xdc, 0x2e, 0xa0, 0x0d, 0x68, 0x3e, 0x9b, 0xe0,
- 0xe3, 0xd1, 0xf5, 0x68, 0x3c, 0x38, 0x3a, 0x1f, 0x0d, 0xdb, 0x12, 0x73, 0x0b, 0x54, 0xc3, 0xb3,
- 0x69, 0xa0, 0x2b, 0xee, 0x3f, 0x82, 0xf6, 0x2a, 0x57, 0xa0, 0x3a, 0x54, 0x44, 0xba, 0x76, 0x81,
- 0x09, 0xea, 0xe0, 0x64, 0x3c, 0xb8, 0x18, 0xb5, 0xa5, 0xfe, 0xff, 0x24, 0x28, 0xf1, 0x47, 0x32,
- 0x7a, 0x02, 0xe5, 0xe0, 0x87, 0x38, 0x0a, 0xb8, 0x32, 0xf5, 0x33, 0x7d, 0x67, 0x33, 0xa5, 0x13,
- 0x28, 0x7e, 0x0c, 0x25, 0x4e, 0x0c, 0x28, 0x41, 0x12, 0x61, 0x00, 0x4a, 0xaa, 0x02, 0xff, 0xc7,
- 0x12, 0x3a, 0x64, 0x2f, 0x5c, 0x46, 0xd7, 0xa2, 0x48, 0xea, 0xc2, 0xdd, 0xd9, 0x4c, 0xe9, 0xa2,
- 0xa0, 0x11, 0x34, 0x92, 0x1d, 0xa1, 0xce, 0x7d, 0xbc, 0xb0, 0xb3, 0x9d, 0x63, 0x09, 0xd3, 0x1c,
- 0x3d, 0xf8, 0xe7, 0xdb, 0x5d, 0xe9, 0x5f, 0x6f, 0x77, 0xa5, 0x7f, 0xbf, 0xdd, 0x95, 0xfe, 0xf8,
- 0x9f, 0xdd, 0xc2, 0xaf, 0x4b, 0xfc, 0xaf, 0x8e, 0x59, 0x99, 0xff, 0x35, 0x71, 0xf8, 0xff, 0x00,
- 0x00, 0x00, 0xff, 0xff, 0xa3, 0x81, 0x48, 0xf7, 0x27, 0x11, 0x00, 0x00,
+ 0x35, 0x26, 0x81, 0x82, 0x32, 0xad, 0x51, 0x67, 0x3c, 0x65, 0xcd, 0x61, 0x67, 0x46, 0xcb, 0x3a,
+ 0xc5, 0x1b, 0x70, 0x43, 0x51, 0x3c, 0x01, 0x14, 0x3c, 0x41, 0x1e, 0x81, 0x0b, 0x2e, 0x79, 0x04,
+ 0x2a, 0x70, 0xc1, 0x63, 0x6c, 0x75, 0x4f, 0xcf, 0x49, 0x33, 0xae, 0xa4, 0x72, 0x37, 0xff, 0xb9,
+ 0xff, 0xbf, 0xbf, 0xfe, 0xba, 0x25, 0xf8, 0x99, 0x6e, 0xf8, 0x37, 0xcb, 0xd9, 0x81, 0x66, 0x9b,
+ 0x3d, 0xf3, 0x70, 0x3e, 0xeb, 0x99, 0x87, 0x3d, 0xcf, 0xd5, 0x7a, 0x5f, 0x2c, 0xa9, 0x7b, 0xd7,
+ 0xd3, 0xa9, 0x45, 0x5d, 0xe2, 0xd3, 0x79, 0xcf, 0x71, 0x6d, 0xdf, 0xee, 0xb9, 0x8e, 0xe6, 0xcc,
+ 0x02, 0xdb, 0x01, 0xd7, 0x20, 0xd9, 0x75, 0xb4, 0x9d, 0xe1, 0x3d, 0x49, 0x4c, 0xea, 0xbb, 0x86,
+ 0xe6, 0x65, 0xd2, 0x38, 0xf6, 0xc2, 0xd0, 0xee, 0x9c, 0x99, 0xf8, 0x08, 0x52, 0x29, 0xeb, 0xd0,
+ 0x3c, 0xa5, 0x64, 0xe1, 0xdf, 0x60, 0xfa, 0xc5, 0x92, 0x7a, 0xbe, 0xf2, 0x12, 0x5a, 0xa1, 0xc2,
+ 0x73, 0x6c, 0xcb, 0xa3, 0xe8, 0x43, 0x68, 0x2d, 0x1d, 0xdf, 0x30, 0xe9, 0x70, 0xe9, 0x12, 0xdf,
+ 0xb0, 0xad, 0x8e, 0xd4, 0x95, 0xf6, 0x6a, 0x78, 0x45, 0x8b, 0x1e, 0xc1, 0x46, 0xa0, 0x19, 0x13,
+ 0xcb, 0xf6, 0xa8, 0x66, 0x5b, 0x73, 0xaf, 0x53, 0xec, 0x4a, 0x7b, 0x32, 0xce, 0x1a, 0x94, 0xbf,
+ 0x4b, 0xd0, 0x78, 0x46, 0x7d, 0x2d, 0x2c, 0x8c, 0xb6, 0xa0, 0xe4, 0xf9, 0xc4, 0xf5, 0x79, 0x76,
+ 0x19, 0x07, 0x02, 0x6a, 0x83, 0x4c, 0xad, 0xb9, 0x48, 0xc3, 0x3e, 0xd1, 0x53, 0xa8, 0xfb, 0x44,
+ 0xbf, 0x20, 0xbe, 0x76, 0x43, 0x5d, 0xaf, 0x23, 0x77, 0xa5, 0xbd, 0x7a, 0xbf, 0x7d, 0xe0, 0x3a,
+ 0xda, 0x81, 0x1a, 0xeb, 0x4f, 0x0b, 0x38, 0xe9, 0x86, 0x3e, 0x82, 0x8a, 0xed, 0xb0, 0x65, 0x7a,
+ 0x9d, 0x35, 0x1e, 0xb1, 0xc1, 0x23, 0xf8, 0x0a, 0x26, 0x81, 0x01, 0x87, 0x1e, 0x47, 0x00, 0x55,
+ 0x53, 0x04, 0x2a, 0xbf, 0x80, 0x7a, 0x22, 0x2d, 0x7a, 0x92, 0xae, 0x2e, 0x75, 0xe5, 0xbd, 0x7a,
+ 0x7f, 0x7d, 0xa5, 0x7a, 0xaa, 0xb4, 0xf2, 0x1b, 0x80, 0xd8, 0x84, 0x10, 0xac, 0x59, 0xc4, 0xa4,
+ 0xbc, 0xcb, 0x06, 0xe6, 0xdf, 0xac, 0xf5, 0x2f, 0xc9, 0x62, 0x49, 0x79, 0x9b, 0x0d, 0x1c, 0x08,
+ 0xe8, 0xbb, 0xb0, 0xe6, 0xdf, 0x39, 0x94, 0x77, 0xd8, 0x12, 0x1d, 0x8a, 0x2c, 0xea, 0x9d, 0x43,
+ 0x31, 0xb7, 0x2a, 0xff, 0x2b, 0x8a, 0x39, 0x8a, 0x2e, 0x58, 0xb2, 0x85, 0x61, 0x1a, 0xd1, 0x1c,
+ 0xb9, 0x80, 0x3e, 0x86, 0xaa, 0x4b, 0x3d, 0x86, 0x0c, 0x9f, 0x57, 0xa9, 0xf7, 0xb7, 0x79, 0x42,
+ 0x2c, 0x94, 0x9f, 0x33, 0x78, 0x85, 0x83, 0x88, 0x5c, 0xd1, 0x3e, 0xb4, 0x17, 0xb6, 0x7d, 0x3b,
+ 0x23, 0xda, 0x6d, 0xb4, 0xfb, 0x32, 0xcf, 0x9b, 0xd1, 0xa3, 0x8f, 0xa1, 0xb1, 0xb4, 0x88, 0xae,
+ 0xbb, 0x54, 0x67, 0xb0, 0xe3, 0x73, 0x6e, 0x85, 0x73, 0x26, 0x96, 0xbd, 0xf4, 0x83, 0xfc, 0x38,
+ 0xe5, 0x86, 0x9e, 0x00, 0x24, 0x82, 0x4a, 0xf7, 0x05, 0x25, 0x9c, 0xd0, 0x31, 0x6c, 0xc6, 0x12,
+ 0xb3, 0x9b, 0xc6, 0x2b, 0x3a, 0xef, 0x94, 0xef, 0x8b, 0xcd, 0xf3, 0x66, 0x70, 0x35, 0x2c, 0x6d,
+ 0xb1, 0x9c, 0x53, 0x4c, 0x3d, 0x7b, 0xb1, 0xe4, 0xbd, 0x55, 0xba, 0xd2, 0x5e, 0x15, 0x67, 0x0d,
+ 0xca, 0x5f, 0x25, 0xd8, 0xca, 0x9b, 0x15, 0x1a, 0xc2, 0x86, 0x9b, 0xd4, 0xab, 0xe1, 0x96, 0xd5,
+ 0xfb, 0x1f, 0x64, 0x27, 0xcc, 0x37, 0x2e, 0x1b, 0x90, 0xcd, 0x42, 0xf4, 0x10, 0xa8, 0x79, 0x59,
+ 0x88, 0xee, 0xe1, 0x6c, 0x80, 0xf2, 0x17, 0x09, 0x36, 0x32, 0xe5, 0x50, 0x1f, 0xea, 0x82, 0x13,
+ 0xf8, 0xda, 0xa4, 0x24, 0x9c, 0x62, 0x3d, 0x4e, 0x3a, 0xa1, 0xcf, 0x60, 0x4b, 0x88, 0x53, 0xdf,
+ 0x76, 0x89, 0x4e, 0x2f, 0x39, 0x69, 0x08, 0xe8, 0x3c, 0x38, 0x08, 0xc9, 0xe4, 0x20, 0x65, 0xc6,
+ 0xb9, 0x41, 0xca, 0xf3, 0xd5, 0x55, 0x11, 0xdd, 0x43, 0x8f, 0x12, 0x80, 0x94, 0xf2, 0xcf, 0x70,
+ 0x02, 0x87, 0x9c, 0x1c, 0x5c, 0xc3, 0xe9, 0x14, 0xbb, 0x32, 0x3b, 0x21, 0x5c, 0x50, 0x7e, 0x0b,
+ 0x4d, 0x41, 0x21, 0x82, 0xaa, 0xbe, 0x03, 0x65, 0x8f, 0xba, 0x06, 0x0d, 0x0f, 0x66, 0x9d, 0xa7,
+ 0x9c, 0x72, 0x15, 0x16, 0x26, 0xf4, 0x7d, 0x58, 0x33, 0xa9, 0x4f, 0x44, 0x2f, 0x9b, 0xe1, 0x78,
+ 0x97, 0x0b, 0xff, 0x82, 0xfa, 0x64, 0x4e, 0x7c, 0x82, 0xb9, 0x83, 0xf2, 0x5a, 0x82, 0xf2, 0x34,
+ 0x1d, 0x23, 0x25, 0x62, 0x02, 0x53, 0x3a, 0x06, 0xfd, 0x14, 0x1a, 0x73, 0xaa, 0xd9, 0xa6, 0xe3,
+ 0x52, 0xcf, 0xa3, 0xf3, 0x68, 0x60, 0x2c, 0x60, 0x98, 0x30, 0x04, 0xc1, 0xa7, 0x05, 0x9c, 0x72,
+ 0x47, 0x9f, 0x02, 0x24, 0x82, 0xe5, 0x44, 0xf0, 0xc5, 0xe1, 0x71, 0x36, 0x38, 0xe1, 0x7c, 0x54,
+ 0x11, 0x24, 0xa2, 0xbc, 0x80, 0x56, 0x7a, 0x69, 0xa8, 0x05, 0x45, 0x63, 0x2e, 0x18, 0xa7, 0x68,
+ 0xcc, 0xd1, 0x43, 0xa8, 0x71, 0x76, 0x55, 0x0d, 0x93, 0x0a, 0x6a, 0x8d, 0x15, 0xa8, 0x03, 0x15,
+ 0x6a, 0xcd, 0xb9, 0x2d, 0x38, 0xea, 0xa1, 0xa8, 0xcc, 0x00, 0x65, 0x7b, 0x40, 0x07, 0x00, 0xac,
+ 0x8a, 0x63, 0x1b, 0x96, 0x1f, 0x0e, 0xbe, 0x15, 0x34, 0x1c, 0xaa, 0x71, 0xc2, 0x03, 0x3d, 0x84,
+ 0x35, 0x9f, 0xc1, 0xbb, 0xc8, 0x3d, 0xab, 0xe1, 0xae, 0x63, 0xae, 0x55, 0x7e, 0x0e, 0xb5, 0x28,
+ 0x8c, 0x2d, 0x94, 0xdd, 0x1b, 0x9e, 0x4f, 0x4c, 0x47, 0xf0, 0x59, 0xac, 0x48, 0xd3, 0xa6, 0x24,
+ 0x68, 0x53, 0xe9, 0x81, 0xac, 0x12, 0xfd, 0xdd, 0x79, 0x56, 0xf9, 0x0a, 0x50, 0x76, 0xb8, 0xec,
+ 0xd6, 0x8b, 0x3b, 0xe5, 0xc7, 0x31, 0xc8, 0xb4, 0xa2, 0x45, 0x3f, 0x61, 0x38, 0x76, 0x16, 0x86,
+ 0x46, 0xc2, 0x8e, 0x76, 0x33, 0xfb, 0xf5, 0x4b, 0x56, 0xc7, 0xc3, 0x81, 0x1b, 0x8e, 0xfc, 0x95,
+ 0x53, 0xd8, 0xbe, 0xd7, 0x0d, 0x7d, 0x04, 0x55, 0x8f, 0xea, 0x26, 0x8d, 0x87, 0xba, 0x2e, 0x12,
+ 0x4f, 0x85, 0x1a, 0x47, 0x0e, 0xca, 0xef, 0x00, 0x62, 0x3d, 0xfa, 0x10, 0xca, 0x26, 0x75, 0x75,
+ 0x3a, 0x17, 0x78, 0x6d, 0xa5, 0x03, 0xb1, 0xb0, 0xa2, 0x7d, 0xa8, 0x2e, 0x2d, 0xe1, 0x59, 0x4c,
+ 0xec, 0x5b, 0xec, 0x19, 0xd9, 0x95, 0x3f, 0x4a, 0x50, 0x8b, 0xf4, 0x6c, 0xba, 0x37, 0x94, 0x84,
+ 0x98, 0xe2, 0xdf, 0x4c, 0xe7, 0x13, 0x63, 0x21, 0x86, 0xcb, 0xbf, 0xd3, 0x48, 0x93, 0x57, 0x91,
+ 0xf6, 0x10, 0x6a, 0xb3, 0x85, 0xad, 0xdd, 0x4e, 0x8d, 0x57, 0x94, 0xb3, 0x9d, 0x8c, 0x63, 0x05,
+ 0xda, 0x81, 0xaa, 0x76, 0x43, 0xb5, 0x5b, 0x6f, 0x69, 0xf2, 0x6b, 0xa1, 0x89, 0x23, 0x59, 0xf9,
+ 0x87, 0x04, 0xcd, 0x29, 0x25, 0x6e, 0xfc, 0x7c, 0x78, 0xba, 0x7a, 0x31, 0xbf, 0xd3, 0xb3, 0x20,
+ 0x7a, 0x74, 0x14, 0x73, 0x1e, 0x1d, 0x72, 0xfc, 0xe8, 0x78, 0xef, 0xe7, 0xc3, 0x09, 0x34, 0x2f,
+ 0x0e, 0x55, 0xa2, 0x5f, 0xba, 0xb6, 0x43, 0x5d, 0xff, 0x2e, 0x73, 0x16, 0xb3, 0x38, 0x2b, 0xe6,
+ 0xe1, 0x4c, 0x19, 0xc1, 0x7a, 0x32, 0x11, 0x83, 0x68, 0x1f, 0xc0, 0x89, 0x24, 0x81, 0x11, 0x24,
+ 0x36, 0x30, 0x51, 0x12, 0x27, 0xbc, 0x94, 0x4f, 0xf8, 0x73, 0x26, 0x5a, 0x4d, 0x1b, 0xe4, 0x5b,
+ 0x7a, 0x27, 0x96, 0xc3, 0x3e, 0xd1, 0x07, 0x50, 0xe6, 0xc7, 0x22, 0x5c, 0x87, 0x90, 0x94, 0x01,
+ 0x34, 0xd3, 0xd5, 0x1f, 0xe7, 0x54, 0x8f, 0xe6, 0x9d, 0x5b, 0xfb, 0xb5, 0xc4, 0x98, 0x29, 0xd8,
+ 0x34, 0x41, 0xd8, 0x3f, 0x5e, 0xa1, 0xcb, 0x60, 0xdb, 0xd0, 0x4a, 0x9a, 0x3c, 0xa6, 0xfc, 0x51,
+ 0x8a, 0x29, 0x03, 0x9a, 0xdd, 0xca, 0x34, 0x9f, 0xa1, 0xc9, 0x88, 0xc9, 0xe5, 0xb7, 0xb0, 0x7f,
+ 0xcc, 0xa7, 0xff, 0x94, 0x60, 0x87, 0x1d, 0xd2, 0x05, 0xf5, 0x29, 0xbf, 0x79, 0x03, 0xc4, 0x85,
+ 0x0f, 0x80, 0x1f, 0x88, 0x67, 0x5a, 0x70, 0xaf, 0x7e, 0x83, 0x27, 0x4c, 0xba, 0xc7, 0x6f, 0x35,
+ 0xb6, 0xd7, 0x2f, 0x8d, 0x85, 0x4f, 0xdd, 0x31, 0x31, 0xa9, 0x1a, 0x72, 0x60, 0x03, 0xaf, 0x68,
+ 0x63, 0x54, 0xca, 0x39, 0xa8, 0x5c, 0xcb, 0x45, 0x65, 0xe9, 0x6d, 0xa8, 0x54, 0xfe, 0x2c, 0xc1,
+ 0x66, 0x4e, 0x1b, 0xef, 0x79, 0x70, 0x3e, 0x8d, 0x4b, 0x07, 0xb3, 0xff, 0x76, 0xa6, 0xf1, 0xf4,
+ 0x9c, 0xf2, 0x8f, 0x47, 0x17, 0xaa, 0x2a, 0xd1, 0x59, 0xe3, 0xbc, 0x6b, 0xc6, 0xd2, 0x01, 0x96,
+ 0x1a, 0x38, 0x10, 0x94, 0xa7, 0xdc, 0x83, 0x53, 0xe3, 0x5b, 0xd0, 0x2a, 0x27, 0xd0, 0xda, 0x87,
+ 0x5a, 0x18, 0xe5, 0xa1, 0xef, 0x45, 0x4e, 0x01, 0x4a, 0x9b, 0x61, 0x73, 0xdc, 0x1e, 0xc5, 0xfc,
+ 0x4d, 0x82, 0xad, 0xf4, 0xfa, 0x05, 0x48, 0xf7, 0xa1, 0x32, 0xa7, 0x2f, 0xc9, 0x72, 0xe1, 0xa7,
+ 0xf8, 0x34, 0x2a, 0x70, 0x5a, 0xc0, 0xa1, 0x03, 0xfa, 0x21, 0xd4, 0xf8, 0xba, 0x27, 0xd6, 0x22,
+ 0x7c, 0x2d, 0x45, 0xe5, 0x78, 0x9b, 0xa7, 0x05, 0x1c, 0x7b, 0xbc, 0x07, 0x1a, 0xff, 0x00, 0xad,
+ 0xb4, 0x03, 0xda, 0x05, 0xa0, 0x5f, 0xdd, 0x90, 0xa5, 0xe7, 0x1b, 0x5f, 0x06, 0x30, 0xac, 0xe2,
+ 0x84, 0x06, 0xed, 0x41, 0xf5, 0xf7, 0xc4, 0xb5, 0x0c, 0x2b, 0xba, 0x73, 0x1b, 0xbc, 0xce, 0xf3,
+ 0x40, 0x89, 0x23, 0x2b, 0xea, 0x42, 0xdd, 0x8d, 0x9e, 0xbc, 0xec, 0xa7, 0x95, 0xbc, 0x27, 0xe3,
+ 0xa4, 0x4a, 0xf9, 0x04, 0x2a, 0x22, 0x2c, 0xf7, 0x82, 0xed, 0x40, 0xc5, 0xa4, 0x9e, 0x47, 0xf4,
+ 0xf0, 0x8a, 0x0d, 0xc5, 0x7d, 0x0a, 0xf5, 0xc4, 0x6f, 0x17, 0x54, 0x83, 0xd2, 0xe8, 0xf3, 0xab,
+ 0xc1, 0x79, 0xbb, 0x80, 0x1a, 0x50, 0x1d, 0x4f, 0xd4, 0x40, 0x92, 0x10, 0x40, 0x19, 0x8f, 0x4e,
+ 0x46, 0x2f, 0x2e, 0xdb, 0x45, 0xd4, 0x84, 0xda, 0x78, 0xa2, 0x0a, 0x51, 0x66, 0xa6, 0xd1, 0x8b,
+ 0xb3, 0xa9, 0x3a, 0x6d, 0xaf, 0x09, 0x93, 0x10, 0x4b, 0xa8, 0x02, 0xf2, 0xe0, 0xfc, 0xbc, 0x5d,
+ 0xde, 0xd7, 0xa0, 0x9e, 0x78, 0xd3, 0xa2, 0x0e, 0x6c, 0x5d, 0x8d, 0x3f, 0x1b, 0x4f, 0x9e, 0x8f,
+ 0xaf, 0x2f, 0x46, 0x2a, 0x3e, 0x3b, 0x9e, 0x5e, 0xab, 0xbf, 0xba, 0x1c, 0xb5, 0x0b, 0xe8, 0x5b,
+ 0xb0, 0x7d, 0x35, 0x1e, 0x9c, 0x9c, 0xe0, 0xd1, 0xc9, 0x40, 0x1d, 0x0d, 0xd3, 0x66, 0x09, 0x7d,
+ 0x13, 0x1e, 0xdc, 0x67, 0x2c, 0xee, 0x9f, 0x41, 0x23, 0xf9, 0xf3, 0x02, 0x21, 0x68, 0x0d, 0x47,
+ 0xcf, 0x06, 0x57, 0xe7, 0xea, 0xf5, 0xe4, 0x52, 0x3d, 0x9b, 0x8c, 0xdb, 0x05, 0xb4, 0x01, 0xcd,
+ 0x67, 0x13, 0x7c, 0x3c, 0xba, 0x1e, 0x8d, 0x07, 0x47, 0xe7, 0xa3, 0x61, 0x5b, 0x62, 0x6e, 0x81,
+ 0x6a, 0x78, 0x36, 0x0d, 0x74, 0xc5, 0xfd, 0x47, 0xd0, 0x5e, 0xe5, 0x0a, 0x54, 0x87, 0x8a, 0x48,
+ 0xd7, 0x2e, 0x30, 0x41, 0x1d, 0x9c, 0x8c, 0x07, 0x17, 0xa3, 0xb6, 0xd4, 0xff, 0xbf, 0x04, 0x25,
+ 0xfe, 0x82, 0x46, 0x4f, 0xa0, 0x1c, 0xfc, 0x4a, 0x47, 0x01, 0x57, 0xa6, 0x7e, 0xc3, 0xef, 0x6c,
+ 0xa6, 0x74, 0x02, 0xc5, 0x8f, 0xa1, 0xc4, 0x89, 0x01, 0x25, 0x48, 0x22, 0x0c, 0x40, 0x49, 0x55,
+ 0xe0, 0xff, 0x58, 0x42, 0x87, 0xec, 0xf9, 0xcb, 0xe8, 0x5a, 0x14, 0x49, 0x5d, 0xb8, 0x3b, 0x9b,
+ 0x29, 0x5d, 0x14, 0x34, 0x82, 0x46, 0xb2, 0x23, 0xd4, 0xb9, 0x8f, 0x17, 0x76, 0xb6, 0x73, 0x2c,
+ 0x61, 0x9a, 0xa3, 0x07, 0xff, 0x7a, 0xb3, 0x2b, 0xfd, 0xfb, 0xcd, 0xae, 0xf4, 0x9f, 0x37, 0xbb,
+ 0xd2, 0x9f, 0xfe, 0xbb, 0x5b, 0xf8, 0x75, 0x89, 0xff, 0x0f, 0x32, 0x2b, 0xf3, 0xff, 0x2d, 0x0e,
+ 0xbf, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x18, 0x0b, 0xe1, 0x7c, 0x44, 0x11, 0x00, 0x00,
}
diff --git a/src/query/generated/proto/rpcpb/query.proto b/src/query/generated/proto/rpcpb/query.proto
index 3f875052a1..a5bb08782f 100644
--- a/src/query/generated/proto/rpcpb/query.proto
+++ b/src/query/generated/proto/rpcpb/query.proto
@@ -144,6 +144,7 @@ message M3Segment {
bytes tail = 2;
int64 startTime = 3;
int64 blockSize = 4;
+ uint32 checksum = 5;
}
message SearchRequest {
diff --git a/src/query/graphite/common/test_util.go b/src/query/graphite/common/test_util.go
index 893cf4718a..3317c0904b 100644
--- a/src/query/graphite/common/test_util.go
+++ b/src/query/graphite/common/test_util.go
@@ -21,6 +21,7 @@
package common
import (
+ "fmt"
"math"
"testing"
"time"
@@ -43,7 +44,7 @@ type TestSeries struct {
// NewTestContext creates a new test context.
func NewTestContext() *Context {
- now := time.Now()
+ now := time.Now().Truncate(time.Hour)
return NewContext(ContextOptions{Start: now.Add(-time.Hour), End: now})
}
@@ -95,22 +96,27 @@ func CompareOutputsAndExpected(t *testing.T, step int, start time.Time, expected
a := actual[i]
require.Equal(t, expected[i].Name, a.Name())
assert.Equal(t, step, a.MillisPerStep(), a.Name()+": MillisPerStep in expected series do not match MillisPerStep in actual")
- assert.Equal(t, start, a.StartTime(), a.Name()+": StartTime in expected series does not match StartTime in actual")
+ diff := time.Duration(math.Abs(float64(start.Sub(a.StartTime()))))
+ assert.True(t, diff < time.Millisecond,
+ fmt.Sprintf("%s: StartTime in expected series (%v) does not match StartTime in actual (%v), diff %v",
+ a.Name(), start, a.StartTime(), diff))
e := expected[i].Data
require.Equal(t, len(e), a.Len(), a.Name()+": length of expected series does not match length of actual")
for step := 0; step < a.Len(); step++ {
v := a.ValueAt(step)
if math.IsNaN(e[step]) {
- assert.True(t, math.IsNaN(v), a.Name()+": invalid value for step %d/%d, should be NaN but is %v", step, a.Len(), v)
+ assert.True(t, math.IsNaN(v), fmt.Sprintf("%s: invalid value for step %d/%d, should be NaN but is %v", a.Name(), 1+step, a.Len(), v))
+ } else if math.IsNaN(v) {
+ assert.Fail(t, fmt.Sprintf("%s: invalid value for step %d/%d, should be %v but is NaN", a.Name(), 1+step, a.Len(), e[step]))
} else {
- xtest.InDeltaWithNaNs(t, e[step], v, 0.0001, a.Name()+": invalid value for %d/%d", step, a.Len())
+ xtest.InDeltaWithNaNs(t, e[step], v, 0.0001, a.Name()+": invalid value for %d/%d", 1+step, a.Len())
}
}
}
}
-// MovingAverageStorage is a special test construct for the moving average function
-type MovingAverageStorage struct {
+// MovingFunctionStorage is a special test construct for all moving functions
+type MovingFunctionStorage struct {
StepMillis int
Bootstrap []float64
Values []float64
@@ -118,7 +124,7 @@ type MovingAverageStorage struct {
}
// FetchByPath builds a new series from the input path
-func (s *MovingAverageStorage) FetchByPath(
+func (s *MovingFunctionStorage) FetchByPath(
ctx context.Context,
path string,
opts storage.FetchOptions,
@@ -127,7 +133,7 @@ func (s *MovingAverageStorage) FetchByPath(
}
// FetchByQuery builds a new series from the input query
-func (s *MovingAverageStorage) FetchByQuery(
+func (s *MovingFunctionStorage) FetchByQuery(
ctx context.Context,
query string,
opts storage.FetchOptions,
@@ -136,7 +142,7 @@ func (s *MovingAverageStorage) FetchByQuery(
}
// FetchByIDs builds a new series from the input query
-func (s *MovingAverageStorage) fetchByIDs(
+func (s *MovingFunctionStorage) fetchByIDs(
ctx context.Context,
ids []string,
opts storage.FetchOptions,
diff --git a/src/query/graphite/common/transform_test.go b/src/query/graphite/common/transform_test.go
index c1d782f381..6c58c90044 100644
--- a/src/query/graphite/common/transform_test.go
+++ b/src/query/graphite/common/transform_test.go
@@ -368,7 +368,7 @@ func TestPerSecond(t *testing.T) {
TestSeries{Name: "foo | perSecond", Data: []float64{nan, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}},
TestSeries{Name: "foo | perSecond", Data: []float64{nan, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0}},
TestSeries{Name: "foo | perSecond", Data: []float64{nan, 1.0, 1.0, 1.0, 1.0, nan, 1.0, 1.0, 1.0, 1.0}},
- TestSeries{Name: "foo | perSecond", Data: []float64{nan, nan, nan, nan, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}},
+ TestSeries{Name: "foo | perSecond", Data: []float64{nan, nan, nan, nan, 1.0, nan, 1.0, 1.0, 1.0, 1.0}},
TestSeries{Name: "foo | perSecond", Data: []float64{nan, 1.0, 1.0, nan, nan, nan, 1.0, 1.0, 1.0, 1.0}},
}
input := ts.SeriesList{Values: inputSeries}
diff --git a/src/query/graphite/graphite/tags.go b/src/query/graphite/graphite/tags.go
index 1287370d87..cd857a4f28 100644
--- a/src/query/graphite/graphite/tags.go
+++ b/src/query/graphite/graphite/tags.go
@@ -20,7 +20,14 @@
package graphite
-import "fmt"
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+
+ "github.com/m3db/m3/src/x/ident"
+ "github.com/m3db/m3/src/x/unsafe"
+)
const (
// graphiteFormat is the format for graphite metric tag names, which will be
@@ -43,12 +50,21 @@ const (
var (
// Should never be modified after init().
preFormattedTagNames [][]byte
+ // Should never be modified after init().
+ preFormattedTagNameIDs []ident.ID
+
+ // Prefix is the prefix for graphite metrics
+ Prefix = []byte("__g")
+
+ // suffix is the suffix for graphite metrics
+ suffix = []byte("__")
)
func init() {
for i := 0; i < numPreFormattedTagNames; i++ {
name := generateTagName(i)
preFormattedTagNames = append(preFormattedTagNames, name)
+ preFormattedTagNameIDs = append(preFormattedTagNameIDs, ident.BytesID(name))
}
}
@@ -62,6 +78,32 @@ func TagName(idx int) []byte {
return []byte(fmt.Sprintf(graphiteFormat, idx))
}
+// TagNameID gets a preallocated tag name ID, or generates one, for the given
+// graphite path index.
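+// For example, TagNameID(0) returns an ID for the tag name "__g0__".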
+func TagNameID(idx int) ident.ID {
+ if idx < len(preFormattedTagNameIDs) {
+ return preFormattedTagNameIDs[idx]
+ }
+
+ return ident.StringID(fmt.Sprintf(graphiteFormat, idx))
+}
+
func generateTagName(idx int) []byte {
return []byte(fmt.Sprintf(graphiteFormat, idx))
}
+
+// TagIndex returns the index given the tag.
+func TagIndex(tag []byte) (int, bool) {
+ if !bytes.HasPrefix(tag, Prefix) ||
+ !bytes.HasSuffix(tag, suffix) {
+ return 0, false
+ }
+ start := len(Prefix)
+ end := len(tag) - len(suffix)
+ indexStr := unsafe.String(tag[start:end])
+ index, err := strconv.Atoi(indexStr)
+ if err != nil {
+ return 0, false
+ }
+ return index, true
+}
diff --git a/src/query/graphite/graphite/tags_test.go b/src/query/graphite/graphite/tags_test.go
index c09cac246b..c7726dd5f8 100644
--- a/src/query/graphite/graphite/tags_test.go
+++ b/src/query/graphite/graphite/tags_test.go
@@ -33,3 +33,39 @@ func TestTagName(t *testing.T) {
require.Equal(t, expected, TagName(i))
}
}
+
+func TestTagIndex(t *testing.T) {
+ for _, test := range []struct {
+ tag []byte
+ isGraphite bool
+ index int
+ }{
+ {
+ tag: []byte("__g0__"),
+ isGraphite: true,
+ },
+ {
+ tag: []byte("__g11__"),
+ isGraphite: true,
+ index: 11,
+ },
+ {
+ tag: []byte("__g__"),
+ },
+ {
+ tag: []byte("__gabc__"),
+ },
+ {
+ tag: []byte("_g1__"),
+ },
+ {
+ tag: []byte("__g1_"),
+ },
+ } {
+ t.Run(string(test.tag), func(t *testing.T) {
+ index, exists := TagIndex(test.tag)
+ require.Equal(t, test.isGraphite, exists)
+ require.Equal(t, test.index, index)
+ })
+ }
+}
diff --git a/src/query/graphite/lexer/lexer.go b/src/query/graphite/lexer/lexer.go
index ddd3f7ee76..2f4597db58 100644
--- a/src/query/graphite/lexer/lexer.go
+++ b/src/query/graphite/lexer/lexer.go
@@ -103,6 +103,9 @@ type Token struct {
value string
}
+// MustMakeToken is a test helper that creates a Token with the given value.
+func MustMakeToken(value string) *Token { return &Token{value: value} }
+
// TokenType returns the type of token consumed.
func (t Token) TokenType() TokenType {
return t.tokenType
diff --git a/src/query/graphite/native/aggregation_functions.go b/src/query/graphite/native/aggregation_functions.go
index d79d8d419c..5ea1949431 100644
--- a/src/query/graphite/native/aggregation_functions.go
+++ b/src/query/graphite/native/aggregation_functions.go
@@ -157,6 +157,25 @@ func sumSeriesWithWildcards(
return combineSeriesWithWildcards(ctx, series, positions, sumSpecificationFunc, ts.Sum)
}
+// aggregateWithWildcards splits the given set of series into sub-groupings
+// based on wildcard matches in the hierarchy, then aggregates the values in
+// each grouping using the given function.
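+//
+// For example, aggregating servers.foo-*.pod*.status.500 with "sum" over
+// positions 1 and 2 drops those name nodes, yielding a single
+// servers.status.500 series.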
+func aggregateWithWildcards(
+ ctx *common.Context,
+ series singlePathSpec,
+ fname string,
+ positions ...int,
+) (ts.SeriesList, error) {
+ f, fexists := summarizeFuncs[fname]
+ if !fexists {
+ err := errors.NewInvalidParamsError(fmt.Errorf(
+ "invalid func %s", fname))
+ return ts.NewSeriesList(), err
+ }
+
+ return combineSeriesWithWildcards(ctx, series, positions, f.specificationFunc, f.consolidationFunc)
+}
+
// combineSeriesWithWildcards splits the given set of series into sub-groupings
// based on wildcard matches in the hierarchy, then combines the values in each
// sub-grouping according to the provided consolidation function
@@ -247,6 +266,55 @@ func groupByNode(ctx *common.Context, series singlePathSpec, node int, fname str
metaSeries[key] = append(metaSeries[key], s)
}
+ return applyFnToMetaSeries(ctx, series, metaSeries, fname)
+}
+
+// Takes a series list and maps a callback to subgroups within, as defined by multiple nodes.
+//
+// &target=groupByNodes(ganglia.server*.*.cpu.load*,"sum",1,4)
+//
+// Would return multiple series which are each the result of applying the “sum” aggregation to groups joined on the
+// nodes’ list (0 indexed) resulting in a list of targets like
+//
+// sumSeries(ganglia.server1.*.cpu.load5),sumSeries(ganglia.server1.*.cpu.load10),sumSeries(ganglia.server1.*.cpu.load15),
+// sumSeries(ganglia.server2.*.cpu.load5),sumSeries(ganglia.server2.*.cpu.load10),sumSeries(ganglia.server2.*.cpu.load15),...
+//
+// NOTE: if len(nodes) == 0, all series are aggregated into a single series.
+func groupByNodes(ctx *common.Context, series singlePathSpec, fname string, nodes ...int) (ts.SeriesList, error) {
+ metaSeries := make(map[string][]*ts.Series)
+
+ nodeLen := len(nodes)
+ if nodeLen == 0 {
+ key := "*" // put into single group, not ideal, but more graphite-ish.
+ for _, s := range series.Values {
+ metaSeries[key] = append(metaSeries[key], s)
+ }
+ } else {
+ for _, s := range series.Values {
+ parts := strings.Split(s.Name(), ".")
+
+ var keys []string
+ for _, n := range nodes {
+ if n < 0 {
+ n = len(parts) + n
+ }
+
+ if n >= len(parts) || n < 0 {
+ err := errors.NewInvalidParamsError(fmt.Errorf("could not group %s by nodes %v; not enough parts", s.Name(), nodes))
+ return ts.NewSeriesList(), err
+ }
+
+ keys = append(keys, parts[n])
+ }
+ key := strings.Join(keys, ".")
+ metaSeries[key] = append(metaSeries[key], s)
+ }
+ }
+
+ return applyFnToMetaSeries(ctx, series, metaSeries, fname)
+}
+
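+// applyFnToMetaSeries aggregates each keyed group of series with the named
+// function (defaulting to "sum" when fname is empty) and returns the
+// resulting series list.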
+func applyFnToMetaSeries(ctx *common.Context, series singlePathSpec, metaSeries map[string][]*ts.Series, fname string) (ts.SeriesList, error) {
if fname == "" {
fname = "sum"
}
diff --git a/src/query/graphite/native/aggregation_functions_test.go b/src/query/graphite/native/aggregation_functions_test.go
index 60bf009a73..cb19232d5b 100644
--- a/src/query/graphite/native/aggregation_functions_test.go
+++ b/src/query/graphite/native/aggregation_functions_test.go
@@ -331,6 +331,56 @@ func TestSumSeriesWithWildcards(t *testing.T) {
}
}
+func TestAggregateWithWildcards(t *testing.T) {
+ var (
+ start, _ = time.Parse(time.RFC1123, "Mon, 27 Jul 2015 19:41:19 GMT")
+ end, _ = time.Parse(time.RFC1123, "Mon, 27 Jul 2015 19:43:19 GMT")
+ ctx = common.NewContext(common.ContextOptions{Start: start, End: end})
+ inputs = []*ts.Series{
+ ts.NewSeries(ctx, "servers.foo-1.pod1.status.500", start,
+ ts.NewConstantValues(ctx, 2, 12, 10000)),
+ ts.NewSeries(ctx, "servers.foo-2.pod1.status.500", start,
+ ts.NewConstantValues(ctx, 4, 12, 10000)),
+ ts.NewSeries(ctx, "servers.foo-3.pod1.status.500", start,
+ ts.NewConstantValues(ctx, 6, 12, 10000)),
+ ts.NewSeries(ctx, "servers.foo-1.pod2.status.500", start,
+ ts.NewConstantValues(ctx, 8, 12, 10000)),
+ ts.NewSeries(ctx, "servers.foo-2.pod2.status.500", start,
+ ts.NewConstantValues(ctx, 10, 12, 10000)),
+
+ ts.NewSeries(ctx, "servers.foo-1.pod1.status.400", start,
+ ts.NewConstantValues(ctx, 20, 12, 10000)),
+ ts.NewSeries(ctx, "servers.foo-2.pod1.status.400", start,
+ ts.NewConstantValues(ctx, 30, 12, 10000)),
+ ts.NewSeries(ctx, "servers.foo-3.pod2.status.400", start,
+ ts.NewConstantValues(ctx, 40, 12, 10000)),
+ }
+ )
+ defer ctx.Close()
+
+ outSeries, err := aggregateWithWildcards(ctx, singlePathSpec{
+ Values: inputs,
+ }, "sum", 1, 2)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(outSeries.Values))
+
+ outSeries, _ = sortByName(ctx, singlePathSpec(outSeries))
+
+ expectedOutputs := []struct {
+ name string
+ sumOfVals float64
+ }{
+ {"servers.status.400", 90 * 12},
+ {"servers.status.500", 30 * 12},
+ }
+
+ for i, expected := range expectedOutputs {
+ series := outSeries.Values[i]
+ assert.Equal(t, expected.name, series.Name())
+ assert.Equal(t, expected.sumOfVals, series.SafeSum())
+ }
+}
+
func TestGroupByNode(t *testing.T) {
var (
start, _ = time.Parse(time.RFC1123, "Mon, 27 Jul 2015 19:41:19 GMT")
@@ -401,6 +451,85 @@ func TestGroupByNode(t *testing.T) {
}
}
+func TestGroupByNodes(t *testing.T) {
+ var (
+ start, _ = time.Parse(time.RFC1123, "Mon, 27 Jul 2015 19:41:19 GMT")
+ end, _ = time.Parse(time.RFC1123, "Mon, 27 Jul 2015 19:43:19 GMT")
+ ctx = common.NewContext(common.ContextOptions{Start: start, End: end})
+ inputs = []*ts.Series{
+ ts.NewSeries(ctx, "servers.foo-1.pod1.status.500", start,
+ ts.NewConstantValues(ctx, 2, 12, 10000)),
+ ts.NewSeries(ctx, "servers.foo-2.pod1.status.500", start,
+ ts.NewConstantValues(ctx, 4, 12, 10000)),
+ ts.NewSeries(ctx, "servers.foo-3.pod1.status.500", start,
+ ts.NewConstantValues(ctx, 6, 12, 10000)),
+ ts.NewSeries(ctx, "servers.foo-1.pod2.status.500", start,
+ ts.NewConstantValues(ctx, 8, 12, 10000)),
+ ts.NewSeries(ctx, "servers.foo-2.pod2.status.500", start,
+ ts.NewConstantValues(ctx, 10, 12, 10000)),
+
+ ts.NewSeries(ctx, "servers.foo-1.pod1.status.400", start,
+ ts.NewConstantValues(ctx, 20, 12, 10000)),
+ ts.NewSeries(ctx, "servers.foo-2.pod1.status.400", start,
+ ts.NewConstantValues(ctx, 30, 12, 10000)),
+ ts.NewSeries(ctx, "servers.foo-3.pod2.status.400", start,
+ ts.NewConstantValues(ctx, 40, 12, 10000)),
+ }
+ )
+ defer ctx.Close()
+
+ type result struct {
+ name string
+ sumOfVals float64
+ }
+
+ tests := []struct {
+ fname string
+ nodes []int
+ expectedResults []result
+ }{
+ {"avg", []int{2, 4}, []result{ // test normal group by nodes
+ {"pod1.400", ((20 + 30) / 2) * 12},
+ {"pod1.500", ((2 + 4 + 6) / 3) * 12},
+ {"pod2.400", (40 / 1) * 12},
+ {"pod2.500", ((8 + 10) / 2) * 12},
+ }},
+ {"max", []int{2, 4}, []result{ // test with different function
+ {"pod1.400", 30 * 12},
+ {"pod1.500", 6 * 12},
+ {"pod2.400", 40 * 12},
+ {"pod2.500", 10 * 12},
+ }},
+ {"min", []int{2, -1}, []result{ // test negative index handling
+ {"pod1.400", 20 * 12},
+ {"pod1.500", 2 * 12},
+ {"pod2.400", 40 * 12},
+ {"pod2.500", 8 * 12},
+ }},
+ {"sum", []int{}, []result{ // test empty slice handing.
+ {"*", (2 + 4 + 6 + 8 + 10 + 20 + 30 + 40) * 12},
+ }},
+ }
+
+ for _, test := range tests {
+ outSeries, err := groupByNodes(ctx, singlePathSpec{
+ Values: inputs,
+ }, test.fname, test.nodes...)
+ require.NoError(t, err)
+ require.Equal(t, len(test.expectedResults), len(outSeries.Values))
+
+ outSeries, _ = sortByName(ctx, singlePathSpec(outSeries))
+
+ for i, expected := range test.expectedResults {
+ series := outSeries.Values[i]
+ assert.Equal(t, expected.name, series.Name(),
+ "wrong name for %v %s (%d)", test.nodes, test.fname, i)
+ assert.Equal(t, expected.sumOfVals, series.SafeSum(),
+ "wrong result for %v %s (%d)", test.nodes, test.fname, i)
+ }
+ }
+}
+
func TestWeightedAverage(t *testing.T) {
ctx, _ := newConsolidationTestSeries()
defer ctx.Close()
diff --git a/src/query/graphite/native/builtin_functions.go b/src/query/graphite/native/builtin_functions.go
index e10ba3c856..4a904f4932 100644
--- a/src/query/graphite/native/builtin_functions.go
+++ b/src/query/graphite/native/builtin_functions.go
@@ -32,7 +32,9 @@ import (
"github.com/m3db/m3/src/query/graphite/common"
"github.com/m3db/m3/src/query/graphite/errors"
+ "github.com/m3db/m3/src/query/graphite/graphite"
"github.com/m3db/m3/src/query/graphite/ts"
+ "github.com/m3db/m3/src/query/util"
)
const (
@@ -241,6 +243,81 @@ func timeShift(
}, nil
}
+// delay shifts all samples later by an integer number of steps. This can be used
+// for custom derivative calculations, among other things. Note: this will pad
+// the early end of the data with NaN for every step shifted. delay complements
+// other time-displacement functions such as timeShift and timeSlice, in that
+// delay is indifferent to the step intervals being shifted.
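+//
+// For example, delay(foo.bar, 3) returns a series whose first three steps
+// are NaN and whose remaining steps hold the values shifted three steps later.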
+func delay(
+ ctx *common.Context,
+ singlePath singlePathSpec,
+ steps int,
+) (ts.SeriesList, error) {
+ input := ts.SeriesList(singlePath)
+ output := make([]*ts.Series, 0, input.Len())
+
+ for _, series := range input.Values {
+ delayedVals := delayValuesHelper(ctx, series, steps)
+ delayedSeries := ts.NewSeries(ctx, series.Name(), series.StartTime(), delayedVals)
+ renamedSeries := delayedSeries.RenamedTo(fmt.Sprintf("delay(%s,%d)", delayedSeries.Name(), steps))
+ output = append(output, renamedSeries)
+ }
+ input.Values = output
+ return input, nil
+}
+
+// delayValuesHelper takes a series and returns a copy of the values after
+// delaying the values by `steps` number of steps
+func delayValuesHelper(ctx *common.Context, series *ts.Series, steps int) ts.Values {
+ output := ts.NewValues(ctx, series.MillisPerStep(), series.Len())
+ for i := steps; i < series.Len(); i++ {
+ output.SetValueAt(i, series.ValueAt(i - steps))
+ }
+ return output
+}
+
+// timeSlice takes one metric or a wildcard metric, followed by a quoted string with the time to start the line and
+// another quoted string with the time to end the line. The start and end times are inclusive.
+// Useful for filtering out a part of a series of data from a wider range of data.
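+//
+// For example, timeSlice(foo.bar, "-10min", "now") would keep only the points
+// within the last ten minutes, leaving steps outside the window as NaN.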
+func timeSlice(ctx *common.Context, inputPath singlePathSpec, start string, end string) (ts.SeriesList, error) {
+ var (
+ now = time.Now()
+ tzOffsetForAbsoluteTime time.Duration
+ )
+ startTime, err := graphite.ParseTime(start, now, tzOffsetForAbsoluteTime)
+ if err != nil {
+ return ts.NewSeriesList(), err
+ }
+ endTime, err := graphite.ParseTime(end, now, tzOffsetForAbsoluteTime)
+ if err != nil {
+ return ts.NewSeriesList(), err
+ }
+
+ input := ts.SeriesList(inputPath)
+ output := make([]*ts.Series, 0, input.Len())
+
+ for _, series := range input.Values {
+ stepDuration := time.Duration(series.MillisPerStep()) * time.Millisecond
+ truncatedValues := ts.NewValues(ctx, series.MillisPerStep(), series.Len())
+
+ currentTime := series.StartTime()
+ for i := 0; i < series.Len(); i++ {
+ equalOrAfterStart := currentTime.Equal(startTime) || currentTime.After(startTime)
+ beforeOrEqualEnd := currentTime.Before(endTime) || currentTime.Equal(endTime)
+ if equalOrAfterStart && beforeOrEqualEnd {
+ truncatedValues.SetValueAt(i, series.ValueAtTime(currentTime))
+ }
+ currentTime = currentTime.Add(stepDuration)
+ }
+
+ slicedSeries := ts.NewSeries(ctx, series.Name(), series.StartTime(), truncatedValues)
+ renamedSlicedSeries := slicedSeries.RenamedTo(fmt.Sprintf("timeSlice(%s, %s, %s)", slicedSeries.Name(), start, end))
+ output = append(output, renamedSlicedSeries)
+ }
+ input.Values = output
+ return input, nil
+}
+
// absolute returns the absolute value of each element in the series.
func absolute(ctx *common.Context, input singlePathSpec) (ts.SeriesList, error) {
return transform(ctx, input,
@@ -555,61 +632,76 @@ func lowestCurrent(_ *common.Context, input singlePathSpec, n int) (ts.SeriesLis
// windowSizeFunc calculates window size for moving average calculation
type windowSizeFunc func(stepSize int) int
-// movingAverage calculates the moving average of a metric (or metrics) over a time interval.
-func movingAverage(ctx *common.Context, input singlePathSpec, windowSizeValue genericInterface) (*binaryContextShifter, error) {
- if len(input.Values) == 0 {
- return nil, nil
- }
+type windowSizeParsed struct {
+ deltaValue time.Duration
+ stringValue string
+ windowSizeFunc windowSizeFunc
+}
- var delta time.Duration
- var wf windowSizeFunc
- var ws string
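+// parseWindowSize accepts either a duration string or a numeric point count.
+// For example, "30s" yields a window of interval/stepSize points per series,
+// while 3 yields a fixed three-point window whose delta is derived from the
+// widest step across the input series.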
+func parseWindowSize(windowSizeValue genericInterface, input singlePathSpec) (windowSizeParsed, error) {
+ windowSize := windowSizeParsed{}
switch windowSizeValue := windowSizeValue.(type) {
case string:
interval, err := common.ParseInterval(windowSizeValue)
if err != nil {
- return nil, err
+ return windowSize, err
}
if interval <= 0 {
err := errors.NewInvalidParamsError(fmt.Errorf(
"windowSize must be positive but instead is %v",
interval))
- return nil, err
+ return windowSize, err
}
- wf = func(stepSize int) int { return int(int64(delta/time.Millisecond) / int64(stepSize)) }
- ws = fmt.Sprintf("%q", windowSizeValue)
- delta = interval
+ windowSize.windowSizeFunc = func(stepSize int) int {
+ return int(int64(windowSize.deltaValue/time.Millisecond) / int64(stepSize))
+ }
+ windowSize.stringValue = fmt.Sprintf("%q", windowSizeValue)
+ windowSize.deltaValue = interval
case float64:
windowSizeInt := int(windowSizeValue)
if windowSizeInt <= 0 {
err := errors.NewInvalidParamsError(fmt.Errorf(
"windowSize must be positive but instead is %d",
windowSizeInt))
- return nil, err
+ return windowSize, err
}
- wf = func(_ int) int { return windowSizeInt }
- ws = fmt.Sprintf("%d", windowSizeInt)
+ windowSize.windowSizeFunc = func(_ int) int { return windowSizeInt }
+ windowSize.stringValue = fmt.Sprintf("%d", windowSizeInt)
maxStepSize := input.Values[0].MillisPerStep()
for i := 1; i < len(input.Values); i++ {
maxStepSize = int(math.Max(float64(maxStepSize), float64(input.Values[i].MillisPerStep())))
}
- delta = time.Duration(maxStepSize*windowSizeInt) * time.Millisecond
+ windowSize.deltaValue = time.Duration(maxStepSize*windowSizeInt) * time.Millisecond
default:
err := errors.NewInvalidParamsError(fmt.Errorf(
"windowSize must be either a string or an int but instead is a %T",
windowSizeValue))
+ return windowSize, err
+ }
+ return windowSize, nil
+}
+
+// movingAverage calculates the moving average of a metric (or metrics) over a time interval.
+func movingAverage(ctx *common.Context, input singlePathSpec, windowSizeValue genericInterface) (*binaryContextShifter, error) {
+ if len(input.Values) == 0 {
+ return nil, nil
+ }
+
+	windowSize, err := parseWindowSize(windowSizeValue, input)
+	if err != nil {
return nil, err
}
contextShiftingFn := func(c *common.Context) *common.Context {
opts := common.NewChildContextOptions()
- opts.AdjustTimeRange(0, 0, delta, 0)
+		opts.AdjustTimeRange(0, 0, windowSize.deltaValue, 0)
childCtx := c.NewChildContext(opts)
return childCtx
}
- bootstrapStartTime, bootstrapEndTime := ctx.StartTime.Add(-delta), ctx.StartTime
+	bootstrapStartTime, bootstrapEndTime := ctx.StartTime.Add(-windowSize.deltaValue), ctx.StartTime
transformerFn := func(bootstrapped, original ts.SeriesList) (ts.SeriesList, error) {
bootstrapList, err := combineBootstrapWithOriginal(ctx,
bootstrapStartTime, bootstrapEndTime,
@@ -622,7 +714,7 @@ func movingAverage(ctx *common.Context, input singlePathSpec, windowSizeValue ge
for i, bootstrap := range bootstrapList.Values {
series := original.Values[i]
stepSize := series.MillisPerStep()
- windowPoints := wf(stepSize)
+			windowPoints := windowSize.windowSizeFunc(stepSize)
if windowPoints == 0 {
err := errors.NewInvalidParamsError(fmt.Errorf(
"windowSize should not be smaller than stepSize, windowSize=%v, stepSize=%d",
@@ -635,14 +727,17 @@ func movingAverage(ctx *common.Context, input singlePathSpec, windowSizeValue ge
vals := ts.NewValues(ctx, series.MillisPerStep(), numSteps)
sum := 0.0
num := 0
+ firstPoint := false
for i := 0; i < numSteps; i++ {
- // skip if the number of points received is less than the number of points
- // in the lookback window.
- if offset < windowPoints {
- continue
- }
- if i == 0 {
+ // NB: skip if the number of points received is less than the number
+ // of points in the lookback window.
+ if !firstPoint {
+ firstPoint = true
for j := offset - windowPoints; j < offset; j++ {
+ if j < 0 {
+ continue
+ }
+
v := bootstrap.ValueAt(j)
if !math.IsNaN(v) {
sum += v
@@ -650,22 +745,25 @@ func movingAverage(ctx *common.Context, input singlePathSpec, windowSizeValue ge
}
}
} else {
- prev := bootstrap.ValueAt(i + offset - windowPoints - 1)
- next := bootstrap.ValueAt(i + offset - 1)
- if !math.IsNaN(prev) {
- sum -= prev
- num--
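+				// NB: slide the window forward one step: drop the point
+				// leaving the window (when it exists) and add the point
+				// entering it, maintaining a running sum and count.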
+ if i+offset-windowPoints > 0 {
+ prev := bootstrap.ValueAt(i + offset - windowPoints - 1)
+ if !math.IsNaN(prev) {
+ sum -= prev
+ num--
+ }
}
+ next := bootstrap.ValueAt(i + offset - 1)
if !math.IsNaN(next) {
sum += next
num++
}
}
+
if num > 0 {
vals.SetValueAt(i, sum/float64(num))
}
}
- name := fmt.Sprintf("movingAverage(%s,%s)", series.Name(), ws)
+		name := fmt.Sprintf("movingAverage(%s,%s)", series.Name(), windowSize.stringValue)
newSeries := ts.NewSeries(ctx, name, series.StartTime(), vals)
results = append(results, newSeries)
}
@@ -1578,6 +1676,7 @@ func movingMedian(ctx *common.Context, _ singlePathSpec, windowSize string) (*bi
if err != nil {
return nil, err
}
+
if interval <= 0 {
return nil, common.ErrInvalidIntervalFormat
}
@@ -1609,12 +1708,22 @@ func movingMedian(ctx *common.Context, _ singlePathSpec, windowSize string) (*bi
return ts.NewSeriesList(), err
}
window := make([]float64, windowPoints)
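+			// NB: pre-fill the window with NaN so that slots without a
+			// corresponding bootstrapped point stay NaN and are counted
+			// by SafeSort below.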
+ util.Memset(window, math.NaN())
numSteps := series.Len()
offset := bootstrap.Len() - numSteps
vals := ts.NewValues(ctx, series.MillisPerStep(), numSteps)
for i := 0; i < numSteps; i++ {
for j := i + offset - windowPoints; j < i+offset; j++ {
- window[j-i-offset+windowPoints] = bootstrap.ValueAt(j)
+ if j < 0 || j >= bootstrap.Len() {
+ continue
+ }
+
+ idx := j - i - offset + windowPoints
+			if idx < 0 || idx >= len(window) {
+ continue
+ }
+
+ window[idx] = bootstrap.ValueAt(j)
}
nans := common.SafeSort(window)
if nans < windowPoints {
@@ -1852,6 +1961,7 @@ func init() {
MustRegisterFunction(dashed).WithDefaultParams(map[uint8]interface{}{
2: 5.0, // dashLength
})
+ MustRegisterFunction(delay)
MustRegisterFunction(derivative)
MustRegisterFunction(diffSeries)
MustRegisterFunction(divideSeries)
@@ -1859,6 +1969,7 @@ func init() {
MustRegisterFunction(fallbackSeries)
MustRegisterFunction(group)
MustRegisterFunction(groupByNode)
+ MustRegisterFunction(groupByNodes)
MustRegisterFunction(highestAverage)
MustRegisterFunction(highestCurrent)
MustRegisterFunction(highestMax)
@@ -1927,6 +2038,7 @@ func init() {
})
MustRegisterFunction(sumSeries)
MustRegisterFunction(sumSeriesWithWildcards)
+ MustRegisterFunction(aggregateWithWildcards)
MustRegisterFunction(sustainedAbove)
MustRegisterFunction(sustainedBelow)
MustRegisterFunction(threshold).WithDefaultParams(map[uint8]interface{}{
@@ -1939,6 +2051,9 @@ func init() {
MustRegisterFunction(timeShift).WithDefaultParams(map[uint8]interface{}{
3: true, // resetEnd
})
+ MustRegisterFunction(timeSlice).WithDefaultParams(map[uint8]interface{}{
+ 3: "now", // endTime
+ })
MustRegisterFunction(transformNull).WithDefaultParams(map[uint8]interface{}{
2: 0.0, // defaultValue
})
diff --git a/src/query/graphite/native/builtin_functions_test.go b/src/query/graphite/native/builtin_functions_test.go
index 6b76608f90..8becf9fe8e 100644
--- a/src/query/graphite/native/builtin_functions_test.go
+++ b/src/query/graphite/native/builtin_functions_test.go
@@ -28,11 +28,14 @@ import (
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/graphite/common"
+ "github.com/m3db/m3/src/query/graphite/context"
xctx "github.com/m3db/m3/src/query/graphite/context"
"github.com/m3db/m3/src/query/graphite/storage"
xtest "github.com/m3db/m3/src/query/graphite/testing"
"github.com/m3db/m3/src/query/graphite/ts"
+ xgomock "github.com/m3db/m3/src/x/test"
+ "github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -600,26 +603,26 @@ func TestTransformNull(t *testing.T) {
}
var (
- testMovingAverageBootstrap = testMovingAverageStart.Add(-30 * time.Second)
- testMovingAverageStart = time.Now().Truncate(time.Minute)
- testMovingAverageEnd = testMovingAverageStart.Add(time.Minute)
+ testMovingFunctionBootstrap = testMovingFunctionStart.Add(-30 * time.Second)
+ testMovingFunctionStart = time.Now().Truncate(time.Minute)
+ testMovingFunctionEnd = testMovingFunctionStart.Add(time.Minute)
)
-func testMovingAverage(t *testing.T, target, expectedName string, values, bootstrap, output []float64) {
+func testMovingFunction(t *testing.T, target, expectedName string, values, bootstrap, output []float64) {
ctx := common.NewTestContext()
defer ctx.Close()
engine := NewEngine(
- &common.MovingAverageStorage{
+ &common.MovingFunctionStorage{
StepMillis: 10000,
Bootstrap: bootstrap,
- BootstrapStart: testMovingAverageBootstrap,
+ BootstrapStart: testMovingFunctionBootstrap,
Values: values,
},
)
phonyContext := common.NewContext(common.ContextOptions{
- Start: testMovingAverageStart,
- End: testMovingAverageEnd,
+ Start: testMovingFunctionStart,
+ End: testMovingFunctionEnd,
Engine: engine,
})
@@ -635,38 +638,75 @@ func testMovingAverage(t *testing.T, target, expectedName string, values, bootst
}
expected = append(expected, expectedSeries)
}
- common.CompareOutputsAndExpected(t, 10000, testMovingAverageStart,
+ common.CompareOutputsAndExpected(t, 10000, testMovingFunctionStart,
expected, res.Values)
}
+var (
+ testGeneralFunctionStart = time.Now().Add(time.Minute * -11).Truncate(time.Minute)
+ testGeneralFunctionEnd = time.Now().Add(time.Minute * -3).Truncate(time.Minute)
+)
+
+// testGeneralFunction is a copy of testMovingFunction but without any logic for bootstrapping values
+func testGeneralFunction(t *testing.T, target, expectedName string, values, output []float64) {
+ ctx := common.NewTestContext()
+ defer ctx.Close()
+
+ engine := NewEngine(
+ &common.MovingFunctionStorage{
+ StepMillis: 60000,
+ Values: values,
+ },
+ )
+ phonyContext := common.NewContext(common.ContextOptions{
+ Start: testGeneralFunctionStart,
+ End: testGeneralFunctionEnd,
+ Engine: engine,
+ })
+
+ expr, err := phonyContext.Engine.(*Engine).Compile(target)
+ require.NoError(t, err)
+ res, err := expr.Execute(phonyContext)
+ require.NoError(t, err)
+ var expected []common.TestSeries
+ if output != nil {
+ expectedSeries := common.TestSeries{
+ Name: expectedName,
+ Data: output,
+ }
+ expected = append(expected, expectedSeries)
+ }
+ common.CompareOutputsAndExpected(t, 60000, testGeneralFunctionStart, expected, res.Values)
+}
+
func TestMovingAverageSuccess(t *testing.T) {
values := []float64{12.0, 19.0, -10.0, math.NaN(), 10.0}
bootstrap := []float64{3.0, 4.0, 5.0}
expected := []float64{4.0, 7.0, 12.0, 7.0, 4.5}
- testMovingAverage(t, "movingAverage(foo.bar.baz, '30s')", "movingAverage(foo.bar.baz,\"30s\")", values, bootstrap, expected)
- testMovingAverage(t, "movingAverage(foo.bar.baz, 3)", "movingAverage(foo.bar.baz,3)", values, bootstrap, expected)
- testMovingAverage(t, "movingAverage(foo.bar.baz, 3)", "movingAverage(foo.bar.baz,3)", nil, nil, nil)
+ testMovingFunction(t, "movingAverage(foo.bar.baz, '30s')", "movingAverage(foo.bar.baz,\"30s\")", values, bootstrap, expected)
+ testMovingFunction(t, "movingAverage(foo.bar.baz, 3)", "movingAverage(foo.bar.baz,3)", values, bootstrap, expected)
+ testMovingFunction(t, "movingAverage(foo.bar.baz, 3)", "movingAverage(foo.bar.baz,3)", nil, nil, nil)
bootstrapEntireSeries := []float64{3.0, 4.0, 5.0, 12.0, 19.0, -10.0, math.NaN(), 10.0}
- testMovingAverage(t, "movingAverage(foo.bar.baz, '30s')", "movingAverage(foo.bar.baz,\"30s\")", values, bootstrapEntireSeries, expected)
- testMovingAverage(t, "movingAverage(foo.bar.baz, 3)", "movingAverage(foo.bar.baz,3)", values, bootstrapEntireSeries, expected)
+ testMovingFunction(t, "movingAverage(foo.bar.baz, '30s')", "movingAverage(foo.bar.baz,\"30s\")", values, bootstrapEntireSeries, expected)
+ testMovingFunction(t, "movingAverage(foo.bar.baz, 3)", "movingAverage(foo.bar.baz,3)", values, bootstrapEntireSeries, expected)
}
-func testMovingAverageError(t *testing.T, target string) {
+func testMovingFunctionError(t *testing.T, target string) {
ctx := common.NewTestContext()
defer ctx.Close()
engine := NewEngine(
- &common.MovingAverageStorage{
+ &common.MovingFunctionStorage{
StepMillis: 10000,
Bootstrap: []float64{1.0},
- BootstrapStart: testMovingAverageBootstrap,
+ BootstrapStart: testMovingFunctionBootstrap,
Values: []float64{1.0},
},
)
phonyContext := common.NewContext(common.ContextOptions{
- Start: testMovingAverageStart,
- End: testMovingAverageEnd,
+ Start: testMovingFunctionStart,
+ End: testMovingFunctionEnd,
Engine: engine,
})
@@ -678,8 +718,8 @@ func testMovingAverageError(t *testing.T, target string) {
}
func TestMovingAverageError(t *testing.T) {
- testMovingAverageError(t, "movingAverage(foo.bar.baz, '-30s')")
- testMovingAverageError(t, "movingAverage(foo.bar.baz, 0)")
+ testMovingFunctionError(t, "movingAverage(foo.bar.baz, '-30s')")
+ testMovingFunctionError(t, "movingAverage(foo.bar.baz, 0)")
}
func TestIsNonNull(t *testing.T) {
@@ -1983,11 +2023,11 @@ func TestHoltWintersForecast(t *testing.T) {
"foo",
now,
1000,
 			[]float64{4.0, 5.0, 6.0},
3 * time.Second,
now,
1000,
- []float64{4.0, 4.0, 4.10035},
+ []float64{math.NaN(), 4.0, 4.10035},
},
}
@@ -1998,6 +2038,7 @@ func TestHoltWintersForecast(t *testing.T) {
input.startTime,
common.NewTestSeriesValues(ctx, input.stepInMilli, input.values),
)
+
results, err := holtWintersForecastInternal(ctx, singlePathSpec{
Values: []*ts.Series{series},
}, input.duration)
@@ -2006,6 +2047,7 @@ func TestHoltWintersForecast(t *testing.T) {
Data: input.output,
}
require.Nil(t, err)
+
common.CompareOutputsAndExpected(t, input.newStep, input.newStartTime,
[]common.TestSeries{expected}, results.Values)
}
@@ -2040,10 +2082,10 @@ func TestHoltWintersConfidenceBands(t *testing.T) {
3 * time.Second,
now,
1000,
- []float64{0.4787, 3.7, 3.5305},
+ []float64{math.NaN(), 3.7, 3.5305},
now,
1000,
- []float64{2.1039, 4.3, 4.6702},
+ []float64{math.NaN(), 4.3, 4.6702},
},
}
@@ -2448,13 +2490,13 @@ func TestChanged(t *testing.T) {
expected, results.Values)
}
-// TODO: re-enable
-// nolint
-func testMovingMedian(t *testing.T) {
- now := time.Now()
- engine := NewEngine(
- testStorage,
- )
+func TestMovingMedian(t *testing.T) {
+ ctrl := xgomock.NewController(t)
+ defer ctrl.Finish()
+
+ store := storage.NewMockStorage(ctrl)
+ now := time.Now().Truncate(time.Hour)
+ engine := NewEngine(store)
startTime := now.Add(-3 * time.Minute)
endTime := now.Add(-time.Minute)
ctx := common.NewContext(common.ContextOptions{Start: startTime, End: endTime, Engine: engine})
@@ -2462,6 +2504,8 @@ func testMovingMedian(t *testing.T) {
stepSize := 60000
target := "movingMedian(foo.bar.q.zed, '1min')"
+ store.EXPECT().FetchByQuery(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(
+ buildTestSeriesFn(stepSize, "foo.bar.q.zed")).Times(2)
expr, err := engine.Compile(target)
require.NoError(t, err)
res, err := expr.Execute(ctx)
@@ -2474,6 +2518,125 @@ func testMovingMedian(t *testing.T) {
[]common.TestSeries{expected}, res.Values)
}
+func TestMovingAverage(t *testing.T) {
+ ctrl := xgomock.NewController(t)
+ defer ctrl.Finish()
+
+ store := storage.NewMockStorage(ctrl)
+ now := time.Now().Truncate(time.Hour)
+ engine := NewEngine(store)
+ startTime := now.Add(-3 * time.Minute)
+ endTime := now.Add(-1 * time.Minute)
+ ctx := common.NewContext(common.ContextOptions{Start: startTime, End: endTime, Engine: engine})
+ defer ctx.Close()
+
+ stepSize := 60000
+ target := `movingAverage(timeShift(foo.bar.g.zed, '-1d'), '1min')`
+ store.EXPECT().FetchByQuery(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(
+ buildTestSeriesFn(stepSize, "foo.bar.g.zed")).Times(2)
+ expr, err := engine.Compile(target)
+ require.NoError(t, err)
+ res, err := expr.Execute(ctx)
+ require.NoError(t, err)
+ expected := common.TestSeries{
+ Name: `movingAverage(timeShift(foo.bar.g.zed, -1d),"1min")`,
+ Data: []float64{1, 1},
+ }
+ common.CompareOutputsAndExpected(t, stepSize, startTime,
+ []common.TestSeries{expected}, res.Values)
+}
+
+func TestMovingMedianInvalidLimits(t *testing.T) {
+ ctrl := xgomock.NewController(t)
+ defer ctrl.Finish()
+
+ store := storage.NewMockStorage(ctrl)
+ now := time.Now().Truncate(time.Hour)
+ engine := NewEngine(store)
+ startTime := now.Add(-3 * time.Minute)
+ endTime := now.Add(-time.Minute)
+ ctx := common.NewContext(common.ContextOptions{Start: startTime, End: endTime, Engine: engine})
+ defer ctx.Close()
+
+ stepSize := 60000
+ target := "movingMedian(foo.bar.q.zed, '1min')"
+ store.EXPECT().FetchByQuery(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(
+ func(_ context.Context, q string, opts storage.FetchOptions) (*storage.FetchResult, error) {
+ startTime := opts.StartTime
+ ctx := context.New()
+ numSteps := int(opts.EndTime.Sub(startTime)/time.Millisecond) / stepSize
+ vals := ts.NewConstantValues(ctx, 0, numSteps, stepSize)
+ series := ts.NewSeries(ctx, "foo.bar.q.zed", opts.EndTime, vals)
+ return &storage.FetchResult{SeriesList: []*ts.Series{series}}, nil
+ }).Times(2)
+ expr, err := engine.Compile(target)
+ require.NoError(t, err)
+ res, err := expr.Execute(ctx)
+ require.NoError(t, err)
+ expected := common.TestSeries{
+ Name: "movingMedian(foo.bar.q.zed,\"1min\")",
+ Data: []float64{math.NaN(), 0.0},
+ }
+ common.CompareOutputsAndExpected(t, stepSize, endTime,
+ []common.TestSeries{expected}, res.Values)
+}
+
+func TestMovingMismatchedLimits(t *testing.T) {
+	// NB: this tests the behavior when query limits do not snap exactly to
+	// data points; in that case, the first point should be omitted.
+ for _, fn := range []string{"movingAverage", "movingMedian"} {
+ for i := time.Duration(0); i < time.Minute; i += time.Second {
+ testMovingAverageInvalidLimits(t, fn, i)
+ }
+ }
+}
+
+func testMovingAverageInvalidLimits(t *testing.T, fn string, offset time.Duration) {
+ ctrl := xgomock.NewController(t)
+ defer ctrl.Finish()
+
+ store := storage.NewMockStorage(ctrl)
+ now := time.Now().Truncate(time.Hour).Add(offset)
+ engine := NewEngine(store)
+ startTime := now.Add(-3 * time.Minute)
+ endTime := now.Add(-time.Minute)
+ ctx := common.NewContext(common.ContextOptions{Start: startTime, End: endTime, Engine: engine})
+ defer ctx.Close()
+
+ stepSize := 60000
+ target := fmt.Sprintf(`%s(timeShift(foo.bar.*.zed, '-1d'), '1min')`, fn)
+ store.EXPECT().FetchByQuery(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(
+ buildTestSeriesFn(stepSize, "foo.bar.g.zed", "foo.bar.x.zed"),
+ ).Times(2)
+ expr, err := engine.Compile(target)
+ require.NoError(t, err)
+ res, err := expr.Execute(ctx)
+ require.NoError(t, err)
+
+ expectedStart := startTime
+ expectedDataG := []float64{1, 1}
+ expectedDataX := []float64{2, 2}
+
+ if offset > 0 {
+ expectedStart = expectedStart.Add(time.Minute)
+ expectedDataG[0] = math.NaN()
+ expectedDataX[0] = math.NaN()
+ }
+
+ expected := []common.TestSeries{
+ {
+ Name: fmt.Sprintf(`%s(timeShift(foo.bar.g.zed, -1d),"1min")`, fn),
+ Data: expectedDataG,
+ },
+ {
+ Name: fmt.Sprintf(`%s(timeShift(foo.bar.x.zed, -1d),"1min")`, fn),
+ Data: expectedDataX,
+ },
+ }
+
+ common.CompareOutputsAndExpected(t, stepSize, expectedStart, expected, res.Values)
+}
+
func TestLegendValue(t *testing.T) {
ctx := common.NewTestContext()
defer ctx.Close()
@@ -2680,13 +2843,13 @@ func TestTimeFunction(t *testing.T) {
[]common.TestSeries{expected}, results.Values)
}
-// TODO arnikola reenable
-// nolint
-func testTimeShift(t *testing.T) {
- now := time.Now()
- engine := NewEngine(
- testStorage,
- )
+func TestTimeShift(t *testing.T) {
+ ctrl := xgomock.NewController(t)
+ defer ctrl.Finish()
+
+ store := storage.NewMockStorage(ctrl)
+ now := time.Now().Truncate(time.Hour)
+ engine := NewEngine(store)
startTime := now.Add(-3 * time.Minute)
endTime := now.Add(-time.Minute)
ctx := common.NewContext(common.ContextOptions{
@@ -2698,6 +2861,10 @@ func testTimeShift(t *testing.T) {
stepSize := 60000
target := "timeShift(foo.bar.q.zed, '1min', false)"
+
+ store.EXPECT().FetchByQuery(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(
+ buildTestSeriesFn(stepSize, "foo.bar.q.zed"))
+
expr, err := engine.Compile(target)
require.NoError(t, err)
res, err := expr.Execute(ctx)
@@ -2710,6 +2877,68 @@ func testTimeShift(t *testing.T) {
[]common.TestSeries{expected}, res.Values)
}
+func TestDelay(t *testing.T) {
+ var values = [3][]float64{
+ {54.0, 48.0, 92.0, 54.0, 14.0, 1.2},
+ {4.0, 5.0, math.NaN(), 6.4, 7.2, math.NaN()},
+ {math.NaN(), 8.0, 9.0, 10.6, 11.2, 12.2},
+ }
+ expected := [3][]float64{
+ {math.NaN(), math.NaN(), math.NaN(), 54.0, 48.0, 92.0},
+ {math.NaN(), math.NaN(), math.NaN(), 4.0, 5.0, math.NaN()},
+ {math.NaN(), math.NaN(), math.NaN(), math.NaN(), 8.0, 9.0},
+ }
+
+ for index, value := range values {
+ e := expected[index]
+ testDelay(t, "delay(foo.bar.baz, 3)", "delay(foo.bar.baz,3)", value, e)
+ }
+}
+
+var (
+ testDelayStart = time.Now().Truncate(time.Minute)
+ testDelayEnd = testMovingFunctionEnd.Add(time.Minute)
+)
+
+func testDelay(t *testing.T, target, expectedName string, values, output []float64) {
+ ctx := common.NewTestContext()
+ defer ctx.Close()
+
+ engine := NewEngine(
+ &common.MovingFunctionStorage{
+ StepMillis: 10000,
+ Values: values,
+ },
+ )
+ phonyContext := common.NewContext(common.ContextOptions{
+ Start: testDelayStart,
+ End: testDelayEnd,
+ Engine: engine,
+ })
+
+ expr, err := phonyContext.Engine.(*Engine).Compile(target)
+ require.NoError(t, err)
+ res, err := expr.Execute(phonyContext)
+ require.NoError(t, err)
+ var expected []common.TestSeries
+
+ if output != nil {
+ expectedSeries := common.TestSeries{
+ Name: expectedName,
+ Data: output,
+ }
+ expected = append(expected, expectedSeries)
+ }
+ common.CompareOutputsAndExpected(t, 10000, testDelayStart, expected, res.Values)
+}
+
+func TestTimeSlice(t *testing.T) {
+	values := []float64{math.NaN(), 1.0, 2.0, 3.0, math.NaN(), 5.0, 6.0, math.NaN(), 7.0, 8.0, 9.0}
+	expected := []float64{math.NaN(), math.NaN(), math.NaN(), 3.0, math.NaN(), 5.0, 6.0, math.NaN(), 7.0, math.NaN(), math.NaN()}
+
+ testGeneralFunction(t, "timeSlice(foo.bar.baz, '-9min','-3min')", "timeSlice(foo.bar.baz, -9min, -3min)", values, expected)
+}
+
func TestDashed(t *testing.T) {
ctx := common.NewTestContext()
defer ctx.Close()
@@ -2797,6 +3026,7 @@ func TestFunctionsRegistered(t *testing.T) {
"currentAbove",
"currentBelow",
"dashed",
+ "delay",
"derivative",
"diffSeries",
"divideSeries",
@@ -2804,6 +3034,7 @@ func TestFunctionsRegistered(t *testing.T) {
"fallbackSeries",
"group",
"groupByNode",
+ "groupByNodes",
"highestAverage",
"highestCurrent",
"highestMax",
@@ -2859,6 +3090,7 @@ func TestFunctionsRegistered(t *testing.T) {
"time",
"timeFunction",
"timeShift",
+ "timeSlice",
"transformNull",
"weightedAverage",
}
diff --git a/src/query/graphite/native/compiler.go b/src/query/graphite/native/compiler.go
index 9469ce4015..616bb21dd0 100644
--- a/src/query/graphite/native/compiler.go
+++ b/src/query/graphite/native/compiler.go
@@ -39,7 +39,8 @@ func compile(input string) (Expression, error) {
lex, tokens := lexer.NewLexer(input, booleanLiterals)
go lex.Run()
- c := compiler{input: input, tokens: tokens}
+ lookforward := newTokenLookforward(tokens)
+ c := compiler{input: input, tokens: lookforward}
expr, err := c.compileExpression()
// Exhaust all tokens until closed or else lexer won't close
@@ -49,15 +50,54 @@ func compile(input string) (Expression, error) {
return expr, err
}
+type tokenLookforward struct {
+ lookforward *lexer.Token
+ tokens chan *lexer.Token
+}
+
+func newTokenLookforward(tokens chan *lexer.Token) *tokenLookforward {
+ return &tokenLookforward{
+ tokens: tokens,
+ }
+}
+
+// get returns the next token, consuming the buffered lookforward token
+// first if one exists; it returns nil once the token channel is closed.
+func (l *tokenLookforward) get() *lexer.Token {
+ if token := l.lookforward; token != nil {
+ l.lookforward = nil
+ return token
+ }
+
+ if token, ok := <-l.tokens; ok {
+ return token
+ }
+
+ return nil
+}
+
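+// peek returns the next token without consuming it, buffering the token so
+// that a subsequent get observes the same value; it reports false once the
+// token channel has been closed and drained.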
+func (l *tokenLookforward) peek() (*lexer.Token, bool) {
+ if l.lookforward != nil {
+ return l.lookforward, true
+ }
+
+ token, ok := <-l.tokens
+ if !ok {
+ return nil, false
+ }
+
+ l.lookforward = token
+ return token, true
+}
+
// A compiler converts an input string into an executable Expression
type compiler struct {
input string
- tokens chan *lexer.Token
+ tokens *tokenLookforward
}
// compileExpression compiles a top level expression
func (c *compiler) compileExpression() (Expression, error) {
- token := <-c.tokens
+ token := c.tokens.get()
if token == nil {
return noopExpression{}, nil
}
@@ -69,31 +109,54 @@ func (c *compiler) compileExpression() (Expression, error) {
case lexer.Identifier:
fc, err := c.compileFunctionCall(token.Value(), nil)
+ fetchCandidate := false
if err != nil {
- return nil, err
+ _, fnNotFound := err.(errFuncNotFound)
+ if fnNotFound && c.canCompileAsFetch(token.Value()) {
+ fetchCandidate = true
+ expr = newFetchExpression(token.Value())
+ } else {
+ return nil, err
+ }
}
- expr, err = newFuncExpression(fc)
- if err != nil {
- return nil, err
+ if !fetchCandidate {
+ expr, err = newFuncExpression(fc)
+ if err != nil {
+ return nil, err
+ }
}
default:
return nil, c.errorf("unexpected value %s", token.Value())
}
- if token := <-c.tokens; token != nil {
+ if token := c.tokens.get(); token != nil {
return nil, c.errorf("extra data %s", token.Value())
}
return expr, nil
}
+// canCompileAsFetch checks whether the given term may be a carbon metric
+// rather than a function call, i.e. an identifier not followed by an
+// opening parenthesis.
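+// For example, "foobar" compiles to a fetch expression for that metric,
+// while "foobar(" and "foobar()" still fail with an unknown-function error.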
+func (c *compiler) canCompileAsFetch(fname string) bool {
+ if nextToken, hasNext := c.tokens.peek(); hasNext {
+ return nextToken.TokenType() != lexer.LParenthesis
+ }
+
+ return true
+}
+
+type errFuncNotFound struct{ err error }
+
+func (e errFuncNotFound) Error() string { return e.err.Error() }
+
// compileFunctionCall compiles a function call
func (c *compiler) compileFunctionCall(fname string, nextToken *lexer.Token) (*functionCall, error) {
fn := findFunction(fname)
if fn == nil {
- return nil, c.errorf("could not find function named %s", fname)
+ return nil, errFuncNotFound{c.errorf("could not find function named %s", fname)}
}
if nextToken != nil {
@@ -158,7 +221,7 @@ func (c *compiler) compileFunctionCall(fname string, nextToken *lexer.Token) (*f
// compileArg parses and compiles a single argument
func (c *compiler) compileArg(fname string, index int,
reflectType reflect.Type) (arg funcArg, foundRParen bool, err error) {
- token := <-c.tokens
+ token := c.tokens.get()
if token == nil {
return nil, false, c.errorf("unexpected eof while parsing %s", fname)
}
@@ -173,7 +236,7 @@ func (c *compiler) compileArg(fname string, index int,
fname, token.Value())
}
- if token = <-c.tokens; token == nil {
+ if token = c.tokens.get(); token == nil {
return nil, false, c.errorf("unexpected eof while parsing %s", fname)
}
}
@@ -219,13 +282,13 @@ func (c *compiler) convertTokenToArg(token *lexer.Token, reflectType reflect.Typ
currentToken := token.Value()
// handle named arguments
- nextToken := <-c.tokens
+ nextToken := c.tokens.get()
if nextToken == nil {
return nil, c.errorf("unexpected eof, %s should be followed by = or (", currentToken)
}
if nextToken.TokenType() == lexer.Equal {
// TODO: check if currentToken matches the expected parameter name
- tokenAfterNext := <-c.tokens
+ tokenAfterNext := c.tokens.get()
if tokenAfterNext == nil {
return nil, c.errorf("unexpected eof, named argument %s should be followed by its value", currentToken)
}
@@ -240,7 +303,7 @@ func (c *compiler) convertTokenToArg(token *lexer.Token, reflectType reflect.Typ
// expectToken reads the next token and confirms it is the expected type before returning it
func (c *compiler) expectToken(expectedType lexer.TokenType) (*lexer.Token, error) {
- token := <-c.tokens
+ token := c.tokens.get()
if token == nil {
return nil, c.errorf("expected %v but encountered eof", expectedType)
}
diff --git a/src/query/graphite/native/compiler_test.go b/src/query/graphite/native/compiler_test.go
index 6149cd0408..61dc7b4cc8 100644
--- a/src/query/graphite/native/compiler_test.go
+++ b/src/query/graphite/native/compiler_test.go
@@ -26,6 +26,7 @@ import (
"testing"
"github.com/m3db/m3/src/query/graphite/common"
+ "github.com/m3db/m3/src/query/graphite/lexer"
xtest "github.com/m3db/m3/src/query/graphite/testing"
"github.com/m3db/m3/src/query/graphite/ts"
@@ -56,6 +57,7 @@ func TestCompile1(t *testing.T) {
tests := []testCompile{
{"", noopExpression{}},
+ {"foobar", newFetchExpression("foobar")},
{"foo.bar.{a,b,c}.baz-*.stat[0-9]",
newFetchExpression("foo.bar.{a,b,c}.baz-*.stat[0-9]")},
{"noArgs()", &funcExpression{&functionCall{f: noArgs}}},
@@ -290,6 +292,8 @@ type testCompilerError struct {
func TestCompileErrors(t *testing.T) {
tests := []testCompilerError{
{"hello()", "top-level functions must return timeseries data"},
+ {"foobar(", "invalid expression 'foobar(': could not find function named foobar"},
+ {"foobar()", "invalid expression 'foobar()': could not find function named foobar"},
{"sortByName(foo.*.zed)junk", "invalid expression 'sortByName(foo.*.zed)junk': " +
"extra data junk"},
{"aliasByNode(",
@@ -435,7 +439,40 @@ func TestExtractFetchExpressions(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, test.targets, targets, test.expr)
}
+}
+
+func TestTokenLookforward(t *testing.T) {
+ tokenVals := []string{"a", "b", "c"}
+ tokens := make(chan *lexer.Token)
+ go func() {
+ for _, v := range tokenVals {
+ tokens <- lexer.MustMakeToken(v)
+ }
+
+ close(tokens)
+ }()
+
+ lookforward := newTokenLookforward(tokens)
+ token := lookforward.get()
+ assert.Equal(t, "a", token.Value())
+
+	// assert that peek does not advance the token stream.
+ token, found := lookforward.peek()
+ assert.True(t, found)
+ assert.Equal(t, "b", token.Value())
+ token, found = lookforward.peek()
+ assert.True(t, found)
+ assert.Equal(t, "b", token.Value())
+
+	// assert that the next get after a peek advances past the peeked token.
+ token = lookforward.get()
+ assert.Equal(t, "b", token.Value())
+ token = lookforward.get()
+ assert.Equal(t, "c", token.Value())
+ // assert peek is empty once channel is closed.
+ _, found = lookforward.peek()
+ assert.False(t, found)
}
func init() {
diff --git a/src/query/graphite/native/engine_test.go b/src/query/graphite/native/engine_test.go
index d9e822a143..78cf64034a 100644
--- a/src/query/graphite/native/engine_test.go
+++ b/src/query/graphite/native/engine_test.go
@@ -24,30 +24,62 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/query/graphite/common"
+ "github.com/m3db/m3/src/query/graphite/context"
"github.com/m3db/m3/src/query/graphite/storage"
- xtime "github.com/m3db/m3/src/x/time"
+ "github.com/m3db/m3/src/query/graphite/ts"
+ xgomock "github.com/m3db/m3/src/x/test"
+ "github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-// nolint
type queryTestResult struct {
- name string
- max float64
+ series string
+ expected string
+ max float64
}
-// nolint
type queryTest struct {
query string
ordered bool
results []queryTestResult
}
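+// snapStartToStepSize rounds a start time forward to the next step boundary:
+// with a 60s step, for example, 12:00:30 snaps to 12:01:00, while an
+// already-aligned time is returned unchanged.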
+func snapStartToStepSize(t time.Time, stepSize int) time.Time {
+ step := time.Duration(stepSize) * time.Millisecond
+ if truncated := t.Truncate(step); truncated.Before(t) {
+ return t.Add(step)
+ }
+
+ return t
+}
+
+func testSeries(name string, stepSize int, val float64, opts storage.FetchOptions) *ts.Series {
+ ctx := context.New()
+ numSteps := int(opts.EndTime.Sub(opts.StartTime)/time.Millisecond) / stepSize
+ vals := ts.NewConstantValues(ctx, val, numSteps, stepSize)
+ firstPoint := snapStartToStepSize(opts.StartTime, stepSize)
+ return ts.NewSeries(ctx, name, firstPoint, vals)
+}
+
+func buildTestSeriesFn(
+ stepSize int,
+ id ...string,
+) func(context.Context, string, storage.FetchOptions) (*storage.FetchResult, error) {
+ return func(_ context.Context, q string, opts storage.FetchOptions) (*storage.FetchResult, error) {
+ series := make([]*ts.Series, 0, len(id))
+ for _, name := range id {
+ val := testValues[name]
+ series = append(series, testSeries(name, stepSize, val, opts))
+ }
+
+ return &storage.FetchResult{SeriesList: series}, nil
+ }
+}
+
var (
- // nolint
testValues = map[string]float64{
"foo.bar.q.zed": 0,
"foo.bar.g.zed": 1,
@@ -57,42 +89,56 @@ var (
"chicago.cake": 5,
"los_angeles.cake": 6,
}
-
- // nolint
- testPolicy = policy.NewStoragePolicy(10*time.Second, xtime.Second, 48*time.Hour)
- // testTSDB = makeTSDB(testPolicy)
- // nolint
- testStorage storage.Storage //= nil
- // local.NewLocalStorage(local.Options{
- // Database: testTSDB,
- // Workers: workers,
- // Scope: metrics.None,
- // PolicyResolver: resolver.NewStaticResolver(testIndex, testPolicy),
- // })
)
-// TODO arnikola reenable
-// nolint
-func testExecute(t *testing.T) {
- engine := NewEngine(
- testStorage,
- )
+func newTestStorage(ctrl *gomock.Controller) storage.Storage {
+ store := storage.NewMockStorage(ctrl)
+ store.EXPECT().FetchByQuery(gomock.Any(), gomock.Any(), gomock.Any()).
+ DoAndReturn(
+ func(
+ ctx context.Context,
+ query string,
+ opts storage.FetchOptions,
+ ) (*storage.FetchResult, error) {
+ return &storage.FetchResult{}, nil
+ })
+
+ return store
+}
+
+func TestExecute(t *testing.T) {
+ ctrl := xgomock.NewController(t)
+ defer ctrl.Finish()
+
+ store := storage.NewMockStorage(ctrl)
+ engine := NewEngine(store)
+
tests := []queryTest{
- {"foo.bar.q.zed", true, []queryTestResult{{"foo.bar.q.zed", 0}}},
+ {"foo.bar.q.zed", true, []queryTestResult{{"foo.bar.q.zed", "foo.bar.q.zed", 0}}},
{"foo.bar.*.zed", false, []queryTestResult{
- {"foo.bar.q.zed", 0},
- {"foo.bar.g.zed", 1},
- {"foo.bar.x.zed", 2}},
+ {"foo.bar.q.zed", "foo.bar.q.zed", 0},
+ {"foo.bar.g.zed", "foo.bar.g.zed", 1},
+ {"foo.bar.x.zed", "foo.bar.x.zed", 2}},
},
{"sortByName(aliasByNode(foo.bar.*.zed, 0, 2))", true, []queryTestResult{
- {"foo.g", 1},
- {"foo.q", 0},
- {"foo.x", 2},
+ {"foo.bar.g.zed", "foo.g", 1},
+ {"foo.bar.q.zed", "foo.q", 0},
+ {"foo.bar.x.zed", "foo.x", 2},
}},
}
ctx := common.NewContext(common.ContextOptions{Start: time.Now().Add(-1 * time.Hour), End: time.Now(), Engine: engine})
for _, test := range tests {
+
+ stepSize := 60000
+ queries := make([]string, 0, len(test.results))
+ for _, r := range test.results {
+ queries = append(queries, r.series)
+ }
+
+ store.EXPECT().FetchByQuery(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(
+ buildTestSeriesFn(stepSize, queries...))
+
expr, err := engine.Compile(test.query)
require.Nil(t, err)
@@ -102,7 +148,7 @@ func testExecute(t *testing.T) {
for i := range test.results {
if test.ordered {
- assert.Equal(t, test.results[i].name, results.Values[i].Name(),
+ assert.Equal(t, test.results[i].expected, results.Values[i].Name(),
"invalid result %d for %s", i, test.query)
assert.Equal(t, test.results[i].max, results.Values[i].CalcStatistics().Max,
"invalid result %d for %s", i, test.query)
@@ -111,12 +157,13 @@ func testExecute(t *testing.T) {
}
}
-// TODO arnikola reenable
-// nolint
-func testTracing(t *testing.T) {
- engine := NewEngine(
- testStorage,
- )
+func TestTracing(t *testing.T) {
+ ctrl := xgomock.NewController(t)
+ defer ctrl.Finish()
+
+ store := storage.NewMockStorage(ctrl)
+
+ engine := NewEngine(store)
var traces []common.Trace
ctx := common.NewContext(common.ContextOptions{Start: time.Now().Add(-1 * time.Hour), End: time.Now(), Engine: engine})
@@ -124,6 +171,11 @@ func testTracing(t *testing.T) {
traces = append(traces, t)
}
+ stepSize := 60000
+ store.EXPECT().FetchByQuery(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(
+ buildTestSeriesFn(stepSize, "foo.bar.q.zed", "foo.bar.g.zed",
+ "foo.bar.x.zed"))
+
expr, err := engine.Compile("groupByNode(sortByName(aliasByNode(foo.bar.*.zed, 0, 2)), 0, 'sumSeries')")
require.NoError(t, err)
@@ -155,21 +207,3 @@ func testTracing(t *testing.T) {
assert.Equal(t, expected.Outputs, trace.Outputs, "incorrect outputs for trace %d", i)
}
}
-
-// func makeTSDB(policy policy.StoragePolicy) tsdb.Database {
-// var (
-// now = time.Now().Truncate(time.Second * 10)
-// testTSDB = nil //FIXME mocktsdb.New()
-// ctx = context.New()
-// )
-
-// defer ctx.Close()
-
-// for name, val := range testValues {
-// for t := now.Add(-time.Hour * 2); t.Before(now.Add(time.Hour)); t = t.Add(time.Second * 10) {
-// testTSDB.WriteRaw(ctx, name, t, val, policy)
-// }
-// }
-
-// return testIndex, testTSDB
-// }
diff --git a/src/query/graphite/native/functions.go b/src/query/graphite/native/functions.go
index ba17d76195..7b2098ad14 100644
--- a/src/query/graphite/native/functions.go
+++ b/src/query/graphite/native/functions.go
@@ -525,11 +525,15 @@ func (call *functionCall) Evaluate(ctx *common.Context) (reflect.Value, error) {
}
transformerFn := contextShifter.Field(1)
var ret []reflect.Value
- if call.f.out == unaryContextShifterPtrType {
+ switch call.f.out {
+ case unaryContextShifterPtrType:
// unary function
ret = transformerFn.Call([]reflect.Value{shiftedSeries})
- } else {
+ case binaryContextShifterPtrType:
+ // binary function
ret = transformerFn.Call([]reflect.Value{shiftedSeries, values[0]})
+ default:
+ return reflect.Value{}, fmt.Errorf("unknown context shift: %v", call.f.out)
}
if !ret[1].IsNil() {
err = ret[1].Interface().(error)
diff --git a/src/query/graphite/storage/m3_wrapper.go b/src/query/graphite/storage/m3_wrapper.go
index 5a574687b0..181b8dde60 100644
--- a/src/query/graphite/storage/m3_wrapper.go
+++ b/src/query/graphite/storage/m3_wrapper.go
@@ -223,7 +223,7 @@ func (s *m3WrappedStore) FetchByQuery(
m3ctx, cancel := context.WithTimeout(ctx.RequestContext(), opts.Timeout)
defer cancel()
fetchOptions := storage.NewFetchOptions()
- fetchOptions.Limit = opts.Limit
+ fetchOptions.SeriesLimit = opts.Limit
perQueryEnforcer := s.enforcer.Child(cost.QueryLevel)
defer perQueryEnforcer.Close()
diff --git a/src/query/graphite/storage/storage_mock.go b/src/query/graphite/storage/storage_mock.go
new file mode 100644
index 0000000000..bfe66c6d2e
--- /dev/null
+++ b/src/query/graphite/storage/storage_mock.go
@@ -0,0 +1,71 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/m3db/m3/src/query/graphite/storage (interfaces: Storage)
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package storage is a generated GoMock package.
+package storage
+
+import (
+ "reflect"
+
+ "github.com/m3db/m3/src/query/graphite/context"
+
+ "github.com/golang/mock/gomock"
+)
+
+// MockStorage is a mock of Storage interface
+type MockStorage struct {
+ ctrl *gomock.Controller
+ recorder *MockStorageMockRecorder
+}
+
+// MockStorageMockRecorder is the mock recorder for MockStorage
+type MockStorageMockRecorder struct {
+ mock *MockStorage
+}
+
+// NewMockStorage creates a new mock instance
+func NewMockStorage(ctrl *gomock.Controller) *MockStorage {
+ mock := &MockStorage{ctrl: ctrl}
+ mock.recorder = &MockStorageMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockStorage) EXPECT() *MockStorageMockRecorder {
+ return m.recorder
+}
+
+// FetchByQuery mocks base method
+func (m *MockStorage) FetchByQuery(arg0 context.Context, arg1 string, arg2 FetchOptions) (*FetchResult, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "FetchByQuery", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*FetchResult)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// FetchByQuery indicates an expected call of FetchByQuery
+func (mr *MockStorageMockRecorder) FetchByQuery(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchByQuery", reflect.TypeOf((*MockStorage)(nil).FetchByQuery), arg0, arg1, arg2)
+}
diff --git a/src/query/graphite/ts/series.go b/src/query/graphite/ts/series.go
index beacee25b1..2b74713341 100644
--- a/src/query/graphite/ts/series.go
+++ b/src/query/graphite/ts/series.go
@@ -136,7 +136,12 @@ func (b *Series) Resolution() time.Duration {
// StepAtTime returns the step within the block containing the given time
func (b *Series) StepAtTime(t time.Time) int {
- return int(t.UnixNano()/1000000-b.startTime.UnixNano()/1000000) / b.vals.MillisPerStep()
+ step := int(t.UnixNano()/1000000-b.startTime.UnixNano()/1000000) / b.vals.MillisPerStep()
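+	// NB: a time before the series start yields a negative step; clamp to
+	// zero so callers always receive a valid index.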
+ if step < 0 {
+ return 0
+ }
+
+ return step
}
// StartTimeForStep returns the time at which the given step starts
diff --git a/src/query/models/options.go b/src/query/models/options.go
index 8fc8702979..043a82deec 100644
--- a/src/query/models/options.go
+++ b/src/query/models/options.go
@@ -23,10 +23,12 @@ package models
import (
"bytes"
"errors"
+
+ "github.com/prometheus/common/model"
)
var (
- defaultMetricName = []byte("__name__")
+ defaultMetricName = []byte(model.MetricNameLabel)
defaultBucketName = []byte("le")
errNoName = errors.New("metric name is missing or empty")
@@ -38,6 +40,7 @@ type tagOptions struct {
idScheme IDSchemeType
bucketName []byte
metricName []byte
+ filters Filters
}
// NewTagOptions builds a new tag options with default values.
@@ -62,9 +65,9 @@ func (o *tagOptions) Validate() error {
return o.idScheme.Validate()
}
-func (o *tagOptions) SetMetricName(metricName []byte) TagOptions {
+func (o *tagOptions) SetMetricName(value []byte) TagOptions {
opts := *o
- opts.metricName = metricName
+ opts.metricName = value
return &opts
}
@@ -72,9 +75,9 @@ func (o *tagOptions) MetricName() []byte {
return o.metricName
}
-func (o *tagOptions) SetBucketName(bucketName []byte) TagOptions {
+func (o *tagOptions) SetBucketName(value []byte) TagOptions {
opts := *o
- opts.bucketName = bucketName
+ opts.bucketName = value
return &opts
}
@@ -82,9 +85,9 @@ func (o *tagOptions) BucketName() []byte {
return o.bucketName
}
-func (o *tagOptions) SetIDSchemeType(scheme IDSchemeType) TagOptions {
+func (o *tagOptions) SetIDSchemeType(value IDSchemeType) TagOptions {
opts := *o
- opts.idScheme = scheme
+ opts.idScheme = value
return &opts
}
@@ -92,6 +95,16 @@ func (o *tagOptions) IDSchemeType() IDSchemeType {
return o.idScheme
}
+func (o *tagOptions) SetFilters(value Filters) TagOptions {
+ opts := *o
+ opts.filters = value
+ return &opts
+}
+
+func (o *tagOptions) Filters() Filters {
+ return o.filters
+}
+
func (o *tagOptions) Equals(other TagOptions) bool {
return o.idScheme == other.IDSchemeType() &&
bytes.Equal(o.metricName, other.MetricName()) &&
diff --git a/src/query/models/query_context.go b/src/query/models/query_context.go
index 6b748a4db4..abb87f47cf 100644
--- a/src/query/models/query_context.go
+++ b/src/query/models/query_context.go
@@ -44,7 +44,11 @@ type QueryContextOptions struct {
// LimitMaxTimeseries limits the number of time series returned by each
// storage node.
LimitMaxTimeseries int
- RestrictFetchType *RestrictFetchTypeQueryContextOptions
+ // LimitMaxDocs limits the number of docs returned by each storage node.
+ LimitMaxDocs int
+ // RequireExhaustive results in an error if the query exceeds the series limit.
+ RequireExhaustive bool
+ RestrictFetchType *RestrictFetchTypeQueryContextOptions
}
// RestrictFetchTypeQueryContextOptions allows for specifying the
diff --git a/src/query/models/tags.go b/src/query/models/tags.go
index bd956dfcbf..08abde0087 100644
--- a/src/query/models/tags.go
+++ b/src/query/models/tags.go
@@ -22,14 +22,19 @@ package models
import (
"bytes"
+ "errors"
"fmt"
"sort"
"strings"
- "github.com/m3db/m3/src/query/models/strconv"
- "github.com/m3db/m3/src/query/util/writer"
+ "github.com/m3db/m3/src/metrics/generated/proto/metricpb"
+ xerrors "github.com/m3db/m3/src/x/errors"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
+)
+
+var (
+ errNoTags = errors.New("no tags")
)
// NewTags builds a tags with the given size and tag options.
@@ -50,229 +55,20 @@ func EmptyTags() Tags {
return NewTags(0, nil)
}
-// ID returns a byte slice representation of the tags, using the generation
-// strategy from the tag options.
-func (t Tags) ID() []byte {
- schemeType := t.Opts.IDSchemeType()
- if len(t.Tags) == 0 {
- if schemeType == TypeQuoted {
- return []byte("{}")
- }
-
- return []byte("")
- }
-
- switch schemeType {
- case TypeLegacy:
- return t.legacyID()
- case TypeQuoted:
- return t.quotedID()
- case TypePrependMeta:
- return t.prependMetaID()
- case TypeGraphite:
- return t.graphiteID()
- default:
- // Default to prepending meta
- return t.prependMetaID()
- }
-}
-
-func (t Tags) legacyID() []byte {
- // TODO: pool these bytes.
- id := make([]byte, t.idLen())
- idx := -1
- for _, tag := range t.Tags {
- idx += copy(id[idx+1:], tag.Name) + 1
- id[idx] = eq
- idx += copy(id[idx+1:], tag.Value) + 1
- id[idx] = sep
- }
-
- return id
-}
-
-func (t Tags) idLen() int {
- idLen := 2 * t.Len() // account for separators
- for _, tag := range t.Tags {
- idLen += len(tag.Name)
- idLen += len(tag.Value)
- }
-
- return idLen
-}
-
-type tagEscaping struct {
- escapeName bool
- escapeValue bool
-}
-
-func (t Tags) quotedID() []byte {
- var (
- idLen int
- needEscaping []tagEscaping
- l int
- escape tagEscaping
- )
-
- for i, tt := range t.Tags {
- l, escape = tt.serializedLength()
- idLen += l
- if escape.escapeName || escape.escapeValue {
- if needEscaping == nil {
- needEscaping = make([]tagEscaping, len(t.Tags))
- }
-
- needEscaping[i] = escape
- }
- }
-
- tagLength := 2 * len(t.Tags)
- idLen += tagLength + 1 // account for separators and brackets
- if needEscaping == nil {
- return t.quoteIDSimple(idLen)
- }
-
- // TODO: pool these bytes
- lastIndex := len(t.Tags) - 1
- id := make([]byte, idLen)
- id[0] = leftBracket
- idx := 1
- for i, tt := range t.Tags[:lastIndex] {
- idx = tt.writeAtIndex(id, needEscaping[i], idx)
- id[idx] = sep
- idx++
- }
-
- idx = t.Tags[lastIndex].writeAtIndex(id, needEscaping[lastIndex], idx)
- id[idx] = rightBracket
- return id
-}
-
-// adds quotes to tag values when no characters need escaping.
-func (t Tags) quoteIDSimple(length int) []byte {
- // TODO: pool these bytes.
- id := make([]byte, length)
- id[0] = leftBracket
- idx := 1
- lastIndex := len(t.Tags) - 1
- for _, tag := range t.Tags[:lastIndex] {
- idx += copy(id[idx:], tag.Name)
- id[idx] = eq
- idx++
- idx = strconv.QuoteSimple(id, tag.Value, idx)
- id[idx] = sep
- idx++
- }
-
- tag := t.Tags[lastIndex]
- idx += copy(id[idx:], tag.Name)
- id[idx] = eq
- idx++
- idx = strconv.QuoteSimple(id, tag.Value, idx)
- id[idx] = rightBracket
-
- return id
-}
-
-func (t Tag) writeAtIndex(id []byte, escape tagEscaping, idx int) int {
- if escape.escapeName {
- idx = strconv.Escape(id, t.Name, idx)
- } else {
- idx += copy(id[idx:], t.Name)
- }
-
- // add = character
- id[idx] = eq
- idx++
-
- if escape.escapeValue {
- idx = strconv.Quote(id, t.Value, idx)
- } else {
- idx = strconv.QuoteSimple(id, t.Value, idx)
- }
-
- return idx
-}
-
-func (t Tag) serializedLength() (int, tagEscaping) {
- var (
- idLen int
- escaping tagEscaping
- )
- if strconv.NeedToEscape(t.Name) {
- idLen += strconv.EscapedLength(t.Name)
- escaping.escapeName = true
- } else {
- idLen += len(t.Name)
- }
-
- if strconv.NeedToEscape(t.Value) {
- idLen += strconv.QuotedLength(t.Value)
- escaping.escapeValue = true
- } else {
- idLen += len(t.Value) + 2
- }
-
- return idLen, escaping
-}
-
-func (t Tags) prependMetaID() []byte {
- l, metaLengths := t.prependMetaLen()
- // TODO: pool these bytes.
- id := make([]byte, l)
- idx := writeTagLengthMeta(id, metaLengths)
- for _, tag := range t.Tags {
- idx += copy(id[idx:], tag.Name)
- idx += copy(id[idx:], tag.Value)
- }
-
- return id
-}
-
-func writeTagLengthMeta(dst []byte, lengths []int) int {
- idx := writer.WriteIntegers(dst, lengths, sep, 0)
- dst[idx] = finish
- return idx + 1
-}
-
-func (t Tags) prependMetaLen() (int, []int) {
- idLen := 1 // account for separator
- tagLengths := make([]int, len(t.Tags)*2)
- for i, tag := range t.Tags {
- tagLen := len(tag.Name)
- tagLengths[2*i] = tagLen
- idLen += tagLen
- tagLen = len(tag.Value)
- tagLengths[2*i+1] = tagLen
- idLen += tagLen
+// LastComputedID returns the last computed ID; this should only be
+// used when it is guaranteed that no tag transforms take place between calls.
+func (t *Tags) LastComputedID() []byte {
+ if t.id == nil {
+ t.id = t.ID()
}
- prefixLen := writer.IntsLength(tagLengths)
- return idLen + prefixLen, tagLengths
+ return t.id
}
-func (t Tags) graphiteID() []byte {
- // TODO: pool these bytes.
- id := make([]byte, t.idLenGraphite())
- idx := 0
- lastIndex := len(t.Tags) - 1
- for _, tag := range t.Tags[:lastIndex] {
- idx += copy(id[idx:], tag.Value)
- id[idx] = graphiteSep
- idx++
- }
-
- copy(id[idx:], t.Tags[lastIndex].Value)
- return id
-}
-
-func (t Tags) idLenGraphite() int {
- idLen := t.Len() - 1 // account for separators
- for _, tag := range t.Tags {
- idLen += len(tag.Value)
- }
-
- return idLen
+// ID returns a byte slice representation of the tags, using the generation
+// strategy from the tag options.
+func (t Tags) ID() []byte {
+ return id(t)
}
func (t Tags) tagSubset(keys [][]byte, include bool) Tags {
@@ -415,12 +211,18 @@ func (t Tags) Add(other Tags) Tags {
return t.Normalize()
}
+// Ensure Tags implements the sort interface.
+var _ sort.Interface = Tags{}
+
func (t Tags) Len() int { return len(t.Tags) }
func (t Tags) Swap(i, j int) { t.Tags[i], t.Tags[j] = t.Tags[j], t.Tags[i] }
func (t Tags) Less(i, j int) bool {
return bytes.Compare(t.Tags[i].Name, t.Tags[j].Name) == -1
}
+// Ensure sortableTagsNumericallyAsc implements the sort interface.
+var _ sort.Interface = sortableTagsNumericallyAsc{}
+
type sortableTagsNumericallyAsc Tags
func (t sortableTagsNumericallyAsc) Len() int { return len(t.Tags) }
@@ -444,8 +246,8 @@ func (t sortableTagsNumericallyAsc) Less(i, j int) bool {
// Normalize normalizes the tags by sorting them in place.
// In the future, it might also ensure other things like uniqueness.
func (t Tags) Normalize() Tags {
- // Graphite tags are sorted numerically rather than lexically.
if t.Opts.IDSchemeType() == TypeGraphite {
+ // Graphite tags are sorted numerically rather than lexically.
sort.Sort(sortableTagsNumericallyAsc(t))
} else {
sort.Sort(t)
@@ -454,7 +256,78 @@ func (t Tags) Normalize() Tags {
return t
}
-// HashedID returns the hashed ID for the tags.
+// Validate checks that there are tag values, that the tags are ordered,
+// and that there are no duplicates.
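+// For example, tags ordered as [b=1, a=2] fail with an out-of-order error,
+// and [a=1, a=2] fail as a duplicate of 'a'.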
+func (t Tags) Validate() error {
+	// Wrap the call to validate to make sure a validation error is always an
+	// invalid parameters error, so we return a bad request instead of an
+	// internal server error higher in the stack.
+ if err := t.validate(); err != nil {
+ return xerrors.NewInvalidParamsError(err)
+ }
+ return nil
+}
+
+func (t Tags) validate() error {
+ n := t.Len()
+ if n == 0 {
+ return errNoTags
+ }
+
+ if t.Opts.IDSchemeType() == TypeGraphite {
+ // Graphite tags are sorted numerically rather than lexically.
+ tags := sortableTagsNumericallyAsc(t)
+ for i, tag := range tags.Tags {
+ if len(tag.Name) == 0 {
+ return fmt.Errorf("tag name empty: index=%d", i)
+ }
+ if i == 0 {
+ continue // Don't check order/unique attributes.
+ }
+
+ if !tags.Less(i-1, i) {
+ return fmt.Errorf("graphite tags out of order: '%s' appears after"+
+ " '%s', tags: %v", tags.Tags[i-1].Name, tags.Tags[i].Name, tags.Tags)
+ }
+
+ prev := tags.Tags[i-1]
+			if bytes.Equal(prev.Name, tag.Name) {
+ return fmt.Errorf("tags duplicate: '%s' appears more than once",
+ tags.Tags[i-1].Name)
+ }
+ }
+ } else {
+ // Sorted alphanumerically otherwise, use bytes.Compare once for
+ // both order and unique test.
+ for i, tag := range t.Tags {
+ if len(tag.Name) == 0 {
+ return fmt.Errorf("tag name empty: index=%d", i)
+ }
+ if len(tag.Value) == 0 {
+ return fmt.Errorf("tag value empty: index=%d, name=%s",
+ i, t.Tags[i].Name)
+ }
+ if i == 0 {
+ continue // Don't check order/unique attributes.
+ }
+
+ prev := t.Tags[i-1]
+ cmp := bytes.Compare(prev.Name, t.Tags[i].Name)
+ if cmp > 0 {
+ return fmt.Errorf("tags out of order: '%s' appears after '%s', tags: %v",
+ prev.Name, tag.Name, t.Tags)
+ }
+ if cmp == 0 {
+ return fmt.Errorf("tags duplicate: '%s' appears more than once in '%s'",
+ prev.Name, t)
+ }
+ }
+ }
+
+ return nil
+}
+
+// Reset resets the tags for reuse.
func (t Tags) Reset() Tags {
t.Tags = t.Tags[:0]
return t
@@ -465,6 +338,16 @@ func (t Tags) HashedID() uint64 {
return xxhash.Sum64(t.ID())
}
+// LastComputedHashedID returns the last computed hashed ID; this should only be
+// used when it is guaranteed that no tag transforms take place between calls.
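+// NB: a zero hash doubles as the "not yet computed" sentinel, so an ID that
+// legitimately hashes to zero is recomputed on each call.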
+func (t *Tags) LastComputedHashedID() uint64 {
+ if t.hashedID == 0 {
+ t.hashedID = xxhash.Sum64(t.LastComputedID())
+ }
+
+ return t.hashedID
+}
+
// Equals returns a boolean reporting whether the compared tags have the same
// values.
//
@@ -501,6 +384,26 @@ func (t Tags) String() string {
return sb.String()
}
+// TagsFromProto converts proto tags to models.Tags.
+func TagsFromProto(pbTags []*metricpb.Tag) []Tag {
+ tags := make([]Tag, 0, len(pbTags))
+ for _, tag := range pbTags {
+ tags = append(tags, Tag{
+ Name: tag.Name,
+ Value: tag.Value,
+ })
+ }
+ return tags
+}
+
+// ToProto converts the models.Tags to proto tags.
+func (t Tag) ToProto() *metricpb.Tag {
+ return &metricpb.Tag{
+ Name: t.Name,
+ Value: t.Value,
+ }
+}
+
// String returns the string representation of the tag.
func (t Tag) String() string {
return fmt.Sprintf("%s: %s", t.Name, t.Value)
diff --git a/src/query/models/tags_id_schemes.go b/src/query/models/tags_id_schemes.go
new file mode 100644
index 0000000000..33160247bc
--- /dev/null
+++ b/src/query/models/tags_id_schemes.go
@@ -0,0 +1,249 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package models
+
+import (
+ "github.com/m3db/m3/src/query/models/strconv"
+ "github.com/m3db/m3/src/query/util/writer"
+)
+
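+// id constructs the serialized ID for the given tags using the configured
+// ID scheme type; an empty tag set serializes to "{}" under the quoted
+// scheme and to an empty ID under every other scheme.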
+func id(t Tags) []byte {
+ schemeType := t.Opts.IDSchemeType()
+ if len(t.Tags) == 0 {
+ if schemeType == TypeQuoted {
+ return []byte("{}")
+ }
+
+ return []byte("")
+ }
+
+ switch schemeType {
+ case TypeLegacy:
+ return legacyID(t)
+ case TypeQuoted:
+ return quotedID(t)
+ case TypePrependMeta:
+ return prependMetaID(t)
+ case TypeGraphite:
+ return graphiteID(t)
+ default:
+ // Default to the quoted scheme.
+ // NB: realistically, the scheme type should already be set by this point.
+ return quotedID(t)
+ }
+}
+
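+// legacyID returns an ID of the form `name1=value1,name2=value2,` with a
+// trailing separator, e.g. tags (t1:v1, t2:v2) serialize to `t1=v1,t2=v2,`.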
+func legacyID(t Tags) []byte {
+ // TODO: pool these bytes.
+ id := make([]byte, idLen(t))
+ idx := -1
+ for _, tag := range t.Tags {
+ idx += copy(id[idx+1:], tag.Name) + 1
+ id[idx] = eq
+ idx += copy(id[idx+1:], tag.Value) + 1
+ id[idx] = sep
+ }
+
+ return id
+}
+
+func idLen(t Tags) int {
+ idLen := 2 * t.Len() // account for separators
+ for _, tag := range t.Tags {
+ idLen += len(tag.Name)
+ idLen += len(tag.Value)
+ }
+
+ return idLen
+}
+
+type tagEscaping struct {
+ escapeName bool
+ escapeValue bool
+}
+
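+// quotedID returns an ID with quoted values wrapped in brackets, e.g. tags
+// (t1:v1, t2:v2) serialize to `{t1="v1",t2="v2"}`, escaping names and
+// values that contain reserved characters.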
+func quotedID(t Tags) []byte {
+ var (
+ idLen int
+ needEscaping []tagEscaping
+ l int
+ escape tagEscaping
+ )
+
+ for i, tt := range t.Tags {
+ l, escape = serializedLength(tt)
+ idLen += l
+ if escape.escapeName || escape.escapeValue {
+ if needEscaping == nil {
+ needEscaping = make([]tagEscaping, len(t.Tags))
+ }
+
+ needEscaping[i] = escape
+ }
+ }
+
+ tagLength := 2 * len(t.Tags)
+ idLen += tagLength + 1 // account for separators and brackets
+ if needEscaping == nil {
+ return quoteIDSimple(t, idLen)
+ }
+
+ // TODO: pool these bytes
+ lastIndex := len(t.Tags) - 1
+ id := make([]byte, idLen)
+ id[0] = leftBracket
+ idx := 1
+ for i, tt := range t.Tags[:lastIndex] {
+ idx = writeAtIndex(tt, id, needEscaping[i], idx)
+ id[idx] = sep
+ idx++
+ }
+
+ idx = writeAtIndex(t.Tags[lastIndex], id, needEscaping[lastIndex], idx)
+ id[idx] = rightBracket
+ return id
+}
+
+// quoteIDSimple adds quotes to tag values when no characters need escaping.
+func quoteIDSimple(t Tags, length int) []byte {
+ // TODO: pool these bytes.
+ id := make([]byte, length)
+ id[0] = leftBracket
+ idx := 1
+ lastIndex := len(t.Tags) - 1
+ for _, tag := range t.Tags[:lastIndex] {
+ idx += copy(id[idx:], tag.Name)
+ id[idx] = eq
+ idx++
+ idx = strconv.QuoteSimple(id, tag.Value, idx)
+ id[idx] = sep
+ idx++
+ }
+
+ tag := t.Tags[lastIndex]
+ idx += copy(id[idx:], tag.Name)
+ id[idx] = eq
+ idx++
+ idx = strconv.QuoteSimple(id, tag.Value, idx)
+ id[idx] = rightBracket
+
+ return id
+}
+
+func writeAtIndex(t Tag, id []byte, escape tagEscaping, idx int) int {
+ if escape.escapeName {
+ idx = strconv.Escape(id, t.Name, idx)
+ } else {
+ idx += copy(id[idx:], t.Name)
+ }
+
+ id[idx] = eq
+ idx++
+
+ if escape.escapeValue {
+ idx = strconv.Quote(id, t.Value, idx)
+ } else {
+ idx = strconv.QuoteSimple(id, t.Value, idx)
+ }
+
+ return idx
+}
+
+func serializedLength(t Tag) (int, tagEscaping) {
+ var (
+ idLen int
+ escaping tagEscaping
+ )
+ if strconv.NeedToEscape(t.Name) {
+ idLen += strconv.EscapedLength(t.Name)
+ escaping.escapeName = true
+ } else {
+ idLen += len(t.Name)
+ }
+
+ if strconv.NeedToEscape(t.Value) {
+ idLen += strconv.QuotedLength(t.Value)
+ escaping.escapeValue = true
+ } else {
+ idLen += len(t.Value) + 2
+ }
+
+ return idLen, escaping
+}
+
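+// writeTagLengthMeta writes the separator-delimited tag lengths followed by
+// a terminating marker, e.g. lengths (2, 2) are written as `2,2!`.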
+func writeTagLengthMeta(dst []byte, lengths []int) int {
+ idx := writer.WriteIntegers(dst, lengths, sep, 0)
+ dst[idx] = finish
+ return idx + 1
+}
+
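+// prependMetaID returns an ID prefixed with the tag length metadata followed
+// by the concatenated tag names and values, e.g. tags (t1:v1, t2:v2)
+// serialize to `2,2,2,2!t1v1t2v2`.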
+func prependMetaID(t Tags) []byte {
+ l, metaLengths := prependMetaLen(t)
+ // TODO: pool these bytes.
+ id := make([]byte, l)
+ idx := writeTagLengthMeta(id, metaLengths)
+ for _, tag := range t.Tags {
+ idx += copy(id[idx:], tag.Name)
+ idx += copy(id[idx:], tag.Value)
+ }
+
+ return id
+}
+
+func prependMetaLen(t Tags) (int, []int) {
+ idLen := 1 // account for separator
+ tagLengths := make([]int, len(t.Tags)*2)
+ for i, tag := range t.Tags {
+ tagLen := len(tag.Name)
+ tagLengths[2*i] = tagLen
+ idLen += tagLen
+ tagLen = len(tag.Value)
+ tagLengths[2*i+1] = tagLen
+ idLen += tagLen
+ }
+
+ prefixLen := writer.IntsLength(tagLengths)
+ return idLen + prefixLen, tagLengths
+}
+
+func idLenGraphite(t Tags) int {
+ idLen := t.Len() - 1 // account for separators
+ for _, tag := range t.Tags {
+ idLen += len(tag.Value)
+ }
+
+ return idLen
+}
+
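+// graphiteID drops the tag names entirely and joins the tag values with
+// graphiteSep, producing a graphite-style path.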
+func graphiteID(t Tags) []byte {
+ // TODO: pool these bytes.
+ id := make([]byte, idLenGraphite(t))
+ idx := 0
+ lastIndex := len(t.Tags) - 1
+ for _, tag := range t.Tags[:lastIndex] {
+ idx += copy(id[idx:], tag.Value)
+ id[idx] = graphiteSep
+ idx++
+ }
+
+ copy(id[idx:], t.Tags[lastIndex].Value)
+ return id
+}
diff --git a/src/query/models/tags_test.go b/src/query/models/tags_test.go
index 90fd3aefb4..310eae7191 100644
--- a/src/query/models/tags_test.go
+++ b/src/query/models/tags_test.go
@@ -27,10 +27,12 @@ import (
"testing"
"unsafe"
+ "github.com/m3db/m3/src/query/graphite/graphite"
"github.com/m3db/m3/src/query/util/writer"
+ xerrors "github.com/m3db/m3/src/x/errors"
xtest "github.com/m3db/m3/src/x/test"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -50,7 +52,7 @@ func testLongTagIDOutOfOrder(t *testing.T, scheme IDSchemeType) Tags {
func TestLongTagNewIDOutOfOrderLegacy(t *testing.T) {
tags := testLongTagIDOutOfOrder(t, TypeLegacy)
actual := tags.ID()
- assert.Equal(t, tags.idLen(), len(actual))
+ assert.Equal(t, idLen(tags), len(actual))
assert.Equal(t, []byte("t1=v1,t2=v2,t3=v3,t4=v4,"), actual)
}
@@ -119,7 +121,7 @@ func TestLongTagNewIDOutOfOrderPrefixed(t *testing.T) {
tags := testLongTagIDOutOfOrder(t, TypePrependMeta).
AddTag(Tag{Name: []byte("t9"), Value: []byte(`"v1"t2"v2"`)})
actual := tags.ID()
- expectedLength, _ := tags.prependMetaLen()
+ expectedLength, _ := prependMetaLen(tags)
require.Equal(t, expectedLength, len(actual))
assert.Equal(t, []byte(`2,2,2,2,2,2,2,2,2,10!t1v1t2v2t3v3t4v4t9"v1"t2"v2"`), actual)
}
@@ -421,6 +423,110 @@ func TestWriteTagLengthMeta(t *testing.T) {
assert.Equal(t, []byte("0,1,2,8,10,8,100,8,101,8,110,123456,12345!"), buf)
}
+func TestTagsValidateEmptyNameQuoted(t *testing.T) {
+ tags := NewTags(0, NewTagOptions().SetIDSchemeType(TypeQuoted))
+ tags = tags.AddTag(Tag{Name: []byte(""), Value: []byte("bar")})
+ err := tags.Validate()
+ require.Error(t, err)
+ require.True(t, xerrors.IsInvalidParams(err))
+}
+
+func TestTagsValidateEmptyValueQuoted(t *testing.T) {
+ tags := NewTags(0, NewTagOptions().SetIDSchemeType(TypeQuoted))
+ tags = tags.AddTag(Tag{Name: []byte("foo"), Value: []byte("")})
+ err := tags.Validate()
+ require.Error(t, err)
+ require.True(t, xerrors.IsInvalidParams(err))
+}
+
+func TestTagsValidateOutOfOrderQuoted(t *testing.T) {
+ tags := NewTags(0, NewTagOptions().SetIDSchemeType(TypeQuoted))
+ tags.Tags = []Tag{
+ {
+ Name: []byte("foo"),
+ Value: []byte("bar"),
+ },
+ {
+ Name: []byte("bar"),
+ Value: []byte("baz"),
+ },
+ }
+ err := tags.Validate()
+ require.Error(t, err)
+ require.True(t, xerrors.IsInvalidParams(err))
+
+ // Test fixes after normalize.
+ tags.Normalize()
+ require.NoError(t, tags.Validate())
+}
+
+func TestTagsValidateDuplicateQuoted(t *testing.T) {
+ tags := NewTags(0, NewTagOptions().SetIDSchemeType(TypeQuoted))
+ tags = tags.AddTag(Tag{
+ Name: []byte("foo"),
+ Value: []byte("bar"),
+ })
+ tags = tags.AddTag(Tag{
+ Name: []byte("bar"),
+ Value: []byte("baz"),
+ })
+ tags = tags.AddTag(Tag{
+ Name: []byte("foo"),
+ Value: []byte("qux"),
+ })
+ err := tags.Validate()
+ require.Error(t, err)
+ require.True(t, xerrors.IsInvalidParams(err))
+}
+
+func TestTagsValidateEmptyNameGraphite(t *testing.T) {
+ tags := NewTags(0, NewTagOptions().SetIDSchemeType(TypeGraphite))
+ tags = tags.AddTag(Tag{Name: nil, Value: []byte("bar")})
+ err := tags.Validate()
+ require.Error(t, err)
+ require.True(t, xerrors.IsInvalidParams(err))
+}
+
+func TestTagsValidateOutOfOrderGraphite(t *testing.T) {
+ tags := NewTags(0, NewTagOptions().SetIDSchemeType(TypeGraphite))
+ tags.Tags = []Tag{
+ {
+ Name: graphite.TagName(10),
+ Value: []byte("foo"),
+ },
+ {
+ Name: graphite.TagName(2),
+ Value: []byte("bar"),
+ },
+ }
+ err := tags.Validate()
+ require.Error(t, err)
+ require.True(t, xerrors.IsInvalidParams(err))
+
+ // Test fixes after normalize.
+ tags.Normalize()
+ require.NoError(t, tags.Validate())
+}
+
+func TestTagsValidateDuplicateGraphite(t *testing.T) {
+ tags := NewTags(0, NewTagOptions().SetIDSchemeType(TypeGraphite))
+ tags = tags.AddTag(Tag{
+ Name: graphite.TagName(0),
+ Value: []byte("foo"),
+ })
+ tags = tags.AddTag(Tag{
+ Name: graphite.TagName(1),
+ Value: []byte("bar"),
+ })
+ tags = tags.AddTag(Tag{
+ Name: graphite.TagName(1),
+ Value: []byte("baz"),
+ })
+ err := tags.Validate()
+ require.Error(t, err)
+ require.True(t, xerrors.IsInvalidParams(err))
+}
+
func buildTags(b *testing.B, count, length int, opts TagOptions, escape bool) Tags {
tags := make([]Tag, count)
for i := range tags {
@@ -539,25 +645,25 @@ func BenchmarkIDs(b *testing.B) {
func TestSerializedLength(t *testing.T) {
tag := Tag{Name: []byte("foo"), Value: []byte("bar")}
- len, escaping := tag.serializedLength()
+ len, escaping := serializedLength(tag)
assert.Equal(t, 8, len)
assert.False(t, escaping.escapeName)
assert.False(t, escaping.escapeValue)
tag.Name = []byte("f\ao")
- len, escaping = tag.serializedLength()
+ len, escaping = serializedLength(tag)
assert.Equal(t, 9, len)
assert.True(t, escaping.escapeName)
assert.False(t, escaping.escapeValue)
tag.Value = []byte(`b"ar`)
- len, escaping = tag.serializedLength()
+ len, escaping = serializedLength(tag)
assert.Equal(t, 11, len)
assert.True(t, escaping.escapeName)
assert.True(t, escaping.escapeValue)
tag.Name = []byte("baz")
- len, escaping = tag.serializedLength()
+ len, escaping = serializedLength(tag)
assert.Equal(t, 10, len)
assert.False(t, escaping.escapeName)
assert.True(t, escaping.escapeValue)
diff --git a/src/query/models/types.go b/src/query/models/types.go
index 64558d6852..fe45c1f3ef 100644
--- a/src/query/models/types.go
+++ b/src/query/models/types.go
@@ -85,26 +85,41 @@ const (
type TagOptions interface {
// Validate validates these tag options.
Validate() error
+
// SetMetricName sets the name for the `metric name` tag.
- SetMetricName(metricName []byte) TagOptions
+ SetMetricName(value []byte) TagOptions
+
// MetricName gets the name for the `metric name` tag.
MetricName() []byte
+
// SetBucketName sets the name for the `bucket label` tag.
- SetBucketName(metricName []byte) TagOptions
+ SetBucketName(value []byte) TagOptions
+
// BucketName gets the name for the `bucket label` tag.
BucketName() []byte
+
// SetIDSchemeType sets the ID generation scheme type.
- SetIDSchemeType(scheme IDSchemeType) TagOptions
+ SetIDSchemeType(value IDSchemeType) TagOptions
+
// IDSchemeType gets the ID generation scheme type.
IDSchemeType() IDSchemeType
+
+ // SetFilters sets tag filters.
+ SetFilters(value Filters) TagOptions
+
+ // Filters gets the tag filters.
+ Filters() Filters
+
// Equals determines if two tag options are equivalent.
Equals(other TagOptions) bool
}
// Tags represents a set of tags with options.
type Tags struct {
- Opts TagOptions
- Tags []Tag
+ Opts TagOptions
+ Tags []Tag
+ hashedID uint64
+ id []byte
}
// Tag is a key/value metric tag pair.
@@ -148,3 +163,15 @@ type Metric struct {
// Metrics is a list of individual metrics.
type Metrics []Metric
+
+// Filters is a set of tag filters.
+type Filters []Filter
+
+// Filter is a regex tag filter.
+type Filter struct {
+ // Name is the name of the series.
+ Name []byte
+ // Values are a set of filter values. If this is unset, all series containing
+ // the tag name are filtered.
+ Values [][]byte
+}
diff --git a/src/query/parser/promql/matcher_test.go b/src/query/parser/promql/matcher_test.go
index 570a10ef78..79f67d7384 100644
--- a/src/query/parser/promql/matcher_test.go
+++ b/src/query/parser/promql/matcher_test.go
@@ -22,6 +22,7 @@ package promql
import (
"bytes"
+ "regexp"
"testing"
"github.com/m3db/m3/src/query/models"
@@ -35,30 +36,30 @@ func TestLabelMatchesToModelMatcher(t *testing.T) {
opts := models.NewTagOptions()
labels := []*labels.Matcher{
- &labels.Matcher{
+ {
Type: labels.MatchEqual,
Name: "foo",
},
- &labels.Matcher{
+ {
Type: labels.MatchEqual,
Name: "foo",
Value: "bar",
},
- &labels.Matcher{
+ {
Type: labels.MatchNotEqual,
Name: "foo",
},
- &labels.Matcher{
+ {
Type: labels.MatchNotEqual,
Name: "foo",
Value: "bar",
},
- &labels.Matcher{
+ {
Type: labels.MatchRegexp,
Name: "foo",
Value: ".*",
},
- &labels.Matcher{
+ {
Type: labels.MatchNotRegexp,
Name: "foo",
Value: ".*",
@@ -112,3 +113,50 @@ func TestLabelMatchesToModelMatcher(t *testing.T) {
assert.True(t, equalish(ex, matchers[i]))
}
}
+
+func TestSanitizeRegex(t *testing.T) {
+ tests := []struct {
+ data, expected string
+ }{
+ {data: "", expected: ""},
+
+ {data: "bar", expected: "bar"},
+
+ {data: "^bar", expected: "bar"},
+ {data: "b^ar", expected: "ar"},
+ {data: "ba^r", expected: "r"},
+ {data: "bar^", expected: ""},
+
+ {data: "bar$", expected: "bar"},
+ {data: "ba$r", expected: "ba"},
+ {data: "b$ar", expected: "b"},
+ {data: "$bar", expected: ""},
+
+ {data: "b^a$r", expected: "a"},
+ {data: "^bar$", expected: "bar"},
+
+ {data: "b$^ar", expected: ""},
+ {data: "b$ar^", expected: ""},
+
+ {data: `ba\^r`, expected: `ba\^r`},
+ {data: `ba\$r`, expected: `ba\$r`},
+ {data: `b^a\$r`, expected: `a\$r`},
+ {data: `b$a\$r`, expected: `b`},
+
+ {data: "b[$^]ar", expected: "b[$^]ar"},
+ {data: "b[^$]ar", expected: "b[^$]ar"},
+
+ {data: `b[^\]$]ar`, expected: `b[^\]$]ar`},
+ {data: `b[^\]$]^ar`, expected: "ar"},
+ {data: `b[^\]$]$ar`, expected: `b[^\]$]`},
+
+ {data: `b\[^\]$]$ar`, expected: `\]$]`},
+ }
+
+ for _, tt := range tests {
+ ac := sanitizeRegex([]byte(tt.data))
+ assert.Equal(t, tt.expected, string(ac))
+ _, err := regexp.Compile("^(?:" + string(ac) + ")$")
+ assert.NoError(t, err)
+ }
+}
diff --git a/src/query/parser/promql/matchers.go b/src/query/parser/promql/matchers.go
index 0261fd46f9..af1237cbd3 100644
--- a/src/query/parser/promql/matchers.go
+++ b/src/query/parser/promql/matchers.go
@@ -36,8 +36,9 @@ import (
"github.com/m3db/m3/src/query/parser"
"github.com/m3db/m3/src/query/parser/common"
+ "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
- "github.com/prometheus/prometheus/promql"
+ promql "github.com/prometheus/prometheus/promql/parser"
)
// NewSelectorFromVector creates a new fetchop.
@@ -62,14 +63,15 @@ func NewSelectorFromMatrix(
n *promql.MatrixSelector,
tagOpts models.TagOptions,
) (parser.Params, error) {
- matchers, err := LabelMatchersToModelMatcher(n.LabelMatchers, tagOpts)
+ vectorSelector := n.VectorSelector.(*promql.VectorSelector)
+ matchers, err := LabelMatchersToModelMatcher(vectorSelector.LabelMatchers, tagOpts)
if err != nil {
return nil, err
}
return functions.FetchOp{
- Name: n.Name,
- Offset: n.Offset,
+ Name: vectorSelector.Name,
+ Offset: vectorSelector.Offset,
Matchers: matchers,
Range: n.Range,
}, nil
@@ -113,29 +115,30 @@ func NewAggregationOperator(expr *promql.AggregateExpr) (parser.Params, error) {
func getAggOpType(opType promql.ItemType) string {
switch opType {
- case promql.ItemSum:
+ case promql.SUM:
return aggregation.SumType
- case promql.ItemMin:
+ case promql.MIN:
return aggregation.MinType
- case promql.ItemMax:
+ case promql.MAX:
return aggregation.MaxType
- case promql.ItemAvg:
+ case promql.AVG:
return aggregation.AverageType
- case promql.ItemStddev:
+ case promql.STDDEV:
return aggregation.StandardDeviationType
- case promql.ItemStdvar:
+ case promql.STDVAR:
return aggregation.StandardVarianceType
- case promql.ItemCount:
+ case promql.COUNT:
return aggregation.CountType
- case promql.ItemTopK:
+ case promql.TOPK:
return aggregation.TopKType
- case promql.ItemBottomK:
+ case promql.BOTTOMK:
return aggregation.BottomKType
- case promql.ItemQuantile:
+ case promql.QUANTILE:
return aggregation.QuantileType
- case promql.ItemCountValues:
+ case promql.COUNT_VALUES:
return aggregation.CountValuesType
default:
return common.UnknownOpType
}
@@ -183,6 +186,7 @@ func NewFunctionExpr(
argValues []interface{},
stringValues []string,
hasArgValue bool,
+ inner string,
tagOptions models.TagOptions,
) (parser.Params, bool, error) {
var (
@@ -272,37 +276,37 @@ func NewFunctionExpr(
func getBinaryOpType(opType promql.ItemType) string {
switch opType {
- case promql.ItemLAND:
+ case promql.LAND:
return binary.AndType
- case promql.ItemLOR:
+ case promql.LOR:
return binary.OrType
- case promql.ItemLUnless:
+ case promql.LUNLESS:
return binary.UnlessType
- case promql.ItemADD:
+ case promql.ADD:
return binary.PlusType
- case promql.ItemSUB:
+ case promql.SUB:
return binary.MinusType
- case promql.ItemMUL:
+ case promql.MUL:
return binary.MultiplyType
- case promql.ItemDIV:
+ case promql.DIV:
return binary.DivType
- case promql.ItemPOW:
+ case promql.POW:
return binary.ExpType
- case promql.ItemMOD:
+ case promql.MOD:
return binary.ModType
- case promql.ItemEQL:
+ case promql.EQL:
return binary.EqType
- case promql.ItemNEQ:
+ case promql.NEQ:
return binary.NotEqType
- case promql.ItemGTR:
+ case promql.GTR:
return binary.GreaterType
- case promql.ItemLSS:
+ case promql.LSS:
return binary.LesserType
- case promql.ItemGTE:
+ case promql.GTE:
return binary.GreaterEqType
- case promql.ItemLTE:
+ case promql.LTE:
return binary.LesserEqType
default:
@@ -313,9 +317,9 @@ func getBinaryOpType(opType promql.ItemType) string {
// getUnaryOpType returns the M3 unary op type based on the Prom op type.
func getUnaryOpType(opType promql.ItemType) (string, error) {
switch opType {
- case promql.ItemADD:
+ case promql.ADD:
return binary.PlusType, nil
- case promql.ItemSUB:
+ case promql.SUB:
return binary.MinusType, nil
default:
return "", fmt.Errorf(
@@ -325,7 +329,54 @@ func getUnaryOpType(opType promql.ItemType) (string, error) {
}
}
-const promDefaultName = "__name__"
+const (
+ anchorStart = byte('^')
+ anchorEnd = byte('$')
+ escapeChar = byte('\\')
+ startGroup = byte('[')
+ endGroup = byte(']')
+)
+
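+// sanitizeRegex strips unescaped `^` and `$` anchors from a regex pattern:
+// an interior `^` drops everything before it and an interior `$` drops
+// everything after it, while escaped anchors and anchors inside character
+// groups are preserved; e.g. `^bar$` becomes `bar` and `b[$^]ar` is left
+// unchanged.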
+func sanitizeRegex(value []byte) []byte {
+ lIndex := 0
+ rIndex := len(value)
+ escape := false
+ inGroup := false
+ for i, b := range value {
+ if escape {
+ escape = false
+ continue
+ }
+
+ if inGroup {
+ switch b {
+ case escapeChar:
+ escape = true
+ case endGroup:
+ inGroup = false
+ }
+
+ continue
+ }
+
+ switch b {
+ case anchorStart:
+ lIndex = i + 1
+ case anchorEnd:
+ rIndex = i
+ case escapeChar:
+ escape = true
+ case startGroup:
+ inGroup = true
+ }
+ }
+
+ if lIndex > rIndex {
+ return []byte{}
+ }
+
+ return value[lIndex:rIndex]
+}
// LabelMatchersToModelMatcher parses promql matchers to model matchers.
func LabelMatchersToModelMatcher(
@@ -334,13 +385,14 @@ func LabelMatchersToModelMatcher(
) (models.Matchers, error) {
matchers := make(models.Matchers, 0, len(lMatchers))
for _, m := range lMatchers {
matchType, err := promTypeToM3(m.Type)
if err != nil {
return nil, err
}
var name []byte
- if m.Name == promDefaultName {
+ if m.Name == model.MetricNameLabel {
name = tagOpts.MetricName()
} else {
name = []byte(m.Name)
@@ -359,6 +411,13 @@ func LabelMatchersToModelMatcher(
}
}
+ if matchType == models.MatchRegexp || matchType == models.MatchNotRegexp {
+ // NB: special case here since tags such as `{foo=~"$bar"}` are valid in
+ // Prometheus regex patterns, but invalid with M3 index queries. Simplify
+ // these matchers here.
+ value = sanitizeRegex(value)
+ }
+
match, err := models.NewMatcher(matchType, name, value)
if err != nil {
return nil, err
diff --git a/src/query/parser/promql/options.go b/src/query/parser/promql/options.go
index d9a1ac6b6d..036da45718 100644
--- a/src/query/parser/promql/options.go
+++ b/src/query/parser/promql/options.go
@@ -23,7 +23,8 @@ package promql
import (
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser"
- pql "github.com/prometheus/prometheus/promql"
+ "github.com/prometheus/prometheus/pkg/labels"
+ pql "github.com/prometheus/prometheus/promql/parser"
)
// ParseFunctionExpr parses arguments to a function expression, returning
@@ -34,6 +35,7 @@ type ParseFunctionExpr func(
argValues []interface{},
stringValues []string,
hasArgValue bool,
+ inner string,
tagOptions models.TagOptions,
) (parser.Params, bool, error)
@@ -44,6 +46,13 @@ func defaultParseFn(query string) (pql.Expr, error) {
return pql.ParseExpr(query)
}
+// MetricSelectorFn is a function that parses a query to Prometheus selectors.
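+//
+// For example, the selector `up{job="api"}` parses to label matchers on
+// __name__ ("up") and job ("api").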
+type MetricSelectorFn func(query string) ([]*labels.Matcher, error)
+
+func defaultMetricSelectorFn(query string) ([]*labels.Matcher, error) {
+ return pql.ParseMetricSelector(query)
+}
+
// ParseOptions are options for the Prometheus parser.
type ParseOptions interface {
// ParseFn gets the parse function.
@@ -51,6 +60,11 @@ type ParseOptions interface {
// SetParseFn sets the parse function.
SetParseFn(f ParseFn) ParseOptions
+ // MetricSelectorFn gets the metric selector function.
+ MetricSelectorFn() MetricSelectorFn
+ // SetMetricSelectorFn sets the metric selector function.
+ SetMetricSelectorFn(f MetricSelectorFn) ParseOptions
+
// FunctionParseExpr gets the parsing function.
FunctionParseExpr() ParseFunctionExpr
// SetFunctionParseExpr sets the parsing function.
@@ -58,25 +72,37 @@ type ParseOptions interface {
}
type parseOptions struct {
- fn ParseFn
+ parseFn ParseFn
+ selectorFn MetricSelectorFn
fnParseExpr ParseFunctionExpr
}
// NewParseOptions creates a new parse options.
func NewParseOptions() ParseOptions {
return &parseOptions{
- fn: defaultParseFn,
+ parseFn: defaultParseFn,
+ selectorFn: defaultMetricSelectorFn,
fnParseExpr: NewFunctionExpr,
}
}
func (o *parseOptions) ParseFn() ParseFn {
- return o.fn
+ return o.parseFn
}
func (o *parseOptions) SetParseFn(f ParseFn) ParseOptions {
opts := *o
- opts.fn = f
+ opts.parseFn = f
+ return &opts
+}
+
+func (o *parseOptions) MetricSelectorFn() MetricSelectorFn {
+ return o.selectorFn
+}
+
+func (o *parseOptions) SetMetricSelectorFn(f MetricSelectorFn) ParseOptions {
+ opts := *o
+ opts.selectorFn = f
return &opts
}
diff --git a/src/query/parser/promql/parse.go b/src/query/parser/promql/parse.go
index cf2f407bc9..9789e2307e 100644
--- a/src/query/parser/promql/parse.go
+++ b/src/query/parser/promql/parse.go
@@ -31,7 +31,7 @@ import (
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser"
- pql "github.com/prometheus/prometheus/promql"
+ pql "github.com/prometheus/prometheus/promql/parser"
)
type promParser struct {
@@ -200,7 +200,8 @@ func (p *parseState) walk(node pql.Node) error {
case *pql.MatrixSelector:
// Align offset to stepSize.
- n.Offset = adjustOffset(n.Offset, p.stepSize)
+ vectorSelector := n.VectorSelector.(*pql.VectorSelector)
+ vectorSelector.Offset = adjustOffset(vectorSelector.Offset, p.stepSize)
operation, err := NewSelectorFromMatrix(n, p.tagOpts)
if err != nil {
return err
@@ -210,7 +211,7 @@ func (p *parseState) walk(node pql.Node) error {
p.transforms,
parser.NewTransformFromOperation(operation, p.transformLen()),
)
- return p.addLazyOffsetTransform(n.Offset)
+ return p.addLazyOffsetTransform(vectorSelector.Offset)
case *pql.VectorSelector:
// Align offset to stepSize.
@@ -231,7 +232,7 @@ func (p *parseState) walk(node pql.Node) error {
if n.Func.Name == scalar.VectorType {
if len(n.Args) != 1 {
return fmt.Errorf(
- "scalar() operation must be called with 1 argument, got %d",
+ "vector() operation must be called with 1 argument, got %d",
len(n.Args),
)
}
@@ -323,7 +324,7 @@ func (p *parseState) walk(node pql.Node) error {
}
op, ok, err := p.parseFunctionExpr(n.Func.Name, argValues,
- stringValues, hasValue, p.tagOpts)
+ stringValues, hasValue, n.Args.String(), p.tagOpts)
if err != nil {
return err
}
diff --git a/src/query/parser/promql/parse_test.go b/src/query/parser/promql/parse_test.go
index 5fb6cf5f9d..0dd831505b 100644
--- a/src/query/parser/promql/parse_test.go
+++ b/src/query/parser/promql/parse_test.go
@@ -36,8 +36,7 @@ import (
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/parser"
- "github.com/prometheus/prometheus/promql"
- pql "github.com/prometheus/prometheus/promql"
+ pql "github.com/prometheus/prometheus/promql/parser"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -119,11 +118,11 @@ func TestInvalidUnary(t *testing.T) {
}
func TestGetUnaryOpType(t *testing.T) {
- unaryOpType, err := getUnaryOpType(promql.ItemADD)
+ unaryOpType, err := getUnaryOpType(pql.ADD)
require.NoError(t, err)
assert.Equal(t, binary.PlusType, unaryOpType)
- _, err = getUnaryOpType(promql.ItemEQL)
+ _, err = getUnaryOpType(pql.EQL)
require.Error(t, err)
}
@@ -548,7 +547,7 @@ func TestCustomSort(t *testing.T) {
}
fn := func(s string, _ []interface{}, _ []string,
- _ bool, _ models.TagOptions) (parser.Params, bool, error) {
+ _ bool, _ string, _ models.TagOptions) (parser.Params, bool, error) {
return customParam{s}, true, nil
}
diff --git a/src/query/parser/promql/resolve_scalars.go b/src/query/parser/promql/resolve_scalars.go
index 1895dead69..4388daa82d 100644
--- a/src/query/parser/promql/resolve_scalars.go
+++ b/src/query/parser/promql/resolve_scalars.go
@@ -26,7 +26,7 @@ import (
"github.com/m3db/m3/src/query/functions/binary"
- pql "github.com/prometheus/prometheus/promql"
+ pql "github.com/prometheus/prometheus/promql/parser"
)
var (
diff --git a/src/query/plan/physical.go b/src/query/plan/physical.go
index 325d4435b1..9a2cceb305 100644
--- a/src/query/plan/physical.go
+++ b/src/query/plan/physical.go
@@ -87,8 +87,7 @@ func NewPhysicalPlan(
func (p PhysicalPlan) shiftTime() PhysicalPlan {
var maxRange time.Duration
- // Start offset with lookback
- maxOffset := p.LookbackDuration
+
for _, transformID := range p.pipeline {
node := p.steps[transformID]
boundOp, ok := node.Transform.Op.(transform.BoundOp)
@@ -97,25 +96,28 @@ func (p PhysicalPlan) shiftTime() PhysicalPlan {
}
spec := boundOp.Bounds()
- if spec.Offset+p.LookbackDuration > maxOffset {
- maxOffset = spec.Offset + p.LookbackDuration
- }
if spec.Range > maxRange {
maxRange = spec.Range
}
}
- startShift := maxOffset + maxRange
- shift := startShift % p.TimeSpec.Step
- extraStep := p.TimeSpec.Step
- if shift == 0 {
- // NB: if the start is divisible by offset, no need to take an extra step.
- extraStep = 0
+ startShift := p.LookbackDuration
+ if maxRange > 0 {
+ startShift = maxRange
+ }
+
+ remainder := startShift % p.TimeSpec.Step
+ var extraShift time.Duration
+ if remainder != 0 {
+ // Align the shift to be divisible by step.
+ extraShift = p.TimeSpec.Step - remainder
}
- alignedShift := startShift - extraStep - shift
+ alignedShift := startShift + extraShift
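+ // e.g. with lookback=5s, no range and step=15s: startShift=5s,
+ // remainder=5s, extraShift=10s, so alignedShift is one full step (15s).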
+
p.TimeSpec.Start = p.TimeSpec.Start.Add(-1 * alignedShift)
+
return p
}
diff --git a/src/query/plan/physical_test.go b/src/query/plan/physical_test.go
index cfe5a72f6c..04869d0ed1 100644
--- a/src/query/plan/physical_test.go
+++ b/src/query/plan/physical_test.go
@@ -21,7 +21,6 @@
package plan
import (
- "fmt"
"testing"
"time"
@@ -34,14 +33,10 @@ import (
"github.com/stretchr/testify/require"
)
-var (
- defaultLookbackDuration = time.Minute
-)
-
func testRequestParams() models.RequestParams {
return models.RequestParams{
Now: time.Now(),
- LookbackDuration: defaultLookbackDuration,
+ LookbackDuration: 5 * time.Minute,
Step: time.Second,
}
}
@@ -70,34 +65,77 @@ func TestResultNode(t *testing.T) {
}
func TestShiftTime(t *testing.T) {
- fetchTransform := parser.NewTransformFromOperation(functions.FetchOp{}, 1)
- agg, err := aggregation.NewAggregationOp(aggregation.CountType, aggregation.NodeParams{})
- require.NoError(t, err)
- countTransform := parser.NewTransformFromOperation(agg, 2)
- transforms := parser.Nodes{fetchTransform, countTransform}
- edges := parser.Edges{
- parser.Edge{
- ParentID: fetchTransform.ID,
- ChildID: countTransform.ID,
+ tests := []struct {
+ name string
+ fetchOp functions.FetchOp
+ lookbackDuration time.Duration
+ step time.Duration
+ wantShiftBy time.Duration
+ }{
+ {
+ name: "shift by lookbackDuration",
+ fetchOp: functions.FetchOp{},
+ lookbackDuration: 15 * time.Minute,
+ step: time.Second,
+ wantShiftBy: 15 * time.Minute,
+ },
+ {
+ name: "shift by range",
+ fetchOp: functions.FetchOp{Range: time.Hour},
+ lookbackDuration: 5 * time.Minute,
+ step: time.Second,
+ wantShiftBy: time.Hour,
+ },
+ {
+ name: "align the lookback based shift by step",
+ fetchOp: functions.FetchOp{},
+ lookbackDuration: 5 * time.Second,
+ step: 15 * time.Second,
+ wantShiftBy: 15 * time.Second, // lookback = 5, aligned to 1x step (15)
+ },
+ {
+ name: "align the range based shift by step",
+ fetchOp: functions.FetchOp{Range: 16 * time.Second},
+ lookbackDuration: 5 * time.Second,
+ step: 15 * time.Second,
+ wantShiftBy: 30 * time.Second, // range = 16, aligned to 2x step (2 * 15)
+ },
+ {
+ name: "keep the same shift if already aligned by step",
+ fetchOp: functions.FetchOp{Range: 30 * time.Second},
+ lookbackDuration: 5 * time.Second,
+ step: 15 * time.Second,
+ wantShiftBy: 30 * time.Second, // range = 30, divisible by step
},
}
- lp, _ := NewLogicalPlan(transforms, edges)
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
- params := testRequestParams()
- params.Start = params.Now.Add(-1 * time.Hour)
+ fetchTransform := parser.NewTransformFromOperation(tt.fetchOp, 1)
+ agg, err := aggregation.NewAggregationOp(aggregation.CountType, aggregation.NodeParams{})
+ require.NoError(t, err)
- p, err := NewPhysicalPlan(lp, params)
- require.NoError(t, err)
- assert.Equal(t, params.Start.Add(-1*params.LookbackDuration),
- p.TimeSpec.Start, fmt.Sprintf("start is not now - lookback"))
- fetchTransform = parser.NewTransformFromOperation(
- functions.FetchOp{Offset: time.Minute, Range: time.Hour}, 1)
- transforms = parser.Nodes{fetchTransform, countTransform}
- lp, _ = NewLogicalPlan(transforms, edges)
- p, err = NewPhysicalPlan(lp, params)
- require.NoError(t, err)
- assert.Equal(t, params.Start.
- Add(-1*(time.Minute+time.Hour+defaultLookbackDuration)), p.TimeSpec.Start,
- "start time offset by fetch")
+ countTransform := parser.NewTransformFromOperation(agg, 2)
+ transforms := parser.Nodes{fetchTransform, countTransform}
+ edges := parser.Edges{
+ parser.Edge{
+ ParentID: fetchTransform.ID,
+ ChildID: countTransform.ID,
+ },
+ }
+
+ lp, _ := NewLogicalPlan(transforms, edges)
+
+ params := models.RequestParams{
+ Now: time.Now(),
+ LookbackDuration: tt.lookbackDuration,
+ Step: tt.step,
+ }
+
+ p, err := NewPhysicalPlan(lp, params)
+ require.NoError(t, err)
+ assert.Equal(t, tt.wantShiftBy.String(), params.Start.Sub(p.TimeSpec.Start).String(), "start time shifted by")
+ })
+ }
}
diff --git a/src/query/pools/query_pools.go b/src/query/pools/query_pools.go
index caef363686..65c5c7ad99 100644
--- a/src/query/pools/query_pools.go
+++ b/src/query/pools/query_pools.go
@@ -23,9 +23,9 @@ package pools
import (
"io"
- "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
+ "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/x/xpool"
xconfig "github.com/m3db/m3/src/x/config"
"github.com/m3db/m3/src/x/ident"
@@ -39,11 +39,32 @@ import (
const (
// TODO: add capabilities to get this from configs
- replicas = 1
- iteratorPoolSize = 65536
- checkedBytesWrapperPoolSize = 65536
- defaultIdentifierPoolSize = 8192
- defaultBucketCapacity = 256
+ defaultReplicas = 3
+ defaultSeriesIteratorPoolSize = 2 << 12 // ~8k
+ defaultCheckedBytesWrapperPoolSize = 2 << 12 // ~8k
+ defaultBucketCapacity = 256
+ defaultPoolableConcurrentQueries = 64
+ defaultPoolableSeriesPerQuery = 4096
+ defaultSeriesReplicaReaderPoolSize = defaultPoolableConcurrentQueries * defaultPoolableSeriesPerQuery * defaultReplicas
+)
+
+var (
+ defaultSeriesIteratorsPoolBuckets = []pool.Bucket{
+ {
+ Capacity: defaultPoolableSeriesPerQuery,
+ Count: defaultPoolableConcurrentQueries,
+ },
+ }
+ defaultSeriesIDBytesPoolBuckets = []pool.Bucket{
+ {
+ Capacity: 256, // Can pool IDs up to 256 in size with this bucket.
+ Count: defaultPoolableSeriesPerQuery,
+ },
+ {
+ Capacity: 1024, // Can pool IDs up to 1024 in size with this bucket.
+ Count: defaultPoolableSeriesPerQuery,
+ },
+ }
)
// BuildWorkerPools builds a worker pool
@@ -116,75 +137,128 @@ func (s sessionPools) TagDecoder() serialize.TagDecoderPool {
return s.tagDecoder
}
-func buildBuckets() []pool.Bucket {
- return []pool.Bucket{
- {Capacity: defaultBucketCapacity, Count: defaultIdentifierPoolSize},
+// BuildIteratorPoolsOptions is a set of options for building iterator pools.
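+//
+// The zero value selects a default for every field, so a minimal sketch is:
+//
+//   pools := BuildIteratorPools(BuildIteratorPoolsOptions{})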
+type BuildIteratorPoolsOptions struct {
+ Replicas int
+ SeriesIteratorPoolSize int
+ SeriesIteratorsPoolBuckets []pool.Bucket
+ SeriesIDBytesPoolBuckets []pool.Bucket
+ CheckedBytesWrapperPoolSize int
+}
+
+// ReplicasOrDefault returns the replicas or default.
+func (o BuildIteratorPoolsOptions) ReplicasOrDefault() int {
+ if o.Replicas <= 0 {
+ return defaultReplicas
}
+ return o.Replicas
+}
+
+// SeriesIteratorPoolSizeOrDefault returns the series iterator pool size
+// or default.
+func (o BuildIteratorPoolsOptions) SeriesIteratorPoolSizeOrDefault() int {
+ if o.SeriesIteratorPoolSize <= 0 {
+ return defaultSeriesIteratorPoolSize
+ }
+ return o.SeriesIteratorPoolSize
+}
+
+// CheckedBytesWrapperPoolSizeOrDefault returns the checked bytes
+// wrapper pool size or default.
+func (o BuildIteratorPoolsOptions) CheckedBytesWrapperPoolSizeOrDefault() int {
+ if o.CheckedBytesWrapperPoolSize <= 0 {
+ return defaultCheckedBytesWrapperPoolSize
+ }
+ return o.CheckedBytesWrapperPoolSize
+}
+
+// SeriesIteratorsPoolBucketsOrDefault returns the series iterator pool
+// buckets or defaults.
+func (o BuildIteratorPoolsOptions) SeriesIteratorsPoolBucketsOrDefault() []pool.Bucket {
+ if len(o.SeriesIteratorsPoolBuckets) == 0 {
+ return defaultSeriesIteratorsPoolBuckets
+ }
+ return o.SeriesIteratorsPoolBuckets
+}
+
+// SeriesIDBytesPoolBucketsOrDefault returns the bytes pool buckets or defaults.
+func (o BuildIteratorPoolsOptions) SeriesIDBytesPoolBucketsOrDefault() []pool.Bucket {
+ if len(o.SeriesIDBytesPoolBuckets) == 0 {
+ return defaultSeriesIDBytesPoolBuckets
+ }
+ return o.SeriesIDBytesPoolBuckets
}
// BuildIteratorPools build iterator pools if they are unavailable from
// m3db (e.g. if running standalone query)
-func BuildIteratorPools() encoding.IteratorPools {
+func BuildIteratorPools(
+ opts BuildIteratorPoolsOptions,
+) encoding.IteratorPools {
// TODO: add instrumentation options to these pools
pools := sessionPools{}
- pools.multiReaderIteratorArray = encoding.NewMultiReaderIteratorArrayPool([]pool.Bucket{
- pool.Bucket{
- Capacity: replicas,
- Count: iteratorPoolSize,
- },
- })
+
+ defaultPerSeriesIteratorsBuckets := opts.SeriesIteratorsPoolBucketsOrDefault()
+
+ pools.multiReaderIteratorArray = encoding.NewMultiReaderIteratorArrayPool(defaultPerSeriesIteratorsBuckets)
pools.multiReaderIteratorArray.Init()
- size := replicas * iteratorPoolSize
- poolOpts := pool.NewObjectPoolOptions().
- SetSize(size)
- pools.multiReaderIterator = encoding.NewMultiReaderIteratorPool(poolOpts)
- encodingOpts := encoding.NewOptions()
- readerIterAlloc := func(r io.Reader, _ namespace.SchemaDescr) encoding.ReaderIterator {
- intOptimized := m3tsz.DefaultIntOptimizationEnabled
- return m3tsz.NewReaderIterator(r, intOptimized, encodingOpts)
- }
+ defaultPerSeriesPoolOpts := pool.NewObjectPoolOptions().
+ SetSize(opts.SeriesIteratorPoolSizeOrDefault())
- pools.multiReaderIterator.Init(readerIterAlloc)
+ readerIteratorPoolPoolOpts := pool.NewObjectPoolOptions().
+ SetSize(opts.SeriesIteratorPoolSizeOrDefault() * opts.ReplicasOrDefault())
- seriesIteratorPoolOpts := pool.NewObjectPoolOptions().
- SetSize(iteratorPoolSize)
- pools.seriesIterator = encoding.NewSeriesIteratorPool(seriesIteratorPoolOpts)
+ readerIteratorPool := encoding.NewReaderIteratorPool(readerIteratorPoolPoolOpts)
+
+ encodingOpts := encoding.NewOptions().
+ SetReaderIteratorPool(readerIteratorPool)
+
+ readerIteratorPool.Init(func(r io.Reader, descr namespace.SchemaDescr) encoding.ReaderIterator {
+ return m3tsz.NewReaderIterator(r, m3tsz.DefaultIntOptimizationEnabled, encodingOpts)
+ })
+
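+ // The multi-reader iterator allocates its per-replica reader iterators
+ // from the shared reader iterator pool above instead of constructing new
+ // ones for every series.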
+ pools.multiReaderIterator = encoding.NewMultiReaderIteratorPool(defaultPerSeriesPoolOpts)
+ pools.multiReaderIterator.Init(func(r io.Reader, s namespace.SchemaDescr) encoding.ReaderIterator {
+ iter := readerIteratorPool.Get()
+ iter.Reset(r, s)
+ return iter
+ })
+
+ pools.seriesIterator = encoding.NewSeriesIteratorPool(defaultPerSeriesPoolOpts)
pools.seriesIterator.Init()
- pools.seriesIterators = encoding.NewMutableSeriesIteratorsPool(buildBuckets())
+ pools.seriesIterators = encoding.NewMutableSeriesIteratorsPool(defaultPerSeriesIteratorsBuckets)
pools.seriesIterators.Init()
wrapperPoolOpts := pool.NewObjectPoolOptions().
- SetSize(checkedBytesWrapperPoolSize)
+ SetSize(opts.CheckedBytesWrapperPoolSizeOrDefault())
pools.checkedBytesWrapper = xpool.NewCheckedBytesWrapperPool(wrapperPoolOpts)
pools.checkedBytesWrapper.Init()
pools.tagEncoder = serialize.NewTagEncoderPool(
serialize.NewTagEncoderOptions(),
- pool.NewObjectPoolOptions(),
- )
+ defaultPerSeriesPoolOpts)
pools.tagEncoder.Init()
- pools.tagDecoder = serialize.NewTagDecoderPool(
- serialize.NewTagDecoderOptions(),
- pool.NewObjectPoolOptions(),
- )
+ tagDecoderCheckBytesWrapperPoolSize := 0
+ tagDecoderOpts := serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{
+ // We pass in a preallocated pool so use a zero sized pool in options init.
+ CheckBytesWrapperPoolSize: &tagDecoderCheckBytesWrapperPoolSize,
+ })
+ tagDecoderOpts = tagDecoderOpts.SetCheckedBytesWrapperPool(pools.checkedBytesWrapper)
+
+ pools.tagDecoder = serialize.NewTagDecoderPool(tagDecoderOpts, defaultPerSeriesPoolOpts)
pools.tagDecoder.Init()
- bytesPool := pool.NewCheckedBytesPool(buildBuckets(), nil,
- func(sizes []pool.Bucket) pool.BytesPool {
+ bytesPool := pool.NewCheckedBytesPool(opts.SeriesIDBytesPoolBucketsOrDefault(),
+ nil, func(sizes []pool.Bucket) pool.BytesPool {
return pool.NewBytesPool(sizes, nil)
})
bytesPool.Init()
- idPoolOpts := pool.NewObjectPoolOptions().
- SetSize(defaultIdentifierPoolSize)
-
pools.id = ident.NewPool(bytesPool, ident.PoolOptions{
- IDPoolOptions: idPoolOpts,
- TagsPoolOptions: idPoolOpts,
- TagsIteratorPoolOptions: idPoolOpts,
+ IDPoolOptions: defaultPerSeriesPoolOpts,
+ TagsPoolOptions: defaultPerSeriesPoolOpts,
+ TagsIteratorPoolOptions: defaultPerSeriesPoolOpts,
})
return pools
diff --git a/src/query/pools/query_pools_test.go b/src/query/pools/query_pools_test.go
new file mode 100644
index 0000000000..1a0244aee5
--- /dev/null
+++ b/src/query/pools/query_pools_test.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package pools
+
+import (
+ "runtime"
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestBuildIteratorPoolsHasSaneDefaults(t *testing.T) {
+ var stats runtime.MemStats
+ runtime.ReadMemStats(&stats)
+
+ // TotalAlloc increases as heap objects are allocated, but
+ // unlike Alloc and HeapAlloc, it does not decrease when
+ // objects are freed.
+ totalAllocBefore := stats.TotalAlloc
+
+ BuildIteratorPools(BuildIteratorPoolsOptions{})
+
+ runtime.ReadMemStats(&stats)
+
+ allocated := int(stats.TotalAlloc - totalAllocBefore)
+ t.Logf("allocated %v bytes", allocated)
+
+ upperLimit := 64 * 1024 * 1024 // 64 MiB
+ require.True(t, allocated < upperLimit,
+ "allocated more than "+strconv.Itoa(upperLimit)+" bytes")
+}
diff --git a/src/query/remote/client.go b/src/query/remote/client.go
index 849b563047..e7fc702471 100644
--- a/src/query/remote/client.go
+++ b/src/query/remote/client.go
@@ -33,6 +33,7 @@ import (
"github.com/m3db/m3/src/query/pools"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/m3"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/ts/m3db"
"github.com/m3db/m3/src/query/util/logging"
"github.com/m3db/m3/src/x/instrument"
@@ -109,7 +110,7 @@ func (c *grpcClient) FetchProm(
}
return storage.SeriesIteratorsToPromResult(
- result.SeriesIterators, c.opts.ReadWorkerPool(), result.Metadata,
+ result, c.opts.ReadWorkerPool(),
options.Enforcer, c.opts.TagOptions())
}
@@ -117,8 +118,8 @@ func (c *grpcClient) fetchRaw(
ctx context.Context,
query *storage.FetchQuery,
options *storage.FetchOptions,
-) (m3.SeriesFetchResult, error) {
- fetchResult := m3.SeriesFetchResult{
+) (consolidators.SeriesFetchResult, error) {
+ fetchResult := consolidators.SeriesFetchResult{
Metadata: block.NewResultMetadata(),
}
@@ -169,7 +170,7 @@ func (c *grpcClient) fetchRaw(
receivedMeta := decodeResultMetadata(result.GetMeta())
meta = meta.CombineMetadata(receivedMeta)
- iters, err := decodeCompressedFetchResponse(result, pools)
+ iters, err := DecodeCompressedFetchResponse(result, pools)
if err != nil {
return fetchResult, err
}
@@ -177,13 +178,14 @@ func (c *grpcClient) fetchRaw(
seriesIterators = append(seriesIterators, iters.Iters()...)
}
- fetchResult.Metadata = meta
- fetchResult.SeriesIterators = encoding.NewSeriesIterators(
- seriesIterators,
- pools.MutableSeriesIterators(),
+ return consolidators.NewSeriesFetchResult(
+ encoding.NewSeriesIterators(
+ seriesIterators,
+ pools.MutableSeriesIterators(),
+ ),
+ nil,
+ meta,
)
-
- return fetchResult, nil
}
func (c *grpcClient) FetchBlocks(
@@ -270,7 +272,7 @@ func (c *grpcClient) CompleteTags(
ctx context.Context,
query *storage.CompleteTagsQuery,
options *storage.FetchOptions,
-) (*storage.CompleteTagsResult, error) {
+) (*consolidators.CompleteTagsResult, error) {
request, err := encodeCompleteTagsRequest(query, options)
if err != nil {
return nil, err
@@ -286,7 +288,7 @@ func (c *grpcClient) CompleteTags(
return nil, err
}
- tags := make([]storage.CompletedTag, 0, initResultSize)
+ tags := make([]consolidators.CompletedTag, 0, initResultSize)
meta := block.NewResultMetadata()
defer completeTagsClient.CloseSend()
for {
@@ -314,7 +316,7 @@ func (c *grpcClient) CompleteTags(
tags = append(tags, result...)
}
- return &storage.CompleteTagsResult{
+ return &consolidators.CompleteTagsResult{
CompleteNameOnly: query.CompleteNameOnly,
CompletedTags: tags,
Metadata: meta,
diff --git a/src/query/remote/codecs.go b/src/query/remote/codecs.go
index 1404420595..b499572b52 100644
--- a/src/query/remote/codecs.go
+++ b/src/query/remote/codecs.go
@@ -34,6 +34,7 @@ import (
rpc "github.com/m3db/m3/src/query/generated/proto/rpcpb"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/util/logging"
"github.com/m3db/m3/src/x/instrument"
@@ -164,7 +165,7 @@ func encodeFetchOptions(options *storage.FetchOptions) (*rpc.FetchOptions, error
fanoutOpts := options.FanoutOptions
result := &rpc.FetchOptions{
- Limit: int64(options.Limit),
+ Limit: int64(options.SeriesLimit),
IncludeResolution: options.IncludeResolution,
}
@@ -215,9 +216,9 @@ func encodeRestrictQueryOptionsByType(
result := &rpcpb.RestrictQueryType{}
switch o.MetricsType {
- case storage.UnaggregatedMetricsType:
+ case storagemetadata.UnaggregatedMetricsType:
result.MetricsType = rpcpb.MetricsType_UNAGGREGATED_METRICS_TYPE
- case storage.AggregatedMetricsType:
+ case storagemetadata.AggregatedMetricsType:
result.MetricsType = rpcpb.MetricsType_AGGREGATED_METRICS_TYPE
storagePolicyProto, err := o.StoragePolicy.Proto()
@@ -391,9 +392,9 @@ func decodeRestrictQueryOptionsByType(
result := &storage.RestrictByType{}
switch p.GetMetricsType() {
case rpcpb.MetricsType_UNAGGREGATED_METRICS_TYPE:
- result.MetricsType = storage.UnaggregatedMetricsType
+ result.MetricsType = storagemetadata.UnaggregatedMetricsType
case rpcpb.MetricsType_AGGREGATED_METRICS_TYPE:
- result.MetricsType = storage.AggregatedMetricsType
+ result.MetricsType = storagemetadata.AggregatedMetricsType
}
if p.GetMetricsStoragePolicy() != nil {
@@ -462,7 +463,7 @@ func decodeFetchOptions(rpcFetchOptions *rpc.FetchOptions) (*storage.FetchOption
return result, nil
}
- result.Limit = int(rpcFetchOptions.Limit)
+ result.SeriesLimit = int(rpcFetchOptions.Limit)
result.IncludeResolution = rpcFetchOptions.GetIncludeResolution()
unagg, err := decodeFanoutOption(rpcFetchOptions.GetUnaggregated())
if err != nil {
diff --git a/src/query/remote/codecs_complete_tags.go b/src/query/remote/codecs_complete_tags.go
index ea4e7d33f0..9e6713ac43 100644
--- a/src/query/remote/codecs_complete_tags.go
+++ b/src/query/remote/codecs_complete_tags.go
@@ -24,15 +24,16 @@ import (
"github.com/m3db/m3/src/query/errors"
rpc "github.com/m3db/m3/src/query/generated/proto/rpcpb"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
)
func decodeTagNamesOnly(
response *rpc.TagNames,
-) []storage.CompletedTag {
+) []consolidators.CompletedTag {
names := response.GetNames()
- tags := make([]storage.CompletedTag, len(names))
+ tags := make([]consolidators.CompletedTag, len(names))
for i, name := range names {
- tags[i] = storage.CompletedTag{Name: name}
+ tags[i] = consolidators.CompletedTag{Name: name}
}
return tags
@@ -40,11 +41,11 @@ func decodeTagNamesOnly(
func decodeTagProperties(
response *rpc.TagValues,
-) []storage.CompletedTag {
+) []consolidators.CompletedTag {
values := response.GetValues()
- tags := make([]storage.CompletedTag, len(values))
+ tags := make([]consolidators.CompletedTag, len(values))
for i, value := range values {
- tags[i] = storage.CompletedTag{
+ tags[i] = consolidators.CompletedTag{
Name: value.GetKey(),
Values: value.GetValues(),
}
@@ -56,7 +57,7 @@ func decodeTagProperties(
func decodeCompleteTagsResponse(
response *rpc.CompleteTagsResponse,
completeNameOnly bool,
-) ([]storage.CompletedTag, error) {
+) ([]consolidators.CompletedTag, error) {
if names := response.GetNamesOnly(); names != nil {
if !completeNameOnly {
return nil, errors.ErrInconsistentCompleteTagsType
@@ -133,7 +134,7 @@ func decodeCompleteTagsRequest(
}
func encodeToCompressedCompleteTagsDefaultResult(
- results *storage.CompleteTagsResult,
+ results *consolidators.CompleteTagsResult,
) (*rpc.CompleteTagsResponse, error) {
tags := results.CompletedTags
values := make([]*rpc.TagValue, 0, len(tags))
@@ -156,7 +157,7 @@ func encodeToCompressedCompleteTagsDefaultResult(
}
func encodeToCompressedCompleteTagsNameOnlyResult(
- results *storage.CompleteTagsResult,
+ results *consolidators.CompleteTagsResult,
) (*rpc.CompleteTagsResponse, error) {
tags := results.CompletedTags
names := make([][]byte, 0, len(tags))
@@ -176,7 +177,7 @@ func encodeToCompressedCompleteTagsNameOnlyResult(
}
func encodeToCompressedCompleteTagsResult(
- results *storage.CompleteTagsResult,
+ results *consolidators.CompleteTagsResult,
) (*rpc.CompleteTagsResponse, error) {
if results.CompleteNameOnly {
return encodeToCompressedCompleteTagsNameOnlyResult(results)
diff --git a/src/query/remote/codecs_search.go b/src/query/remote/codecs_search.go
index a1161d026f..f917f227bc 100644
--- a/src/query/remote/codecs_search.go
+++ b/src/query/remote/codecs_search.go
@@ -27,12 +27,12 @@ import (
rpc "github.com/m3db/m3/src/query/generated/proto/rpcpb"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
- "github.com/m3db/m3/src/query/storage/m3"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/x/serialize"
)
func multiTagResultsToM3TagProperties(
- results []m3.MultiTagResult,
+ results []consolidators.MultiTagResult,
encoderPool serialize.TagEncoderPool,
) (*rpc.M3TagProperties, error) {
props := make([]rpc.M3TagProperty, len(results))
@@ -61,7 +61,7 @@ func multiTagResultsToM3TagProperties(
// encodeToCompressedSearchResult encodes SearchResults to a compressed
// search result.
func encodeToCompressedSearchResult(
- results []m3.MultiTagResult,
+ results []consolidators.MultiTagResult,
metadata block.ResultMetadata,
pools encoding.IteratorPools,
) (*rpc.SearchResponse, error) {
@@ -98,7 +98,7 @@ func decodeDecompressedSearchResponse(
func decodeCompressedSearchResponse(
response *rpc.M3TagProperties,
pools encoding.IteratorPools,
-) ([]m3.MultiTagResult, error) {
+) ([]consolidators.MultiTagResult, error) {
if pools == nil || pools.CheckedBytesWrapper() == nil || pools.TagDecoder() == nil {
return nil, errors.ErrCannotDecodeCompressedTags
}
@@ -108,7 +108,7 @@ func decodeCompressedSearchResponse(
idPool := pools.ID()
props := response.GetProperties()
- decoded := make([]m3.MultiTagResult, len(props))
+ decoded := make([]consolidators.MultiTagResult, len(props))
for i, prop := range props {
checkedBytes := cbwPool.Get(prop.GetCompressedTags())
decoder := decoderPool.Get()
@@ -118,7 +118,7 @@ func decodeCompressedSearchResponse(
}
id := idPool.BinaryID(cbwPool.Get(prop.GetId()))
- decoded[i] = m3.MultiTagResult{
+ decoded[i] = consolidators.MultiTagResult{
ID: id,
// Copy underlying TagIterator bytes before closing the decoder and returning it to the pool
Iter: decoder.Duplicate(),
diff --git a/src/query/remote/codecs_test.go b/src/query/remote/codecs_test.go
index 00a7189ea0..03dcc49006 100644
--- a/src/query/remote/codecs_test.go
+++ b/src/query/remote/codecs_test.go
@@ -36,6 +36,7 @@ import (
rpc "github.com/m3db/m3/src/query/generated/proto/rpcpb"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/test"
"github.com/m3db/m3/src/query/util/logging"
"github.com/m3db/m3/src/x/instrument"
@@ -131,10 +132,10 @@ func createStorageFetchQuery(t *testing.T) (*storage.FetchQuery, time.Time, time
func TestEncodeFetchMessage(t *testing.T) {
rQ, start, end := createStorageFetchQuery(t)
fetchOpts := storage.NewFetchOptions()
- fetchOpts.Limit = 42
+ fetchOpts.SeriesLimit = 42
fetchOpts.RestrictQueryOptions = &storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.AggregatedMetricsType,
+ MetricsType: storagemetadata.AggregatedMetricsType,
StoragePolicy: policy.MustParseStoragePolicy("1m:14d"),
},
}
@@ -172,10 +173,10 @@ func TestEncodeFetchMessage(t *testing.T) {
func TestEncodeDecodeFetchQuery(t *testing.T) {
rQ, _, _ := createStorageFetchQuery(t)
fetchOpts := storage.NewFetchOptions()
- fetchOpts.Limit = 42
+ fetchOpts.SeriesLimit = 42
fetchOpts.RestrictQueryOptions = &storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.AggregatedMetricsType,
+ MetricsType: storagemetadata.AggregatedMetricsType,
StoragePolicy: policy.MustParseStoragePolicy("1m:14d"),
},
}
@@ -190,7 +191,7 @@ func TestEncodeDecodeFetchQuery(t *testing.T) {
revertedOpts, err := decodeFetchOptions(gq.GetOptions())
require.NoError(t, err)
require.NotNil(t, revertedOpts)
- require.Equal(t, fetchOpts.Limit, revertedOpts.Limit)
+ require.Equal(t, fetchOpts.SeriesLimit, revertedOpts.SeriesLimit)
require.Equal(t, fetchOpts.RestrictQueryOptions.
RestrictByType.MetricsType,
revertedOpts.RestrictQueryOptions.RestrictByType.MetricsType)
@@ -251,7 +252,7 @@ func TestNewRestrictQueryOptionsFromProto(t *testing.T) {
},
expected: &storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.UnaggregatedMetricsType,
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
},
},
},
@@ -283,7 +284,7 @@ func TestNewRestrictQueryOptionsFromProto(t *testing.T) {
},
expected: &storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.AggregatedMetricsType,
+ MetricsType: storagemetadata.AggregatedMetricsType,
StoragePolicy: policy.NewStoragePolicy(time.Minute,
xtime.Second, 24*time.Hour),
},
@@ -407,7 +408,7 @@ func TestRestrictQueryOptionsProto(t *testing.T) {
{
value: storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.UnaggregatedMetricsType,
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
},
RestrictByTag: &storage.RestrictByTag{
Restrict: []models.Matcher{
@@ -433,7 +434,7 @@ func TestRestrictQueryOptionsProto(t *testing.T) {
{
value: storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.AggregatedMetricsType,
+ MetricsType: storagemetadata.AggregatedMetricsType,
StoragePolicy: policy.NewStoragePolicy(time.Minute,
xtime.Second, 24*time.Hour),
},
@@ -472,7 +473,7 @@ func TestRestrictQueryOptionsProto(t *testing.T) {
{
value: storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.MetricsType(uint(math.MaxUint16)),
+ MetricsType: storagemetadata.MetricsType(uint(math.MaxUint16)),
},
},
errContains: "unknown metrics type:",
@@ -480,7 +481,7 @@ func TestRestrictQueryOptionsProto(t *testing.T) {
{
value: storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.UnaggregatedMetricsType,
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
StoragePolicy: policy.NewStoragePolicy(time.Minute,
xtime.Second, 24*time.Hour),
},
diff --git a/src/query/remote/compressed_codecs.go b/src/query/remote/compressed_codecs.go
index 17d95d2980..ef466b1426 100644
--- a/src/query/remote/compressed_codecs.go
+++ b/src/query/remote/compressed_codecs.go
@@ -34,7 +34,7 @@ import (
"github.com/m3db/m3/src/dbnode/x/xpool"
"github.com/m3db/m3/src/query/errors"
rpc "github.com/m3db/m3/src/query/generated/proto/rpcpb"
- "github.com/m3db/m3/src/query/storage/m3"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/serialize"
@@ -69,6 +69,7 @@ func compressedSegmentFromBlockReader(br xio.BlockReader) (*rpc.M3Segment, error
Tail: segment.Tail.Bytes(),
StartTime: xtime.ToNanoseconds(br.Start),
BlockSize: int64(br.BlockSize),
+ Checksum: segment.CalculateChecksum(),
}, nil
}
@@ -137,24 +138,29 @@ func buildTags(tagIter ident.TagIterator, iterPools encoding.IteratorPools) ([]b
return nil, errors.ErrCannotEncodeCompressedTags
}
-/*
-Builds compressed rpc series from a SeriesIterator
-SeriesIterator is the top level iterator returned by m3db
-This SeriesIterator contains MultiReaderIterators, each representing a single
-replica. Each MultiReaderIterator has a ReaderSliceOfSlicesIterator where each
-step through the iterator exposes a slice of underlying BlockReaders. Each
-BlockReader contains the run time encoded bytes that represent the series.
-
-SeriesIterator also has a TagIterator representing the tags associated with it.
-
-This function transforms a SeriesIterator into a protobuf representation to be
-able to send it across the wire without needing to expand the series.
-*/
-func compressedSeriesFromSeriesIterator(
+// CompressedSeriesFromSeriesIterator builds a compressed rpc series from a SeriesIterator.
+// SeriesIterator is the top-level iterator returned by m3db.
+func CompressedSeriesFromSeriesIterator(
it encoding.SeriesIterator,
iterPools encoding.IteratorPools,
) (*rpc.Series, error) {
- replicas := it.Replicas()
+ // This SeriesIterator contains MultiReaderIterators, each representing a single
+ // replica. Each MultiReaderIterator has a ReaderSliceOfSlicesIterator where each
+ // step through the iterator exposes a slice of underlying BlockReaders. Each
+ // BlockReader contains the run time encoded bytes that represent the series.
+ //
+ // SeriesIterator also has a TagIterator representing the tags associated with it.
+ //
+ // This function transforms a SeriesIterator into a protobuf representation to be
+ // able to send it across the wire without needing to expand the series.
+ //
+ // The SeriesIterator's readers are rewound after compression so that the
+ // iterator remains usable and can be re-iterated by the caller.
+ replicas, err := it.Replicas()
+ if err != nil {
+ return nil, err
+ }
+
compressedReplicas := make([]*rpc.M3CompressedValuesReplica, 0, len(replicas))
for _, replica := range replicas {
replicaSegments := make([]*rpc.M3Segments, 0, len(replicas))
@@ -164,19 +170,29 @@ func compressedSeriesFromSeriesIterator(
if err != nil {
return nil, err
}
-
replicaSegments = append(replicaSegments, segments)
}
- compressedReplicas = append(compressedReplicas, &rpc.M3CompressedValuesReplica{
+ // Rewind the reader state back to the beginning so it can be re-iterated by the caller.
+ // These multi-readers are queued up via ResetSliceOfSlices so that the first Current
+ // index is set, and therefore we must also call an initial Next here to match that state.
+ // This behavior is not obvious, so we should later change ResetSliceOfSlices to skip
+ // the initial Next move and assert that all iters start with Current as nil.
+ readers.Rewind()
+ readers.Next()
+
+ r := &rpc.M3CompressedValuesReplica{
Segments: replicaSegments,
- })
+ }
+ compressedReplicas = append(compressedReplicas, r)
}
start := xtime.ToNanoseconds(it.Start())
end := xtime.ToNanoseconds(it.End())
- tags, err := buildTags(it.Tags(), iterPools)
+ itTags := it.Tags()
+ defer itTags.Rewind()
+ tags, err := buildTags(itTags, iterPools)
if err != nil {
return nil, err
}
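A minimal caller-side sketch of the re-iteration guarantee above (hypothetical; `it` and `iterPools` are assumed to be an encoding.SeriesIterator and encoding.IteratorPools obtained elsewhere):

    // reuseAfterCompress compresses a series and then consumes the same
    // iterator again; readers are rewound inside
    // CompressedSeriesFromSeriesIterator, so the iterator stays usable.
    func reuseAfterCompress(
        it encoding.SeriesIterator,
        iterPools encoding.IteratorPools,
    ) (*rpc.Series, error) {
        series, err := CompressedSeriesFromSeriesIterator(it, iterPools)
        if err != nil {
            return nil, err
        }
        for it.Next() {
            dp, _, _ := it.Current()
            _ = dp // datapoints are still available post-compression
        }
        return series, it.Err()
    }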
@@ -198,13 +214,13 @@ func compressedSeriesFromSeriesIterator(
// encodeToCompressedSeries encodes SeriesIterators to compressed series.
func encodeToCompressedSeries(
- results m3.SeriesFetchResult,
+ results consolidators.SeriesFetchResult,
iterPools encoding.IteratorPools,
) ([]*rpc.Series, error) {
- iters := results.SeriesIterators.Iters()
+ iters := results.SeriesIterators()
seriesList := make([]*rpc.Series, 0, len(iters))
for _, iter := range iters {
- series, err := compressedSeriesFromSeriesIterator(iter, iterPools)
+ series, err := CompressedSeriesFromSeriesIterator(iter, iterPools)
if err != nil {
return nil, err
}
@@ -238,7 +254,7 @@ func blockReaderFromCompressedSegment(
checkedBytesWrapperPool xpool.CheckedBytesWrapperPool,
) xio.BlockReader {
head, tail := segmentBytesFromCompressedSegment(seg.GetHead(), seg.GetTail(), opts, checkedBytesWrapperPool)
- segment := ts.NewSegment(head, tail, ts.FinalizeNone)
+ segment := ts.NewSegment(head, tail, seg.GetChecksum(), ts.FinalizeNone)
segmentReader := xio.NewSegmentReader(segment)
return xio.BlockReader{
@@ -371,8 +387,8 @@ func seriesIteratorFromCompressedSeries(
id = ident.StringID(idString)
}
- start := xtime.FromNanoseconds(meta.GetStartTime())
- end := xtime.FromNanoseconds(meta.GetEndTime())
+ start := xtime.UnixNano(meta.GetStartTime())
+ end := xtime.UnixNano(meta.GetEndTime())
var seriesIter encoding.SeriesIterator
if seriesIterPool != nil {
@@ -392,9 +408,9 @@ func seriesIteratorFromCompressedSeries(
return seriesIter, nil
}
-// decodeCompressedFetchResponse decodes compressed fetch
+// DecodeCompressedFetchResponse decodes compressed fetch
// response to seriesIterators.
-func decodeCompressedFetchResponse(
+func DecodeCompressedFetchResponse(
fetchResult *rpc.FetchResponse,
iteratorPools encoding.IteratorPools,
) (encoding.SeriesIterators, error) {
diff --git a/src/query/remote/compressed_codecs_test.go b/src/query/remote/compressed_codecs_test.go
index effc073fe0..14b22437eb 100644
--- a/src/query/remote/compressed_codecs_test.go
+++ b/src/query/remote/compressed_codecs_test.go
@@ -26,8 +26,9 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/query/block"
rpc "github.com/m3db/m3/src/query/generated/proto/rpcpb"
- "github.com/m3db/m3/src/query/storage/m3"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/test"
"github.com/m3db/m3/src/x/ident"
@@ -61,7 +62,8 @@ func buildTestSeriesIterator(t *testing.T) encoding.SeriesIterator {
func validateSeriesInternals(t *testing.T, it encoding.SeriesIterator) {
defer it.Close()
- replicas := it.Replicas()
+ replicas, err := it.Replicas()
+ require.NoError(t, err)
expectedReaders := []int{1, 2}
expectedStarts := []time.Time{start.Truncate(blockSize), middle.Truncate(blockSize)}
for _, replica := range replicas {
@@ -177,14 +179,14 @@ func verifyCompressedSeries(t *testing.T, s *rpc.Series) {
func TestConversionToCompressedData(t *testing.T) {
it := buildTestSeriesIterator(t)
- series, err := compressedSeriesFromSeriesIterator(it, nil)
+ series, err := CompressedSeriesFromSeriesIterator(it, nil)
require.Error(t, err)
require.Nil(t, series)
}
func TestSeriesConversionFromCompressedData(t *testing.T) {
it := buildTestSeriesIterator(t)
- series, err := compressedSeriesFromSeriesIterator(it, nil)
+ series, err := CompressedSeriesFromSeriesIterator(it, nil)
require.Error(t, err)
require.Nil(t, series)
}
@@ -192,7 +194,7 @@ func TestSeriesConversionFromCompressedData(t *testing.T) {
func TestSeriesConversionFromCompressedDataWithIteratorPool(t *testing.T) {
it := buildTestSeriesIterator(t)
ip := test.MakeMockIteratorPool()
- series, err := compressedSeriesFromSeriesIterator(it, ip)
+ series, err := CompressedSeriesFromSeriesIterator(it, ip)
require.NoError(t, err)
verifyCompressedSeries(t, series)
@@ -224,10 +226,13 @@ func TestEncodeToCompressedFetchResult(t *testing.T) {
[]encoding.SeriesIterator{buildTestSeriesIterator(t),
buildTestSeriesIterator(t)}, nil)
ip := test.MakeMockIteratorPool()
- result := m3.SeriesFetchResult{
- SeriesIterators: iters,
- }
+ result, err := consolidators.NewSeriesFetchResult(
+ iters,
+ nil,
+ block.NewResultMetadata(),
+ )
+ require.NoError(t, err)
fetchResult, err := encodeToCompressedSeries(result, ip)
require.NoError(t, err)
@@ -250,10 +255,13 @@ func TestDecodeCompressedFetchResult(t *testing.T) {
iters := encoding.NewSeriesIterators(
[]encoding.SeriesIterator{buildTestSeriesIterator(t),
buildTestSeriesIterator(t)}, nil)
- result := m3.SeriesFetchResult{
- SeriesIterators: iters,
- }
+ result, err := consolidators.NewSeriesFetchResult(
+ iters,
+ nil,
+ block.NewResultMetadata(),
+ )
+ require.NoError(t, err)
compressed, err := encodeToCompressedSeries(result, nil)
require.Error(t, err)
require.Nil(t, compressed)
@@ -265,10 +273,13 @@ func TestDecodeCompressedFetchResultWithIteratorPool(t *testing.T) {
[]encoding.SeriesIterator{buildTestSeriesIterator(t),
buildTestSeriesIterator(t)}, nil)
- result := m3.SeriesFetchResult{
- SeriesIterators: iters,
- }
+ result, err := consolidators.NewSeriesFetchResult(
+ iters,
+ nil,
+ block.NewResultMetadata(),
+ )
+ require.NoError(t, err)
compressed, err := encodeToCompressedSeries(result, ip)
require.NoError(t, err)
require.Len(t, compressed, 2)
@@ -280,7 +291,7 @@ func TestDecodeCompressedFetchResultWithIteratorPool(t *testing.T) {
Series: compressed,
}
- revertedIters, err := decodeCompressedFetchResponse(fetchResult, ip)
+ revertedIters, err := DecodeCompressedFetchResponse(fetchResult, ip)
require.NoError(t, err)
revertedIterList := revertedIters.Iters()
require.Len(t, revertedIterList, 2)
@@ -288,7 +299,7 @@ func TestDecodeCompressedFetchResultWithIteratorPool(t *testing.T) {
validateSeries(t, seriesIterator)
}
- revertedIters, err = decodeCompressedFetchResponse(fetchResult, ip)
+ revertedIters, err = DecodeCompressedFetchResponse(fetchResult, ip)
require.NoError(t, err)
revertedIterList = revertedIters.Iters()
require.Len(t, revertedIterList, 2)
@@ -311,12 +322,22 @@ func TestConversionDoesNotCloseSeriesIterator(t *testing.T) {
ctrl := gomock.NewController(t)
mockIter := encoding.NewMockSeriesIterator(ctrl)
mockIter.EXPECT().Close().Times(0)
- mockIter.EXPECT().Replicas().Return([]encoding.MultiReaderIterator{}).Times(1)
+ mockIter.EXPECT().Replicas().Return([]encoding.MultiReaderIterator{}, nil).Times(1)
mockIter.EXPECT().Start().Return(time.Now()).Times(1)
mockIter.EXPECT().End().Return(time.Now()).Times(1)
mockIter.EXPECT().Tags().Return(ident.NewTagsIterator(ident.NewTags())).Times(1)
mockIter.EXPECT().Namespace().Return(ident.StringID("")).Times(1)
mockIter.EXPECT().ID().Return(ident.StringID("")).Times(1)
- compressedSeriesFromSeriesIterator(mockIter, nil)
+ CompressedSeriesFromSeriesIterator(mockIter, nil)
+}
+
+func TestIterablePostCompression(t *testing.T) {
+ it := buildTestSeriesIterator(t)
+ ip := test.MakeMockIteratorPool()
+ series, err := CompressedSeriesFromSeriesIterator(it, ip)
+ require.NoError(t, err)
+ require.NotNil(t, series)
+
+ validateSeries(t, it)
}
diff --git a/src/query/remote/server.go b/src/query/remote/server.go
index bbba664b9c..5d677e3372 100644
--- a/src/query/remote/server.go
+++ b/src/query/remote/server.go
@@ -29,8 +29,8 @@ import (
rpc "github.com/m3db/m3/src/query/generated/proto/rpcpb"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/pools"
- "github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/m3"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/util/logging"
"github.com/m3db/m3/src/x/instrument"
@@ -124,9 +124,14 @@ func (s *grpcServer) Fetch(
}
fetchOpts.Remote = true
- if fetchOpts.Limit == 0 {
+ if fetchOpts.SeriesLimit == 0 {
// Allow default to be set if not explicitly passed.
- fetchOpts.Limit = s.queryContextOpts.LimitMaxTimeseries
+ fetchOpts.SeriesLimit = s.queryContextOpts.LimitMaxTimeseries
+ }
+
+ if fetchOpts.DocsLimit == 0 {
+ // Allow default to be set if not explicitly passed.
+ fetchOpts.DocsLimit = s.queryContextOpts.LimitMaxDocs
}
result, cleanup, err := s.querier.FetchCompressed(ctx, storeQuery, fetchOpts)
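The limit-defaulting idiom above, factored into a standalone sketch (withDefaultLimits is hypothetical; SeriesLimit/DocsLimit and the queryContextOpts fields are the ones added in this change):

    // withDefaultLimits applies server-side defaults for any limits the
    // caller did not set explicitly.
    func withDefaultLimits(opts *storage.FetchOptions, qOpts models.QueryContextOptions) {
        if opts.SeriesLimit == 0 {
            opts.SeriesLimit = qOpts.LimitMaxTimeseries
        }
        if opts.DocsLimit == 0 {
            opts.DocsLimit = qOpts.LimitMaxDocs
        }
    }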
@@ -252,7 +257,7 @@ func (s *grpcServer) CompleteTags(
size := min(defaultBatch, len(tags))
for ; len(tags) > 0; tags = tags[size:] {
size = min(size, len(tags))
- results := &storage.CompleteTagsResult{
+ results := &consolidators.CompleteTagsResult{
CompleteNameOnly: completed.CompleteNameOnly,
CompletedTags: tags[:size],
Metadata: completed.Metadata,
diff --git a/src/query/remote/server_test.go b/src/query/remote/server_test.go
index 8f4c149fc9..cd54ebb5eb 100644
--- a/src/query/remote/server_test.go
+++ b/src/query/remote/server_test.go
@@ -38,6 +38,7 @@ import (
"github.com/m3db/m3/src/query/pools"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/m3"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/test"
"github.com/m3db/m3/src/query/ts/m3db"
"github.com/m3db/m3/src/x/ident"
@@ -52,7 +53,8 @@ import (
var (
errRead = errors.New("read error")
- poolsWrapper = pools.NewPoolsWrapper(pools.BuildIteratorPools())
+ poolsWrapper = pools.NewPoolsWrapper(
+ pools.BuildIteratorPools(pools.BuildIteratorPoolsOptions{}))
)
type mockStorageOptions struct {
@@ -74,14 +76,14 @@ func newMockStorage(
ctx context.Context,
query *storage.FetchQuery,
options *storage.FetchOptions,
- ) (m3.SeriesFetchResult, m3.Cleanup, error) {
+ ) (consolidators.SeriesFetchResult, m3.Cleanup, error) {
var cleanup = func() error { return nil }
if opts.cleanup != nil {
cleanup = opts.cleanup
}
if opts.err != nil {
- return m3.SeriesFetchResult{
+ return consolidators.SeriesFetchResult{
Metadata: block.NewResultMetadata(),
}, cleanup, opts.err
}
@@ -100,10 +102,13 @@ func newMockStorage(
)
}
- return m3.SeriesFetchResult{
- SeriesIterators: iters,
- Metadata: block.NewResultMetadata(),
- }, cleanup, nil
+ res, err := consolidators.NewSeriesFetchResult(
+ iters,
+ nil,
+ block.NewResultMetadata(),
+ )
+
+ return res, cleanup, err
}).
AnyTimes()
return store
@@ -431,7 +436,7 @@ func TestBatchedSearch(t *testing.T) {
for _, size := range sizes {
var (
msg = fmt.Sprintf("batch size: %d", size)
- tags = make([]m3.MultiTagResult, 0, size)
+ tags = make([]consolidators.MultiTagResult, 0, size)
names = make([]string, 0, size)
cleaned = false
)
@@ -444,7 +449,7 @@ func TestBatchedSearch(t *testing.T) {
for i := 0; i < size; i++ {
name := fmt.Sprintf("%s_%d", seriesID, i)
- tag := m3.MultiTagResult{
+ tag := consolidators.MultiTagResult{
ID: ident.StringID(name),
Iter: ident.NewTagsIterator(ident.NewTags(
ident.Tag{
@@ -459,7 +464,7 @@ func TestBatchedSearch(t *testing.T) {
}
store := m3.NewMockStorage(ctrl)
- tagResult := m3.TagResult{
+ tagResult := consolidators.TagResult{
Tags: tags,
Metadata: block.NewResultMetadata(),
}
@@ -502,12 +507,12 @@ func TestBatchedCompleteTags(t *testing.T) {
for _, size := range sizes {
var (
msg = fmt.Sprintf("batch size: %d, name only: %t", size, nameOnly)
- tags = make([]storage.CompletedTag, 0, size)
+ tags = make([]consolidators.CompletedTag, 0, size)
)
for i := 0; i < size; i++ {
name := fmt.Sprintf("%s_%d", seriesID, i)
- tag := storage.CompletedTag{
+ tag := consolidators.CompletedTag{
Name: []byte(name),
}
@@ -519,7 +524,7 @@ func TestBatchedCompleteTags(t *testing.T) {
}
store := m3.NewMockStorage(ctrl)
- expected := &storage.CompleteTagsResult{
+ expected := &consolidators.CompleteTagsResult{
CompleteNameOnly: nameOnly,
CompletedTags: tags,
Metadata: block.ResultMetadata{
diff --git a/src/query/server/cost_reporters.go b/src/query/server/cost_reporters.go
index 6a165c6ab9..1f6e2d0b5a 100644
--- a/src/query/server/cost_reporters.go
+++ b/src/query/server/cost_reporters.go
@@ -23,10 +23,12 @@ package server
// This file contains reporters and setup for our query/cost.ChainedEnforcer
// instances.
import (
+ "fmt"
"sync"
"github.com/m3db/m3/src/cmd/services/m3query/config"
qcost "github.com/m3db/m3/src/query/cost"
+ "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/cost"
"github.com/m3db/m3/src/x/instrument"
@@ -34,6 +36,9 @@ import (
)
const (
+ costScopeName = "cost"
+ limitManagerScopeName = "limits"
+ reporterScopeName = "reporter"
queriesOverLimitMetric = "over_datapoints_limit"
datapointsMetric = "datapoints"
datapointsCounterMetric = "datapoints_counter"
@@ -45,49 +50,90 @@ const (
// on them (as configured by cfg.Limits); per-block is purely for accounting
// purposes.
// Our enforcers report at least these stats:
-// cost.global.datapoints: gauge; the number of datapoints currently in use
-// by this instance.
+// cost_reporter_datapoints{limiter="global"}: gauge;
+// > the number of datapoints currently in use by this instance.
//
-// cost.global.datapoints_counter: counter; counter representation of the
-// number of datapoints in use by this instance
+// cost_reporter_datapoints_counter{limiter="global"}: counter;
+// > counter representation of the number of datapoints in use by this instance.
//
-// cost.{per_query,global}.over_datapoints_limit: counter; how many queries are over the
-// datapoint limit
+// cost_reporter_over_datapoints_limit{limiter=~"(global|query)"}: counter;
+// > how many queries are over the datapoint limit.
//
-// cost.per_query.max_datapoints_hist: histogram; represents the
-// distribution of the maximum datapoints used at any point in each query.
-func newConfiguredChainedEnforcer(cfg *config.Configuration, instrumentOptions instrument.Options) (qcost.ChainedEnforcer, error) {
- costScope := instrumentOptions.MetricsScope().SubScope("cost")
- costIops := instrumentOptions.SetMetricsScope(costScope)
- limitMgr := cost.NewStaticLimitManager(cfg.Limits.Global.AsLimitManagerOptions().SetInstrumentOptions(costIops))
- tracker := cost.NewTracker()
-
- globalEnforcer := cost.NewEnforcer(limitMgr, tracker,
- cost.NewEnforcerOptions().SetReporter(
- newGlobalReporter(costScope.SubScope("global")),
- ).SetCostExceededMessage("limits.global.maxFetchedDatapoints exceeded"),
- )
-
- queryEnforcerOpts := cost.NewEnforcerOptions().SetCostExceededMessage("limits.perQuery.maxFetchedDatapoints exceeded").
- SetReporter(newPerQueryReporter(costScope.
- SubScope("per_query")))
-
- queryEnforcer := cost.NewEnforcer(
- cost.NewStaticLimitManager(cfg.Limits.PerQuery.AsLimitManagerOptions()),
- cost.NewTracker(),
- queryEnforcerOpts)
+// cost_reporter_max_datapoints_hist{limiter=~"(global|query)"}: histogram;
+// > represents the distribution of the maximum datapoints used at any point in each query.
+func newConfiguredChainedEnforcer(
+ cfg *config.Configuration,
+ instrumentOptions instrument.Options,
+) (qcost.ChainedEnforcer, close.SimpleCloser, error) {
+ scope := instrumentOptions.MetricsScope().SubScope(costScopeName)
+
+ exceededMessage := func(exceedType, exceedLimit string) string {
+ return fmt.Sprintf("exceeded limits.%s.%s", exceedType, exceedLimit)
+ }
+
+ // Create global limit manager and enforcer.
+ globalScope := scope.Tagged(map[string]string{
+ "limiter": "global",
+ })
+ globalLimitManagerScope := globalScope.SubScope(limitManagerScopeName)
+ globalReporterScope := globalScope.SubScope(reporterScopeName)
+
+ globalLimitMgr := cost.NewStaticLimitManager(
+ cfg.Limits.Global.AsLimitManagerOptions().
+ SetInstrumentOptions(instrumentOptions.SetMetricsScope(globalLimitManagerScope)))
+
+ globalTracker := cost.NewTracker()
+
+ globalEnforcer := cost.NewEnforcer(globalLimitMgr, globalTracker,
+ cost.NewEnforcerOptions().
+ SetReporter(newGlobalReporter(globalReporterScope)).
+ SetCostExceededMessage(exceededMessage("global", "maxFetchedDatapoints")))
+
+ // Create per query limit manager and enforcer.
+ queryScope := scope.Tagged(map[string]string{
+ "limiter": "query",
+ })
+ queryLimitManagerScope := queryScope.SubScope(limitManagerScopeName)
+ queryReporterScope := queryScope.SubScope(reporterScopeName)
+ queryLimitMgr := cost.NewStaticLimitManager(
+ cfg.Limits.PerQuery.AsLimitManagerOptions().
+ SetInstrumentOptions(instrumentOptions.SetMetricsScope(queryLimitManagerScope)))
+
+ queryTracker := cost.NewTracker()
+
+ queryEnforcer := cost.NewEnforcer(queryLimitMgr, queryTracker,
+ cost.NewEnforcerOptions().
+ SetReporter(newPerQueryReporter(queryReporterScope)).
+ SetCostExceededMessage(exceededMessage("perQuery", "maxFetchedDatapoints")))
+
+ // Create block enforcer.
blockEnforcer := cost.NewEnforcer(
cost.NewStaticLimitManager(cost.NewLimitManagerOptions().SetDefaultLimit(cost.Limit{Enabled: false})),
cost.NewTracker(),
- nil,
- )
+ nil)
- return qcost.NewChainedEnforcer(qcost.GlobalLevel, []cost.Enforcer{
+ // Create chained enforcer.
+ enforcer, err := qcost.NewChainedEnforcer(qcost.GlobalLevel, []cost.Enforcer{
globalEnforcer,
queryEnforcer,
blockEnforcer,
})
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Start reporting stats for all limit managers.
+ go globalLimitMgr.Report()
+ go queryLimitMgr.Report()
+
+ // Stop the limit manager reporters when the returned closer is closed.
+ closer := close.SimpleCloserFn(func() {
+ globalLimitMgr.Close()
+ queryLimitMgr.Close()
+ })
+
+ return enforcer, closer, nil
}
// globalReporter records ChainedEnforcer statistics for the global enforcer.
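A short tally sketch of the tagging scheme documented above (illustrative only, not the enforcer wiring itself):

    scope := tally.NewTestScope("", nil)
    globalScope := scope.SubScope("cost").Tagged(map[string]string{
        "limiter": "global",
    })
    globalScope.SubScope("reporter").Gauge("datapoints").Update(7)
    // With a Prometheus reporter this renders approximately as:
    //   cost_reporter_datapoints{limiter="global"} 7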
diff --git a/src/query/server/cost_reporters_test.go b/src/query/server/cost_reporters_test.go
index 26fefc6252..a301123bb8 100644
--- a/src/query/server/cost_reporters_test.go
+++ b/src/query/server/cost_reporters_test.go
@@ -27,6 +27,7 @@ import (
"github.com/m3db/m3/src/cmd/services/m3query/config"
"github.com/m3db/m3/src/query/cost"
+ "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/cost/test"
"github.com/m3db/m3/src/x/instrument"
@@ -35,17 +36,22 @@ import (
"github.com/uber-go/tally"
)
-func TestNewConfiguredChainedEnforcer(t *testing.T) {
- type testCtx struct {
- Scope tally.TestScope
- GlobalEnforcer cost.ChainedEnforcer
- }
+type enforcerTestCtx struct {
+ Scope tally.TestScope
+ GlobalEnforcer cost.ChainedEnforcer
+ Closer close.SimpleCloser
+}
+
+func (c enforcerTestCtx) Close() {
+ c.Closer.Close()
+}
- setup := func(t *testing.T, perQueryLimit, globalLimit int64) testCtx {
+func TestNewConfiguredChainedEnforcer(t *testing.T) {
+ setup := func(t *testing.T, perQueryLimit, globalLimit int) enforcerTestCtx {
s := tally.NewTestScope("", nil)
iopts := instrument.NewOptions().SetMetricsScope(s)
- globalEnforcer, err := newConfiguredChainedEnforcer(&config.Configuration{
+ globalEnforcer, closer, err := newConfiguredChainedEnforcer(&config.Configuration{
Limits: config.LimitsConfiguration{
PerQuery: config.PerQueryLimitsConfiguration{
MaxFetchedDatapoints: perQueryLimit,
@@ -58,14 +64,16 @@ func TestNewConfiguredChainedEnforcer(t *testing.T) {
require.NoError(t, err)
- return testCtx{
+ return enforcerTestCtx{
Scope: s,
GlobalEnforcer: globalEnforcer,
+ Closer: closer,
}
}
t.Run("has 3 valid levels", func(t *testing.T) {
tctx := setup(t, 6, 10)
+ defer tctx.Close()
assertValid := func(ce cost.ChainedEnforcer) {
assert.NotEqual(t, ce, cost.NoopChainedEnforcer())
@@ -86,6 +94,8 @@ func TestNewConfiguredChainedEnforcer(t *testing.T) {
t.Run("configures reporters", func(t *testing.T) {
tctx := setup(t, 6, 10)
+ defer tctx.Close()
+
queryEf := tctx.GlobalEnforcer.Child(cost.QueryLevel)
blockEf := queryEf.Child(cost.BlockLevel)
blockEf.Add(7)
@@ -93,7 +103,7 @@ func TestNewConfiguredChainedEnforcer(t *testing.T) {
assertHasGauge(t,
tctx.Scope.Snapshot(),
tally.KeyForPrefixedStringMap(
- fmt.Sprintf("cost.global.%s", datapointsMetric), nil),
+ fmt.Sprintf("cost.reporter.%s", datapointsMetric), map[string]string{"limiter": "global"}),
7,
)
@@ -103,26 +113,29 @@ func TestNewConfiguredChainedEnforcer(t *testing.T) {
assertHasHistogram(t,
tctx.Scope.Snapshot(),
tally.KeyForPrefixedStringMap(
- fmt.Sprintf("cost.per_query.%s", maxDatapointsHistMetric), nil),
+ fmt.Sprintf("cost.reporter.%s", maxDatapointsHistMetric), map[string]string{"limiter": "query"}),
map[float64]int64{10: 1},
)
})
t.Run("block level doesn't have a limit", func(t *testing.T) {
tctx := setup(t, -1, -1)
+ defer tctx.Close()
+
block := tctx.GlobalEnforcer.Child(cost.QueryLevel).Child(cost.BlockLevel)
assert.NoError(t, block.Add(math.MaxFloat64-1).Error)
})
t.Run("works e2e", func(t *testing.T) {
tctx := setup(t, 6, 10)
+ defer tctx.Close()
qe1, qe2 := tctx.GlobalEnforcer.Child(cost.QueryLevel), tctx.GlobalEnforcer.Child(cost.QueryLevel)
r := qe1.Add(6)
test.AssertLimitErrorWithMsg(
t,
r.Error,
- "exceeded query limit: limits.perQuery.maxFetchedDatapoints exceeded",
+ "exceeded query limit: exceeded limits.perQuery.maxFetchedDatapoints",
6,
6)
@@ -133,7 +146,7 @@ func TestNewConfiguredChainedEnforcer(t *testing.T) {
test.AssertLimitErrorWithMsg(
t,
r.Error,
- "exceeded global limit: limits.global.maxFetchedDatapoints exceeded",
+ "exceeded global limit: exceeded limits.global.maxFetchedDatapoints",
11,
10)
diff --git a/src/query/server/multi_process.go b/src/query/server/multi_process.go
new file mode 100644
index 0000000000..2ce47b5bb7
--- /dev/null
+++ b/src/query/server/multi_process.go
@@ -0,0 +1,185 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package server
+
+import (
+ "fmt"
+ "math"
+ "net"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+
+ "github.com/m3db/m3/src/cmd/services/m3query/config"
+ "github.com/m3db/m3/src/x/instrument"
+ xnet "github.com/m3db/m3/src/x/net"
+ "github.com/m3db/m3/src/x/panicmon"
+
+ "go.uber.org/zap"
+)
+
+const (
+ multiProcessInstanceEnvVar = "MULTIPROCESS_INSTANCE"
+ multiProcessParentInstance = "0"
+ multiProcessMetricTagID = "multiprocess_id"
+ goMaxProcsEnvVar = "GOMAXPROCS"
+)
+
+type multiProcessResult struct {
+ isParentCleanExit bool
+
+ cfg config.Configuration
+ logger *zap.Logger
+ listenerOpts xnet.ListenerOptions
+}
+
+func multiProcessProcessID() string {
+ return os.Getenv(multiProcessInstanceEnvVar)
+}
+
+func multiProcessRun(
+ cfg config.Configuration,
+ logger *zap.Logger,
+ listenerOpts xnet.ListenerOptions,
+) (multiProcessResult, error) {
+ multiProcessInstance := multiProcessProcessID()
+ if multiProcessInstance != "" {
+ // This is already a sub-process; make sure listener options
+ // will reuse ports so multiple processes can listen on the same
+ // listen port.
+ listenerOpts = xnet.NewListenerOptions(xnet.ListenerReusePort(true))
+
+ // Configure instrumentation to be correctly partitioned.
+ logger = logger.With(zap.String("processID", multiProcessInstance))
+
+ instance, err := strconv.Atoi(multiProcessInstance)
+ if err != nil {
+ return multiProcessResult{},
+ fmt.Errorf("multi-process process ID is non-integer: %v", err)
+ }
+
+ // Set the root scope multi-process process ID.
+ if cfg.Metrics.RootScope == nil {
+ cfg.Metrics.RootScope = &instrument.ScopeConfiguration{}
+ }
+ if cfg.Metrics.RootScope.CommonTags == nil {
+ cfg.Metrics.RootScope.CommonTags = make(map[string]string)
+ }
+ cfg.Metrics.RootScope.CommonTags[multiProcessMetricTagID] = multiProcessInstance
+
+ // Listen on a different Prometheus metrics handler listen port.
+ if cfg.Metrics.PrometheusReporter != nil && cfg.Metrics.PrometheusReporter.ListenAddress != "" {
+ // Simply increment the listen address port by the instance number.
+ host, port, err := net.SplitHostPort(cfg.Metrics.PrometheusReporter.ListenAddress)
+ if err != nil {
+ return multiProcessResult{},
+ fmt.Errorf("could not split host:port for metrics reporter: %v", err)
+ }
+
+ portValue, err := strconv.Atoi(port)
+ if err != nil {
+ return multiProcessResult{},
+ fmt.Errorf("prometheus metric reporter port is non-integer: %v", err)
+ }
+ if portValue > 0 {
+ // Increment port value by process ID if valid port.
+ address := net.JoinHostPort(host, strconv.Itoa(portValue+instance-1))
+ cfg.Metrics.PrometheusReporter.ListenAddress = address
+ logger.Info("multi-process prometheus metrics reporter listen address configured",
+ zap.String("address", address))
+ }
+ }
+ return multiProcessResult{
+ cfg: cfg,
+ logger: logger,
+ listenerOpts: listenerOpts,
+ }, nil
+ }
+
+ logger = logger.With(zap.String("processID", multiProcessParentInstance))
+
+ perCPU := defaultPerCPUMultiProcess
+ if v := cfg.MultiProcess.PerCPU; v > 0 {
+ // Allow config to override per CPU factor for determining count.
+ perCPU = v
+ }
+
+ count := int(math.Max(1, float64(runtime.NumCPU())*perCPU))
+ if v := cfg.MultiProcess.Count; v > 0 {
+ // Allow config to override per CPU auto derived count.
+ count = v
+ }
+
+ logger.Info("starting multi-process subprocesses",
+ zap.Int("count", count))
+ var (
+ wg sync.WaitGroup
+ statuses = make([]panicmon.StatusCode, count)
+ )
+ for i := 0; i < count; i++ {
+ i := i
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ newEnv := []string{
+ fmt.Sprintf("%s=%d", multiProcessInstanceEnvVar, i+1),
+ }
+
+ // Set GOMAXPROCS correctly if configured.
+ if v := cfg.MultiProcess.GoMaxProcs; v > 0 {
+ newEnv = append(newEnv,
+ fmt.Sprintf("%s=%d", goMaxProcsEnvVar, v))
+ }
+
+ newEnv = append(newEnv, os.Environ()...)
+
+ exec := panicmon.NewExecutor(panicmon.ExecutorOptions{
+ Env: newEnv,
+ })
+ status, err := exec.Run(os.Args)
+ if err != nil {
+ logger.Error("process failed", zap.Error(err))
+ }
+
+ statuses[i] = status
+ }()
+ }
+
+ wg.Wait()
+
+ exitNotOk := 0
+ for _, v := range statuses {
+ if v != 0 {
+ exitNotOk++
+ }
+ }
+
+ if exitNotOk > 0 {
+ return multiProcessResult{},
+ fmt.Errorf("child exit codes not ok: %v", statuses)
+ }
+
+ return multiProcessResult{
+ isParentCleanExit: true,
+ }, nil
+}
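To make the address arithmetic concrete, a standalone sketch of the per-instance Prometheus port derivation (metricsAddrForInstance is a hypothetical helper, not part of this change):

    // metricsAddrForInstance derives a child's metrics listen address by
    // incrementing the base port by (instance - 1); child IDs start at 1.
    func metricsAddrForInstance(base string, instance int) (string, error) {
        host, port, err := net.SplitHostPort(base)
        if err != nil {
            return "", err
        }
        p, err := strconv.Atoi(port)
        if err != nil {
            return "", err
        }
        return net.JoinHostPort(host, strconv.Itoa(p+instance-1)), nil
    }
    // metricsAddrForInstance("0.0.0.0:7203", 2) yields "0.0.0.0:7204".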
diff --git a/src/query/server/query.go b/src/query/server/query.go
index 534d63f0f1..f316fca5c8 100644
--- a/src/query/server/query.go
+++ b/src/query/server/query.go
@@ -31,8 +31,10 @@ import (
"strings"
"time"
+ "github.com/m3db/m3/src/aggregator/server"
clusterclient "github.com/m3db/m3/src/cluster/client"
etcdclient "github.com/m3db/m3/src/cluster/client/etcd"
+ "github.com/m3db/m3/src/cmd/services/m3aggregator/serve"
"github.com/m3db/m3/src/cmd/services/m3coordinator/downsample"
"github.com/m3db/m3/src/cmd/services/m3coordinator/ingest"
ingestcarbon "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest/carbon"
@@ -54,6 +56,8 @@ import (
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/fanout"
"github.com/m3db/m3/src/query/storage/m3"
+ queryconsolidators "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/storage/remote"
"github.com/m3db/m3/src/query/stores/m3db"
tsdb "github.com/m3db/m3/src/query/ts/m3db"
@@ -61,6 +65,8 @@ import (
"github.com/m3db/m3/src/x/clock"
xconfig "github.com/m3db/m3/src/x/config"
"github.com/m3db/m3/src/x/instrument"
+ xio "github.com/m3db/m3/src/x/io"
+ xnet "github.com/m3db/m3/src/x/net"
xos "github.com/m3db/m3/src/x/os"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/serialize"
@@ -68,9 +74,14 @@ import (
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
+ "github.com/go-kit/kit/log"
+ kitlogzap "github.com/go-kit/kit/log/zap"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
+ extprom "github.com/prometheus/client_golang/prometheus"
+ prometheuspromql "github.com/prometheus/prometheus/promql"
"go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
@@ -85,7 +96,7 @@ var (
Namespaces: []m3.ClusterStaticNamespaceConfiguration{
{
Namespace: "default",
- Type: storage.UnaggregatedMetricsType,
+ Type: storagemetadata.UnaggregatedMetricsType,
Retention: 2 * 24 * time.Hour,
},
},
@@ -93,6 +104,7 @@ var (
defaultDownsamplerAndWriterWorkerPoolSize = 1024
defaultCarbonIngesterWorkerPoolSize = 1024
+ defaultPerCPUMultiProcess = 0.5
)
type cleanupFn func() error
@@ -121,24 +133,60 @@ type RunOptions struct {
// on once it has opened.
ListenerCh chan<- net.Listener
- // CustomHandlers is a list of custom 3rd party handlers.
- CustomHandlers []options.CustomHandler
+ // M3MsgListenerCh is a programmatic channel to receive the M3Msg server
+ // listener on once it has opened.
+ M3MsgListenerCh chan<- net.Listener
+
+ // DownsamplerReadyCh is a programmatic channel to receive the downsampler
+ // ready signal once it has been constructed.
+ DownsamplerReadyCh chan<- struct{}
+
+ // InstrumentOptionsReadyCh is a programmatic channel to receive a set of
+ // instrument options and metric reporters once they have been
+ // constructed.
+ InstrumentOptionsReadyCh chan<- InstrumentOptionsReady
+
+ // CustomHandlerOptions contains custom handler options.
+ CustomHandlerOptions options.CustomHandlerOptions
// CustomPromQLParseFunction is a custom PromQL parsing function.
CustomPromQLParseFunction promql.ParseFn
// ApplyCustomTSDBOptions is a transform that allows for custom tsdb options.
ApplyCustomTSDBOptions CustomTSDBOptionsFn
+
+ // BackendStorageTransform is a custom backend storage transform.
+ BackendStorageTransform BackendStorageTransform
+
+ // AggregatorServerOptions are server options for aggregator.
+ AggregatorServerOptions []server.AdminOption
+}
+
+// InstrumentOptionsReady is a set of instrument options
+// and metric reporters, delivered once they have been constructed.
+type InstrumentOptionsReady struct {
+ InstrumentOptions instrument.Options
+ MetricsReporters instrument.MetricsConfigurationReporters
}
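A hypothetical embedding sketch for the new channels (caller side; cfg is assumed to be a loaded config.Configuration — this mirrors how query_test.go consumes them):

    downsamplerReadyCh := make(chan struct{}, 1)
    listenerCh := make(chan net.Listener, 1)
    go server.Run(server.RunOptions{
        Config:             cfg,
        ListenerCh:         listenerCh,
        DownsamplerReadyCh: downsamplerReadyCh,
    })
    <-downsamplerReadyCh          // downsampler constructed and ready
    addr := (<-listenerCh).Addr() // API server listener bound
    _ = addr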
// CustomTSDBOptionsFn is a transformation function for TSDB Options.
type CustomTSDBOptionsFn func(tsdb.Options) tsdb.Options
+// BackendStorageTransform is a transformation function for backend storage.
+type BackendStorageTransform func(
+ storage.Storage,
+ tsdb.Options,
+ instrument.Options,
+) storage.Storage
+
// Run runs the server programmatically given a filename for the configuration file.
func Run(runOpts RunOptions) {
rand.Seed(time.Now().UnixNano())
- cfg := runOpts.Config
+ var (
+ cfg = runOpts.Config
+ listenerOpts = xnet.NewListenerOptions()
+ )
logger, err := cfg.Logging.BuildLogger()
if err != nil {
@@ -152,9 +200,33 @@ func Run(runOpts RunOptions) {
xconfig.WarnOnDeprecation(cfg, logger)
- scope, closer, _, err := cfg.Metrics.NewRootScopeAndReporters(
+ if cfg.MultiProcess.Enabled {
+ runResult, err := multiProcessRun(cfg, logger, listenerOpts)
+ if err != nil {
+ logger = logger.With(zap.String("processID", multiProcessProcessID()))
+ logger.Fatal("failed to run", zap.Error(err))
+ }
+ if runResult.isParentCleanExit {
+ // Parent process clean exit.
+ os.Exit(0)
+ return
+ }
+
+ cfg = runResult.cfg
+ logger = runResult.logger
+ listenerOpts = runResult.listenerOpts
+ }
+
+ prometheusEngineRegistry := extprom.NewRegistry()
+ scope, closer, reporters, err := cfg.Metrics.NewRootScopeAndReporters(
instrument.NewRootScopeAndReportersOptions{
- OnError: func(err error) {
+ PrometheusExternalRegistries: []instrument.PrometheusExternalRegistry{
+ {
+ Registry: prometheusEngineRegistry,
+ SubScope: "coordinator_prometheus_engine",
+ },
+ },
+ PrometheusOnError: func(err error) {
// NB(r): Required otherwise collisions when registering metrics will
// cause a panic.
logger.Error("register metric error", zap.Error(err))
@@ -175,41 +247,58 @@ func Run(runOpts RunOptions) {
logger.Info("tracing disabled for m3query; set `tracing.backend` to enable")
}
+ opentracing.SetGlobalTracer(tracer)
+
instrumentOptions := instrument.NewOptions().
SetMetricsScope(scope).
SetLogger(logger).
SetTracer(tracer)
- opentracing.SetGlobalTracer(tracer)
+ if runOpts.InstrumentOptionsReadyCh != nil {
+ runOpts.InstrumentOptionsReadyCh <- InstrumentOptionsReady{
+ InstrumentOptions: instrumentOptions,
+ MetricsReporters: reporters,
+ }
+ }
// Close metrics scope
defer func() {
- if e := recover(); e != nil {
- logger.Warn("recovered from panic", zap.String("e", fmt.Sprintf("%v", e)))
- }
logger.Info("closing metrics scope")
if err := closer.Close(); err != nil {
logger.Error("unable to close metrics scope", zap.Error(err))
}
}()
- buildInfoOpts := instrumentOptions.SetMetricsScope(
- instrumentOptions.MetricsScope().SubScope("build_info"))
- buildReporter := instrument.NewBuildReporter(buildInfoOpts)
+ buildReporter := instrument.NewBuildReporter(instrumentOptions)
if err := buildReporter.Start(); err != nil {
logger.Fatal("could not start build reporter", zap.Error(err))
}
defer buildReporter.Stop()
+ storageRestrictByTags, _, err := cfg.Query.RestrictTagsAsStorageRestrictByTag()
+ if err != nil {
+ logger.Fatal("could not parse query restrict tags config", zap.Error(err))
+ }
+
var (
- backendStorage storage.Storage
- clusterClient clusterclient.Client
- downsampler downsample.Downsampler
- fetchOptsBuilderCfg = cfg.Limits.PerQuery.AsFetchOptionsBuilderOptions()
- fetchOptsBuilder = handleroptions.NewFetchOptionsBuilder(fetchOptsBuilderCfg)
- queryCtxOpts = models.QueryContextOptions{
- LimitMaxTimeseries: fetchOptsBuilderCfg.Limit,
+ backendStorage storage.Storage
+ clusterClient clusterclient.Client
+ downsampler downsample.Downsampler
+ fetchOptsBuilderLimitsOpts = cfg.Limits.PerQuery.AsFetchOptionsBuilderLimitsOptions()
+ fetchOptsBuilder = handleroptions.NewFetchOptionsBuilder(
+ handleroptions.FetchOptionsBuilderOptions{
+ Limits: fetchOptsBuilderLimitsOpts,
+ RestrictByTag: storageRestrictByTags,
+ })
+ queryCtxOpts = models.QueryContextOptions{
+ LimitMaxTimeseries: fetchOptsBuilderLimitsOpts.SeriesLimit,
+ LimitMaxDocs: fetchOptsBuilderLimitsOpts.DocsLimit,
+ RequireExhaustive: fetchOptsBuilderLimitsOpts.RequireExhaustive,
+ }
+
+ matchOptions = queryconsolidators.MatchOptions{
+ MatchType: cfg.Query.ConsolidationConfiguration.MatchType,
}
)
@@ -228,8 +317,7 @@ func Run(runOpts RunOptions) {
instrumentOptions,
cfg.ReadWorkerPool,
cfg.WriteWorkerPool,
- scope,
- )
+ scope)
if err != nil {
logger.Fatal("could not create worker pools", zap.Error(err))
}
@@ -244,16 +332,30 @@ func Run(runOpts RunOptions) {
SetLookbackDuration(lookbackDuration).
SetConsolidationFunc(consolidators.TakeLast).
SetReadWorkerPool(readWorkerPool).
- SetWriteWorkerPool(writeWorkerPool)
+ SetWriteWorkerPool(writeWorkerPool).
+ SetSeriesConsolidationMatchOptions(matchOptions)
if runOpts.ApplyCustomTSDBOptions != nil {
tsdbOpts = runOpts.ApplyCustomTSDBOptions(tsdbOpts)
}
- if cfg.Backend == config.GRPCStorageType {
+ serveOptions := serve.NewOptions(instrumentOptions)
+ for i, transform := range runOpts.AggregatorServerOptions {
+ if opts, err := transform(serveOptions); err != nil {
+ logger.Fatal("could not apply transform",
+ zap.Int("index", i), zap.Error(err))
+ } else {
+ serveOptions = opts
+ }
+ }
+
+ rwOpts := serveOptions.RWOptions()
+ switch cfg.Backend {
+ case config.GRPCStorageType:
// For grpc backend, we need to setup only the grpc client and a storage
// accompanying that client.
- poolWrapper := pools.NewPoolsWrapper(pools.BuildIteratorPools())
+ poolWrapper := pools.NewPoolsWrapper(
+ pools.BuildIteratorPools(pools.BuildIteratorPoolsOptions{}))
remoteOpts := config.RemoteOptionsFromConfig(cfg.RPC)
remotes, enabled, err := remoteClient(poolWrapper, remoteOpts,
tsdbOpts, instrumentOptions)
@@ -271,13 +373,30 @@ func Run(runOpts RunOptions) {
)
backendStorage = fanout.NewStorage(remotes, r, w, c,
- instrumentOptions)
+ tagOptions, instrumentOptions)
logger.Info("setup grpc backend")
- } else {
+
+ case config.NoopEtcdStorageType:
+ backendStorage = storage.NewNoopStorage()
+ mgmt := cfg.ClusterManagement
+
+ if mgmt == nil || len(mgmt.Etcd.ETCDClusters) == 0 {
+ logger.Fatal("must specify cluster management config and at least one etcd cluster")
+ }
+
+ opts := mgmt.Etcd.NewOptions()
+ clusterClient, err = etcdclient.NewConfigServiceClient(opts)
+ if err != nil {
+ logger.Fatal("error constructing etcd client", zap.Error(err))
+ }
+ logger.Info("setup noop storage backend with etcd")
+
+ // Empty backend defaults to M3DB.
+ case "":
// For m3db backend, we need to make connections to the m3db cluster
// which generates a session and use the storage with the session.
m3dbClusters, m3dbPoolWrapper, err = initClusters(cfg,
- runOpts.DBClient, instrumentOptions)
+ runOpts.DBClient, instrumentOptions, tsdbOpts.CustomAdminOptions())
if err != nil {
logger.Fatal("unable to init clusters", zap.Error(err))
}
@@ -285,23 +404,33 @@ func Run(runOpts RunOptions) {
var cleanup cleanupFn
backendStorage, clusterClient, downsampler, cleanup, err = newM3DBStorage(
cfg, m3dbClusters, m3dbPoolWrapper,
- runOpts, queryCtxOpts, tsdbOpts, instrumentOptions)
+ runOpts, queryCtxOpts, tsdbOpts,
+ runOpts.DownsamplerReadyCh, rwOpts, instrumentOptions)
if err != nil {
logger.Fatal("unable to setup m3db backend", zap.Error(err))
}
defer cleanup()
+
+ default:
+ logger.Fatal("unrecognized backend", zap.String("backend", string(cfg.Backend)))
}
- perQueryEnforcer, err := newConfiguredChainedEnforcer(&cfg, instrumentOptions)
+ chainedEnforcer, chainedEnforceCloser, err := newConfiguredChainedEnforcer(&cfg,
+ instrumentOptions)
if err != nil {
- logger.Fatal("unable to setup perQueryEnforcer", zap.Error(err))
+ logger.Fatal("unable to setup chained enforcer", zap.Error(err))
+ }
+
+ defer chainedEnforceCloser.Close()
+ if fn := runOpts.BackendStorageTransform; fn != nil {
+ backendStorage = fn(backendStorage, tsdbOpts, instrumentOptions)
}
engineOpts := executor.NewEngineOptions().
SetStore(backendStorage).
SetLookbackDuration(*cfg.LookbackDuration).
- SetGlobalEnforcer(perQueryEnforcer).
+ SetGlobalEnforcer(chainedEnforcer).
SetInstrumentOptions(instrumentOptions.
SetMetricsScope(instrumentOptions.MetricsScope().SubScope("engine")))
if fn := runOpts.CustomPromQLParseFunction; fn != nil {
@@ -310,7 +439,11 @@ func Run(runOpts RunOptions) {
}
engine := executor.NewEngine(engineOpts)
- downsamplerAndWriter, err := newDownsamplerAndWriter(backendStorage, downsampler)
+ downsamplerAndWriter, err := newDownsamplerAndWriter(
+ backendStorage,
+ downsampler,
+ instrumentOptions,
+ )
if err != nil {
logger.Fatal("unable to create new downsampler and writer", zap.Error(err))
}
@@ -330,16 +463,23 @@ func Run(runOpts RunOptions) {
}
}
+ prometheusEngine := newPromQLEngine(cfg.Query, prometheusEngineRegistry,
+ instrumentOptions)
handlerOptions, err := options.NewHandlerOptions(downsamplerAndWriter,
- tagOptions, engine, m3dbClusters, clusterClient, cfg, runOpts.DBConfig,
- perQueryEnforcer, fetchOptsBuilder, queryCtxOpts, instrumentOptions,
- cpuProfileDuration, []string{handleroptions.M3DBServiceName},
- serviceOptionDefaults)
+ tagOptions, engine, prometheusEngine, m3dbClusters, clusterClient, cfg,
+ runOpts.DBConfig, chainedEnforcer, fetchOptsBuilder, queryCtxOpts,
+ instrumentOptions, cpuProfileDuration, []string{handleroptions.M3DBServiceName},
+ serviceOptionDefaults, httpd.NewQueryRouter(), httpd.NewQueryRouter())
if err != nil {
logger.Fatal("unable to set up handler options", zap.Error(err))
}
- handler := httpd.NewHandler(handlerOptions, runOpts.CustomHandlers...)
+ if fn := runOpts.CustomHandlerOptions.OptionTransformFn; fn != nil {
+ handlerOptions = fn(handlerOptions)
+ }
+
+ customHandlers := runOpts.CustomHandlerOptions.CustomHandlers
+ handler := httpd.NewHandler(handlerOptions, customHandlers...)
if err := handler.RegisterRoutes(); err != nil {
logger.Fatal("unable to register routes", zap.Error(err))
}
@@ -357,7 +497,7 @@ func Run(runOpts RunOptions) {
}
}()
- listener, err := net.Listen("tcp", listenAddress)
+ listener, err := listenerOpts.Listen("tcp", listenAddress)
if err != nil {
logger.Fatal("unable to listen on listen address",
zap.String("address", listenAddress),
@@ -367,7 +507,7 @@ func Run(runOpts RunOptions) {
runOpts.ListenerCh <- listener
}
go func() {
- logger.Info("starting API server", zap.String("address", listenAddress))
+ logger.Info("starting API server", zap.Stringer("address", listener.Addr()))
if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed {
logger.Fatal("server serve error",
zap.String("address", listenAddress),
@@ -378,33 +518,41 @@ func Run(runOpts RunOptions) {
if cfg.Ingest != nil {
logger.Info("starting m3msg server",
zap.String("address", cfg.Ingest.M3Msg.Server.ListenAddress))
- ingester, err := cfg.Ingest.Ingester.NewIngester(backendStorage, instrumentOptions)
+ ingester, err := cfg.Ingest.Ingester.NewIngester(backendStorage,
+ tagOptions, instrumentOptions)
if err != nil {
logger.Fatal("unable to create ingester", zap.Error(err))
}
server, err := cfg.Ingest.M3Msg.NewServer(
- ingester.Ingest,
- instrumentOptions.SetMetricsScope(scope.SubScope("ingest-m3msg")),
- )
-
+ ingester.Ingest, rwOpts,
+ instrumentOptions.SetMetricsScope(scope.SubScope("ingest-m3msg")))
if err != nil {
logger.Fatal("unable to create m3msg server", zap.Error(err))
}
- if err := server.ListenAndServe(); err != nil {
+ listener, err := listenerOpts.Listen("tcp", cfg.Ingest.M3Msg.Server.ListenAddress)
+ if err != nil {
+ logger.Fatal("unable to open m3msg server", zap.Error(err))
+ }
+
+ if runOpts.M3MsgListenerCh != nil {
+ runOpts.M3MsgListenerCh <- listener
+ }
+
+ if err := server.Serve(listener); err != nil {
logger.Fatal("unable to listen on ingest server", zap.Error(err))
}
- logger.Info("started m3msg server ")
+ logger.Info("started m3msg server", zap.Stringer("addr", listener.Addr()))
defer server.Close()
} else {
logger.Info("no m3msg server configured")
}
if cfg.Carbon != nil && cfg.Carbon.Ingester != nil {
- server, ok := startCarbonIngestion(cfg.Carbon, instrumentOptions,
- logger, m3dbClusters, downsamplerAndWriter)
+ server, ok := startCarbonIngestion(cfg.Carbon, listenerOpts,
+ instrumentOptions, logger, m3dbClusters, downsamplerAndWriter)
if ok {
defer server.Close()
}
@@ -424,6 +572,8 @@ func newM3DBStorage(
runOpts RunOptions,
queryContextOptions models.QueryContextOptions,
tsdbOpts tsdb.Options,
+ downsamplerReadyCh chan<- struct{},
+ rwOpts xio.Options,
instrumentOptions instrument.Options,
) (storage.Storage, clusterclient.Client, downsample.Downsampler, cleanupFn, error) {
var (
@@ -485,8 +635,21 @@ func newM3DBStorage(
}
newDownsamplerFn := func() (downsample.Downsampler, error) {
- return newDownsampler(cfg.Downsample, clusterClient,
- fanoutStorage, autoMappingRules, tsdbOpts.TagOptions(), instrumentOptions)
+ downsampler, err := newDownsampler(
+ cfg.Downsample, clusterClient,
+ fanoutStorage, autoMappingRules,
+ tsdbOpts.TagOptions(), instrumentOptions, rwOpts)
+ if err != nil {
+ return nil, err
+ }
+
+ // Notify the downsampler ready channel that
+ // the downsampler has now been created and is ready.
+ if downsamplerReadyCh != nil {
+ downsamplerReadyCh <- struct{}{}
+ }
+
+ return downsampler, nil
}
if clusterClientWaitCh != nil {
@@ -532,6 +695,7 @@ func newDownsampler(
autoMappingRules []downsample.AutoMappingRule,
tagOptions models.TagOptions,
instrumentOpts instrument.Options,
+ rwOpts xio.Options,
) (downsample.Downsampler, error) {
// Namespace the downsampler metrics.
instrumentOpts = instrumentOpts.SetMetricsScope(
@@ -549,7 +713,7 @@ func newDownsampler(
}
tagEncoderOptions := serialize.NewTagEncoderOptions()
- tagDecoderOptions := serialize.NewTagDecoderOptions()
+ tagDecoderOptions := serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{})
tagEncoderPoolOptions := pool.NewObjectPoolOptions().
SetInstrumentOptions(instrumentOpts.
SetMetricsScope(instrumentOpts.MetricsScope().
@@ -558,22 +722,28 @@ func newDownsampler(
SetInstrumentOptions(instrumentOpts.
SetMetricsScope(instrumentOpts.MetricsScope().
SubScope("tag-decoder-pool")))
+ metricsAppenderPoolOptions := pool.NewObjectPoolOptions().
+ SetInstrumentOptions(instrumentOpts.
+ SetMetricsScope(instrumentOpts.MetricsScope().
+ SubScope("metrics-appender-pool")))
downsampler, err := cfg.NewDownsampler(downsample.DownsamplerOptions{
- Storage: storage,
- ClusterClient: clusterManagementClient,
- RulesKVStore: kvStore,
- AutoMappingRules: autoMappingRules,
- ClockOptions: clock.NewOptions(),
- InstrumentOptions: instrumentOpts,
- TagEncoderOptions: tagEncoderOptions,
- TagDecoderOptions: tagDecoderOptions,
- TagEncoderPoolOptions: tagEncoderPoolOptions,
- TagDecoderPoolOptions: tagDecoderPoolOptions,
- TagOptions: tagOptions,
+ Storage: storage,
+ ClusterClient: clusterManagementClient,
+ RulesKVStore: kvStore,
+ AutoMappingRules: autoMappingRules,
+ ClockOptions: clock.NewOptions(),
+ InstrumentOptions: instrumentOpts,
+ TagEncoderOptions: tagEncoderOptions,
+ TagDecoderOptions: tagDecoderOptions,
+ TagEncoderPoolOptions: tagEncoderPoolOptions,
+ TagDecoderPoolOptions: tagDecoderPoolOptions,
+ TagOptions: tagOptions,
+ MetricsAppenderPoolOptions: metricsAppenderPoolOptions,
+ RWOptions: rwOpts,
})
if err != nil {
- return nil, errors.Wrap(err, "unable to create downsampler")
+ return nil, fmt.Errorf("unable to create downsampler: %v", err)
}
return downsampler, nil
@@ -586,7 +756,7 @@ func newDownsamplerAutoMappingRules(
for _, namespace := range namespaces {
opts := namespace.Options()
attrs := opts.Attributes()
- if attrs.MetricsType == storage.AggregatedMetricsType {
+ if attrs.MetricsType == storagemetadata.AggregatedMetricsType {
downsampleOpts, err := opts.DownsampleOptions()
if err != nil {
errFmt := "unable to resolve downsample options for namespace: %v"
@@ -614,6 +784,7 @@ func initClusters(
cfg config.Configuration,
dbClientCh <-chan client.Client,
instrumentOpts instrument.Options,
+ customAdminOptions []client.CustomAdminOption,
) (m3.Clusters, *pools.PoolWrapper, error) {
instrumentOpts = instrumentOpts.
SetMetricsScope(instrumentOpts.MetricsScope().SubScope("m3db-client"))
@@ -627,13 +798,15 @@ func initClusters(
if len(cfg.Clusters) > 0 {
clusters, err = cfg.Clusters.NewClusters(instrumentOpts,
m3.ClustersStaticConfigurationOptions{
- AsyncSessions: true,
+ AsyncSessions: true,
+ CustomAdminOptions: customAdminOptions,
})
if err != nil {
return nil, nil, errors.Wrap(err, "unable to connect to clusters")
}
- poolWrapper = pools.NewPoolsWrapper(pools.BuildIteratorPools())
+ poolWrapper = pools.NewPoolsWrapper(
+ pools.BuildIteratorPools(pools.BuildIteratorPoolsOptions{}))
} else {
localCfg := cfg.Local
if localCfg == nil {
@@ -657,7 +830,8 @@ func initClusters(
clusters, err = clustersCfg.NewClusters(instrumentOpts,
m3.ClustersStaticConfigurationOptions{
- ProvidedSession: session,
+ ProvidedSession: session,
+ CustomAdminOptions: customAdminOptions,
})
if err != nil {
return nil, nil, errors.Wrap(err, "unable to connect to clusters")
@@ -771,7 +945,7 @@ func newStorages(
}
fanoutStorage := fanout.NewStorage(stores, readFilter, writeFilter,
- completeTagsFilter, instrumentOpts)
+ completeTagsFilter, opts.TagOptions(), instrumentOpts)
return fanoutStorage, cleanup, nil
}
@@ -866,6 +1040,7 @@ func startGRPCServer(
func startCarbonIngestion(
cfg *config.CarbonConfiguration,
+ listenerOpts xnet.ListenerOptions,
iOpts instrument.Options,
logger *zap.Logger,
m3dbClusters m3.Clusters,
@@ -969,7 +1144,9 @@ func startCarbonIngestion(
// Start server.
var (
- serverOpts = xserver.NewOptions().SetInstrumentOptions(carbonIOpts)
+ serverOpts = xserver.NewOptions().
+ SetInstrumentOptions(carbonIOpts).
+ SetListenerOptions(listenerOpts)
carbonListenAddress = ingesterCfg.ListenAddressOrDefault()
carbonServer = xserver.NewServer(carbonListenAddress, ingester, serverOpts)
)
@@ -989,7 +1166,11 @@ func startCarbonIngestion(
return carbonServer, true
}
-func newDownsamplerAndWriter(storage storage.Storage, downsampler downsample.Downsampler) (ingest.DownsamplerAndWriter, error) {
+func newDownsamplerAndWriter(
+ storage storage.Storage,
+ downsampler downsample.Downsampler,
+ iOpts instrument.Options,
+) (ingest.DownsamplerAndWriter, error) {
// Make sure the downsampler and writer gets its own PooledWorkerPool and that it's not shared with any other
// codepaths because PooledWorkerPools can deadlock if used recursively.
downAndWriterWorkerPoolOpts := xsync.NewPooledWorkerPoolOptions().
@@ -1002,5 +1183,22 @@ func newDownsamplerAndWriter(storage storage.Storage, downsampler downsample.Dow
}
downAndWriteWorkerPool.Init()
- return ingest.NewDownsamplerAndWriter(storage, downsampler, downAndWriteWorkerPool), nil
+ return ingest.NewDownsamplerAndWriter(storage, downsampler, downAndWriteWorkerPool, iOpts), nil
+}
+
+func newPromQLEngine(
+ cfg config.QueryConfiguration,
+ registry *extprom.Registry,
+ instrumentOpts instrument.Options,
+) *prometheuspromql.Engine {
+ var (
+ kitLogger = kitlogzap.NewZapSugarLogger(instrumentOpts.Logger(), zapcore.InfoLevel)
+ opts = prometheuspromql.EngineOpts{
+ Logger: log.With(kitLogger, "component", "prometheus_engine"),
+ Reg: registry,
+ MaxSamples: cfg.Prometheus.MaxSamplesPerQueryOrDefault(),
+ Timeout: cfg.TimeoutOrDefault(),
+ }
+ )
+ return prometheuspromql.NewEngine(opts)
}
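As a usage note, a sketch of why the engine takes a dedicated external registry (illustrative; cfg and instrumentOptions as in Run):

    // Engine internals register on this registry only, and
    // NewRootScopeAndReporters exports it under the
    // "coordinator_prometheus_engine" subscope, so engine metrics do not
    // collide with tally-registered ones.
    reg := extprom.NewRegistry()
    engine := newPromQLEngine(cfg.Query, reg, instrumentOptions)
    _ = engine
    if mfs, err := reg.Gather(); err == nil {
        _ = mfs // engine metric families, gatherable independently
    }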
diff --git a/src/query/server/query_test.go b/src/query/server/query_test.go
index b8c628a2a4..9563f98f58 100644
--- a/src/query/server/query_test.go
+++ b/src/query/server/query_test.go
@@ -31,21 +31,34 @@ import (
"testing"
"time"
+ clusterclient "github.com/m3db/m3/src/cluster/client"
+ "github.com/m3db/m3/src/cluster/kv"
"github.com/m3db/m3/src/cmd/services/m3query/config"
"github.com/m3db/m3/src/dbnode/client"
+ "github.com/m3db/m3/src/metrics/generated/proto/metricpb"
+ "github.com/m3db/m3/src/metrics/generated/proto/rulepb"
+ "github.com/m3db/m3/src/metrics/policy"
+ "github.com/m3db/m3/src/msg/generated/proto/msgpb"
+ m3msgproto "github.com/m3db/m3/src/msg/protocol/proto"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/remote"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/remote/test"
"github.com/m3db/m3/src/query/cost"
rpc "github.com/m3db/m3/src/query/generated/proto/rpcpb"
"github.com/m3db/m3/src/query/storage/m3"
+ xclock "github.com/m3db/m3/src/x/clock"
+ "github.com/m3db/m3/src/x/close"
xconfig "github.com/m3db/m3/src/x/config"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
+ "github.com/m3db/m3/src/x/serialize"
xtest "github.com/m3db/m3/src/x/test"
+ "github.com/gogo/protobuf/proto"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/uber-go/tally"
+ "go.uber.org/atomic"
"google.golang.org/grpc"
)
@@ -71,6 +84,28 @@ clusters:
- namespace: prometheus_metrics
type: unaggregated
retention: 48h
+ - namespace: prometheus_metrics_1m_aggregated
+ type: aggregated
+ retention: 120h
+ resolution: 1m
+ downsample:
+ all: false
+
+ingest:
+ ingester:
+ workerPoolSize: 100
+ opPool:
+ size: 100
+ retry:
+ maxRetries: 3
+ jitter: true
+ logSampleRate: 0.01
+ m3msg:
+ server:
+ listenAddress: "0.0.0.0:0"
+ retry:
+ maxBackoff: 10s
+ jitter: true
tagOptions:
metricName: "_new"
@@ -79,18 +114,17 @@ tagOptions:
readWorkerPoolPolicy:
grow: true
size: 100
- shards: 1000
+ shards: 100
killProbability: 0.3
writeWorkerPoolPolicy:
grow: true
size: 100
- shards: 1000
+ shards: 100
killProbability: 0.3
-
`
-func TestRun(t *testing.T) {
+func TestWrite(t *testing.T) {
ctrl := gomock.NewController(xtest.Reporter{T: t})
defer ctrl.Finish()
@@ -123,16 +157,16 @@ func TestRun(t *testing.T) {
gomock.Any(),
nil)
}
- session.EXPECT().Close()
+ session.EXPECT().Close().AnyTimes()
dbClient := client.NewMockClient(ctrl)
- dbClient.EXPECT().DefaultSession().Return(session, nil)
+ dbClient.EXPECT().DefaultSession().Return(session, nil).AnyTimes()
cfg.Clusters[0].NewClientFromConfig = m3.NewClientFromConfig(
func(
cfg client.Configuration,
params client.ConfigurationParameters,
- custom ...client.CustomOption,
+ custom ...client.CustomAdminOption,
) (client.Client, error) {
return dbClient, nil
})
@@ -140,15 +174,41 @@ func TestRun(t *testing.T) {
interruptCh := make(chan error, 1)
doneCh := make(chan struct{}, 1)
listenerCh := make(chan net.Listener, 1)
+
+ rulesNamespacesValue := kv.NewMockValue(ctrl)
+ rulesNamespacesValue.EXPECT().Version().Return(0).AnyTimes()
+ rulesNamespacesValue.EXPECT().Unmarshal(gomock.Any()).DoAndReturn(func(v proto.Message) error {
+ msg := v.(*rulepb.Namespaces)
+ *msg = rulepb.Namespaces{}
+ return nil
+ })
+ rulesNamespacesWatchable := kv.NewValueWatchable()
+ rulesNamespacesWatchable.Update(rulesNamespacesValue)
+ _, rulesNamespacesWatch, err := rulesNamespacesWatchable.Watch()
+ require.NoError(t, err)
+ kvClient := kv.NewMockStore(ctrl)
+ kvClient.EXPECT().Watch(gomock.Any()).Return(rulesNamespacesWatch, nil).AnyTimes()
+ clusterClient := clusterclient.NewMockClient(ctrl)
+ clusterClient.EXPECT().KV().Return(kvClient, nil).AnyTimes()
+ clusterClientCh := make(chan clusterclient.Client, 1)
+ clusterClientCh <- clusterClient
+
+ downsamplerReadyCh := make(chan struct{}, 1)
+
go func() {
Run(RunOptions{
- Config: cfg,
- InterruptCh: interruptCh,
- ListenerCh: listenerCh,
+ Config: cfg,
+ InterruptCh: interruptCh,
+ ListenerCh: listenerCh,
+ ClusterClient: clusterClientCh,
+ DownsamplerReadyCh: downsamplerReadyCh,
})
doneCh <- struct{}{}
}()
+ // Wait for downsampler to be ready.
+ <-downsamplerReadyCh
+
// Wait for listener
listener := <-listenerCh
addr := listener.Addr().String()
@@ -171,6 +231,149 @@ func TestRun(t *testing.T) {
<-doneCh
}
+// TestIngest exercises an M3Msg metric being ingested by the coordinator. It
+// also makes sure that the tag options are correctly propagated from the
+// config all the way to the M3Msg ingester, so that the write to the DB
+// includes the correctly formed ID.
+func TestIngest(t *testing.T) {
+ ctrl := gomock.NewController(xtest.Reporter{T: t})
+ defer ctrl.Finish()
+
+ configFile, close := newTestFile(t, "config.yaml", configYAML)
+ defer close()
+
+ var cfg config.Configuration
+ err := xconfig.LoadFile(&cfg, configFile.Name(), xconfig.Options{})
+ require.NoError(t, err)
+
+ // Override the client creation
+ require.Equal(t, 1, len(cfg.Clusters))
+
+ numWrites := atomic.NewInt32(0)
+
+ session := client.NewMockSession(ctrl)
+ session.EXPECT().
+ WriteTagged(ident.NewIDMatcher("prometheus_metrics_1m_aggregated"),
+ ident.NewIDMatcher(`{_new="first",biz="baz",foo="bar"}`),
+ gomock.Any(),
+ gomock.Any(),
+ 42.0,
+ gomock.Any(),
+ nil).
+ Do(func(_, _, _, _, _, _, _ interface{}) {
+ numWrites.Add(1)
+ })
+ session.EXPECT().Close().AnyTimes()
+
+ dbClient := client.NewMockClient(ctrl)
+ dbClient.EXPECT().DefaultSession().Return(session, nil).AnyTimes()
+
+ cfg.Clusters[0].NewClientFromConfig = m3.NewClientFromConfig(
+ func(
+ cfg client.Configuration,
+ params client.ConfigurationParameters,
+ custom ...client.CustomAdminOption,
+ ) (client.Client, error) {
+ return dbClient, nil
+ })
+
+ interruptCh := make(chan error, 1)
+ doneCh := make(chan struct{}, 1)
+ listenerCh := make(chan net.Listener, 1)
+ m3msgListenerCh := make(chan net.Listener, 1)
+
+ rulesNamespacesValue := kv.NewMockValue(ctrl)
+ rulesNamespacesValue.EXPECT().Version().Return(0).AnyTimes()
+ rulesNamespacesValue.EXPECT().Unmarshal(gomock.Any()).DoAndReturn(func(v proto.Message) error {
+ msg := v.(*rulepb.Namespaces)
+ *msg = rulepb.Namespaces{}
+ return nil
+ })
+ rulesNamespacesWatchable := kv.NewValueWatchable()
+ rulesNamespacesWatchable.Update(rulesNamespacesValue)
+ _, rulesNamespacesWatch, err := rulesNamespacesWatchable.Watch()
+ require.NoError(t, err)
+ kvClient := kv.NewMockStore(ctrl)
+ kvClient.EXPECT().Watch(gomock.Any()).Return(rulesNamespacesWatch, nil).AnyTimes()
+ clusterClient := clusterclient.NewMockClient(ctrl)
+ clusterClient.EXPECT().KV().Return(kvClient, nil).AnyTimes()
+ clusterClientCh := make(chan clusterclient.Client, 1)
+ clusterClientCh <- clusterClient
+
+ downsamplerReadyCh := make(chan struct{}, 1)
+
+ go func() {
+ Run(RunOptions{
+ Config: cfg,
+ InterruptCh: interruptCh,
+ ListenerCh: listenerCh,
+ M3MsgListenerCh: m3msgListenerCh,
+ ClusterClient: clusterClientCh,
+ DownsamplerReadyCh: downsamplerReadyCh,
+ })
+ doneCh <- struct{}{}
+ }()
+
+ // Wait for downsampler to be ready.
+ <-downsamplerReadyCh
+
+ // Wait for listener
+ listener := <-listenerCh
+ addr := listener.Addr().String()
+
+ // Wait for server to come up
+ waitForServerHealthy(t, addr)
+
+ // Send ingest message.
+ tagEncoderPool := serialize.NewTagEncoderPool(serialize.NewTagEncoderOptions(), nil)
+ tagEncoderPool.Init()
+ tagEncoder := tagEncoderPool.Get()
+ err = tagEncoder.Encode(ident.MustNewTagStringsIterator(
+ "_new", "first",
+ "biz", "baz",
+ "foo", "bar"))
+ require.NoError(t, err)
+ id, ok := tagEncoder.Data()
+ require.True(t, ok)
+ sp, err := policy.MustParseStoragePolicy("1m:120h").Proto()
+ require.NoError(t, err)
+
+ // Marshal the aggregated metric payload.
+ message, err := proto.Marshal(&metricpb.AggregatedMetric{
+ Metric: metricpb.TimedMetricWithStoragePolicy{
+ TimedMetric: metricpb.TimedMetric{
+ Type: metricpb.MetricType_GAUGE,
+ Id: id.Bytes(),
+ TimeNanos: time.Now().UnixNano(),
+ Value: 42,
+ },
+ StoragePolicy: *sp,
+ },
+ })
+ require.NoError(t, err)
+
+ // Encode as m3msg protobuf message.
+ encoder := m3msgproto.NewEncoder(m3msgproto.NewOptions())
+ err = encoder.Encode(&msgpb.Message{
+ Value: message,
+ })
+ require.NoError(t, err)
+ m3msgListener := <-m3msgListenerCh
+ conn, err := net.Dial("tcp", m3msgListener.Addr().String())
+ require.NoError(t, err)
+ _, err = conn.Write(encoder.Bytes())
+ require.NoError(t, err)
+
+ // Now wait for write.
+ xclock.WaitUntil(func() bool {
+ return numWrites.Load() == 1
+ }, 30*time.Second)
+
+ // Ensure closing the server performs as expected.
+ interruptCh <- fmt.Errorf("interrupt")
+ <-doneCh
+}
+
type closeFn func()
func newTestFile(t *testing.T, fileName, contents string) (*os.File, closeFn) {
@@ -309,8 +512,13 @@ func TestNewPerQueryEnforcer(t *testing.T) {
Global cost.ChainedEnforcer
Query cost.ChainedEnforcer
Block cost.ChainedEnforcer
+ Closer close.SimpleCloser
}
+ scope := tally.NewTestScope("", nil)
+ instrumentOpts := instrument.NewTestOptions(t).
+ SetMetricsScope(scope)
+
setup := func(t *testing.T, globalLimit, queryLimit int) testContext {
cfg := &config.Configuration{
Limits: config.LimitsConfiguration{
@@ -323,7 +531,7 @@ func TestNewPerQueryEnforcer(t *testing.T) {
},
}
- global, err := newConfiguredChainedEnforcer(cfg, instrument.NewOptions())
+ global, closer, err := newConfiguredChainedEnforcer(cfg, instrumentOpts)
require.NoError(t, err)
queryLvl := global.Child(cost.QueryLevel)
@@ -333,13 +541,15 @@ func TestNewPerQueryEnforcer(t *testing.T) {
Global: global,
Query: queryLvl,
Block: blockLvl,
+ Closer: closer,
}
}
tctx := setup(t, 100, 10)
+ defer tctx.Closer.Close()
- // spot check that limits are setup properly for each level
- r := tctx.Block.Add(11)
+ // Spot check that limits are set up properly for each level.
+ r := tctx.Query.Add(11)
require.Error(t, r.Error)
floatsEqual := func(f1, f2 float64) {
@@ -354,6 +564,72 @@ func TestNewPerQueryEnforcer(t *testing.T) {
r, _ = tctx.Global.State()
floatsEqual(float64(r.Cost), 11)
require.NoError(t, r.Error)
+
+ // Wait for stats reporting to start.
+ start := time.Now()
+ for time.Since(start) < 15*time.Second {
+ gauges := scope.Snapshot().Gauges()
+ globalEnabled, globalOk := gauges["cost.limits.enabled+limiter=global"]
+ queryEnabled, queryOk := gauges["cost.limits.enabled+limiter=query"]
+ if globalOk && queryOk && globalEnabled.Value() == 1 && queryEnabled.Value() == 1 {
+ break
+ }
+
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ // Check stats.
+ expectCounterValues := map[string]int64{
+ "cost.reporter.over_datapoints_limit+enabled=false,limiter=global": 0,
+ "cost.reporter.over_datapoints_limit+enabled=true,limiter=global": 0,
+ "cost.reporter.datapoints_counter+limiter=global": 11,
+ "cost.reporter.over_datapoints_limit+enabled=false,limiter=query": 0,
+ "cost.reporter.over_datapoints_limit+enabled=true,limiter=query": 1,
+ }
+ expectGaugeValues := map[string]float64{
+ "cost.limits.threshold+limiter=global": 100,
+ "cost.limits.enabled+limiter=global": 1,
+ "cost.reporter.datapoints+limiter=global": 11,
+ "cost.limits.threshold+limiter=query": 10,
+ "cost.limits.enabled+limiter=query": 1,
+ }
+
+ snapshot := scope.Snapshot()
+ actualCounterValues := make(map[string]int64)
+ for k, v := range snapshot.Counters() {
+ actualCounterValues[k] = v.Value()
+
+ expected, ok := expectCounterValues[k]
+ if !ok {
+ continue
+ }
+
+ // Check match.
+ assert.Equal(t, expected, v.Value(),
+ fmt.Sprintf("stat mismatch: stat=%s", k))
+
+ delete(expectCounterValues, k)
+ }
+ assert.Equal(t, 0, len(expectCounterValues),
+ fmt.Sprintf("missing stats: %+v", expectCounterValues))
+
+ actualGaugeValues := make(map[string]float64)
+ for k, v := range snapshot.Gauges() {
+ actualGaugeValues[k] = v.Value()
+
+ expected, ok := expectGaugeValues[k]
+ if !ok {
+ continue
+ }
+
+ // Check match.
+ assert.Equal(t, expected, v.Value(),
+ fmt.Sprintf("stat mismatch: stat=%s", k))
+
+ delete(expectGaugeValues, k)
+ }
+ assert.Equal(t, 0, len(expectGaugeValues),
+ fmt.Sprintf("missing stats: %+v", expectGaugeValues))
}
var _ rpc.QueryServer = &queryServer{}
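For reference, the wire hand-off that TestIngest drives can be condensed into a helper like the following — a minimal sketch using only the calls the test itself makes; the package and function names are illustrative.

package ingestexample

import (
	"net"

	"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
	"github.com/m3db/m3/src/msg/generated/proto/msgpb"
	m3msgproto "github.com/m3db/m3/src/msg/protocol/proto"

	"github.com/gogo/protobuf/proto"
)

// sendM3MsgMetric marshals an already-built aggregated metric, wraps it in
// an m3msg protobuf envelope, and writes it to the coordinator's m3msg TCP
// listener at addr (the address published on M3MsgListenerCh in the test).
func sendM3MsgMetric(addr string, metric *metricpb.AggregatedMetric) error {
	payload, err := proto.Marshal(metric)
	if err != nil {
		return err
	}

	encoder := m3msgproto.NewEncoder(m3msgproto.NewOptions())
	if err := encoder.Encode(&msgpb.Message{Value: payload}); err != nil {
		return err
	}

	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return err
	}
	defer conn.Close()

	_, err = conn.Write(encoder.Bytes())
	return err
}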
diff --git a/src/query/storage/converter.go b/src/query/storage/converter.go
index 7dcb3e3f34..5c20f89521 100644
--- a/src/query/storage/converter.go
+++ b/src/query/storage/converter.go
@@ -29,13 +29,15 @@ import (
"github.com/m3db/m3/src/query/generated/proto/prompb"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/ts"
+
+ "github.com/prometheus/common/model"
)
// The default name for the name and bucket tags in Prometheus metrics.
// This can be overwritten by setting tagOptions in the config.
var (
- promDefaultName = []byte("__name__")
- promDefaultBucketName = []byte("le")
+ promDefaultName = []byte(model.MetricNameLabel) // __name__
+ promDefaultBucketName = []byte(model.BucketLabel) // le
)
// PromLabelsToM3Tags converts Prometheus labels to M3 tags
@@ -64,6 +66,37 @@ func PromLabelsToM3Tags(
return tags.AddTags(tagList)
}
+// PromTimeSeriesToSeriesAttributes extracts the series info from a Prometheus
+// timeseries.
+func PromTimeSeriesToSeriesAttributes(series prompb.TimeSeries) (ts.SeriesAttributes, error) {
+ var (
+ sourceType ts.SourceType
+ metricType ts.MetricType
+ )
+ switch series.Source {
+ case prompb.Source_PROMETHEUS:
+ sourceType = ts.SourceTypePrometheus
+ case prompb.Source_GRAPHITE:
+ sourceType = ts.SourceTypeGraphite
+ default:
+ return ts.SeriesAttributes{}, fmt.Errorf("invalid source type %v", series.Source)
+ }
+ switch series.Type {
+ case prompb.Type_COUNTER:
+ metricType = ts.MetricTypeCounter
+ case prompb.Type_GAUGE:
+ metricType = ts.MetricTypeGauge
+ case prompb.Type_TIMER:
+ metricType = ts.MetricTypeTimer
+ default:
+ return ts.SeriesAttributes{}, fmt.Errorf("invalid metric type %v", series.Type)
+ }
+ return ts.SeriesAttributes{
+ Type: metricType,
+ Source: sourceType,
+ }, nil
+}
+
// PromSamplesToM3Datapoints converts Prometheus samples to M3 datapoints
func PromSamplesToM3Datapoints(samples []prompb.Sample) ts.Datapoints {
datapoints := make(ts.Datapoints, 0, len(samples))
@@ -82,10 +115,17 @@ func PromReadQueryToM3(query *prompb.Query) (*FetchQuery, error) {
return nil, err
}
+ start := PromTimestampToTime(query.StartTimestampMs)
+ end := PromTimestampToTime(query.EndTimestampMs)
+ if start.After(end) {
+ start = time.Time{}
+ end = time.Now()
+ }
+
return &FetchQuery{
TagMatchers: tagMatchers,
- Start: PromTimestampToTime(query.StartTimestampMs),
- End: PromTimestampToTime(query.EndTimestampMs),
+ Start: start,
+ End: end,
}, nil
}
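The guard above means an inverted remote read range degrades to an open-ended query instead of surfacing an error; a small usage sketch, mirroring the new converter test below:

package main

import (
	"fmt"
	"log"

	"github.com/m3db/m3/src/query/generated/proto/prompb"
	"github.com/m3db/m3/src/query/storage"
)

func main() {
	// A range whose start is after its end is normalized rather than rejected.
	q, err := storage.PromReadQueryToM3(&prompb.Query{
		StartTimestampMs: 100,
		EndTimestampMs:   -100,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(q.Start.IsZero()) // true: open start
	fmt.Println(q.End)            // approximately time.Now()
}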
diff --git a/src/query/storage/converter_test.go b/src/query/storage/converter_test.go
index 733bdef9d2..b684dd23ef 100644
--- a/src/query/storage/converter_test.go
+++ b/src/query/storage/converter_test.go
@@ -71,6 +71,19 @@ var (
value = []byte("bar")
)
+func TestPromReadQueryToM3BadStartEnd(t *testing.T) {
+ q, err := PromReadQueryToM3(&prompb.Query{
+ StartTimestampMs: 100,
+ EndTimestampMs: -100,
+ })
+
+ require.NoError(t, err)
+ assert.Equal(t, time.Time{}, q.Start)
+ // NB: check that end is approximately now.
+ diff := math.Abs(float64(time.Since(q.End)))
+ assert.True(t, diff < float64(time.Minute))
+}
+
func TestPromReadQueryToM3(t *testing.T) {
tests := []struct {
name string
diff --git a/src/query/storage/fanout/storage.go b/src/query/storage/fanout/storage.go
index 5c49fe0d02..64593a24c3 100644
--- a/src/query/storage/fanout/storage.go
+++ b/src/query/storage/fanout/storage.go
@@ -32,6 +32,7 @@ import (
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/policy/filter"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/util/execution"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/instrument"
@@ -39,13 +40,17 @@ import (
"go.uber.org/zap"
)
-const initMetricMapSize = 10
+const (
+ initMetricMapSize = 10
+ fetchDataWarningError = "fetch_data_error"
+)
type fanoutStorage struct {
stores []storage.Storage
fetchFilter filter.Storage
writeFilter filter.Storage
completeTagsFilter filter.StorageCompleteTags
+ tagOptions models.TagOptions
instrumentOpts instrument.Options
}
@@ -55,6 +60,7 @@ func NewStorage(
fetchFilter filter.Storage,
writeFilter filter.Storage,
completeTagsFilter filter.StorageCompleteTags,
+ tagOptions models.TagOptions,
instrumentOpts instrument.Options,
) storage.Storage {
return &fanoutStorage{
@@ -62,6 +68,7 @@ func NewStorage(
fetchFilter: fetchFilter,
writeFilter: writeFilter,
completeTagsFilter: completeTagsFilter,
+ tagOptions: tagOptions,
instrumentOpts: instrumentOpts,
}
}
@@ -97,7 +104,7 @@ func (s *fanoutStorage) FetchProm(
if err != nil {
if warning, err := storage.IsWarning(store, err); warning {
- resultMeta.AddWarning(store.Name(), "fetch_prom_warning")
+ resultMeta.AddWarning(store.Name(), fetchDataWarningError)
numWarning++
s.instrumentOpts.Logger().Warn(
"partial results: fanout to store returned warning",
@@ -178,7 +185,7 @@ func (s *fanoutStorage) FetchBlocks(
if err != nil {
if warning, err := storage.IsWarning(store, err); warning {
- resultMeta.AddWarning(store.Name(), "fetch_blocks_warning")
+ resultMeta.AddWarning(store.Name(), fetchDataWarningError)
numWarning++
s.instrumentOpts.Logger().Warn(
"partial results: fanout to store returned warning",
@@ -283,7 +290,7 @@ func (s *fanoutStorage) SearchSeries(
results, err := store.SearchSeries(ctx, query, options)
if err != nil {
if warning, err := storage.IsWarning(store, err); warning {
- metadata.AddWarning(store.Name(), "search_series_warning")
+ metadata.AddWarning(store.Name(), fetchDataWarningError)
s.instrumentOpts.Logger().Warn(
"partial results: fanout to store returned warning",
zap.Error(err),
@@ -329,20 +336,21 @@ func (s *fanoutStorage) CompleteTags(
ctx context.Context,
query *storage.CompleteTagsQuery,
options *storage.FetchOptions,
-) (*storage.CompleteTagsResult, error) {
+) (*consolidators.CompleteTagsResult, error) {
stores := filterCompleteTagsStores(s.stores, s.completeTagsFilter, *query)
// short circuit complete tags
if len(stores) == 1 {
return stores[0].CompleteTags(ctx, query, options)
}
- accumulatedTags := storage.NewCompleteTagsResultBuilder(query.CompleteNameOnly)
+ accumulatedTags := consolidators.NewCompleteTagsResultBuilder(
+ query.CompleteNameOnly, s.tagOptions)
metadata := block.NewResultMetadata()
for _, store := range stores {
result, err := store.CompleteTags(ctx, query, options)
if err != nil {
if warning, err := storage.IsWarning(store, err); warning {
- metadata.AddWarning(store.Name(), "complete_tags_warning")
+ metadata.AddWarning(store.Name(), fetchDataWarningError)
s.instrumentOpts.Logger().Warn(
"partial results: fanout to store returned warning",
zap.Error(err),
@@ -371,9 +379,9 @@ func (s *fanoutStorage) CompleteTags(
}
func applyOptions(
- result storage.CompleteTagsResult,
+ result consolidators.CompleteTagsResult,
opts *storage.FetchOptions,
-) storage.CompleteTagsResult {
+) consolidators.CompleteTagsResult {
if opts.RestrictQueryOptions == nil {
return result
}
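NewStorage gains a required tag-options argument, which it forwards to the complete-tags result builder when merging results across stores; a sketch of the updated call, assuming the stores and filters are already constructed:

package example

import (
	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/policy/filter"
	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/query/storage/fanout"
	"github.com/m3db/m3/src/x/instrument"
)

// newFanoutStorage shows the updated constructor: tag options are now a
// required argument, used by consolidators.NewCompleteTagsResultBuilder
// when merging complete-tags results from multiple stores.
func newFanoutStorage(
	stores []storage.Storage,
	fetchFilter, writeFilter filter.Storage,
	completeTagsFilter filter.StorageCompleteTags,
) storage.Storage {
	return fanout.NewStorage(stores, fetchFilter, writeFilter,
		completeTagsFilter, models.NewTagOptions(), instrument.NewOptions())
}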
diff --git a/src/query/storage/fanout/storage_test.go b/src/query/storage/fanout/storage_test.go
index b78c59be42..8593f2020f 100644
--- a/src/query/storage/fanout/storage_test.go
+++ b/src/query/storage/fanout/storage_test.go
@@ -36,12 +36,15 @@ import (
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/policy/filter"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/test/m3"
"github.com/m3db/m3/src/query/test/seriesiter"
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
xtest "github.com/m3db/m3/src/x/test"
+ xtime "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
@@ -77,6 +80,23 @@ type fetchResponse struct {
err error
}
+func newTestIteratorPools(ctrl *gomock.Controller) encoding.IteratorPools {
+ pools := encoding.NewMockIteratorPools(ctrl)
+
+ mutablePool := encoding.NewMockMutableSeriesIteratorsPool(ctrl)
+ mutablePool.EXPECT().
+ Get(gomock.Any()).
+ DoAndReturn(func(size int) encoding.MutableSeriesIterators {
+ return encoding.NewSeriesIterators(make([]encoding.SeriesIterator, 0, size), mutablePool)
+ }).
+ AnyTimes()
+ mutablePool.EXPECT().Put(gomock.Any()).AnyTimes()
+
+ pools.EXPECT().MutableSeriesIterators().Return(mutablePool).AnyTimes()
+
+ return pools
+}
+
func setupFanoutRead(t *testing.T, output bool, response ...*fetchResponse) storage.Storage {
if len(response) == 0 {
response = []*fetchResponse{{err: fmt.Errorf("unable to get response")}}
@@ -85,7 +105,7 @@ func setupFanoutRead(t *testing.T, output bool, response ...*fetchResponse) stor
ctrl := xtest.NewController(t)
store1, session1 := m3.NewStorageAndSession(t, ctrl)
store2, session2 := m3.NewStorageAndSession(t, ctrl)
-
+ pools := newTestIteratorPools(ctrl)
session1.EXPECT().FetchTagged(gomock.Any(), gomock.Any(), gomock.Any()).
Return(response[0].result, client.FetchResponseMetadata{Exhaustive: true}, response[0].err)
session2.EXPECT().FetchTagged(gomock.Any(), gomock.Any(), gomock.Any()).
@@ -95,16 +115,16 @@ func setupFanoutRead(t *testing.T, output bool, response ...*fetchResponse) stor
session2.EXPECT().FetchTaggedIDs(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil, client.FetchResponseMetadata{Exhaustive: false}, errs.ErrNotImplemented)
session1.EXPECT().IteratorPools().
- Return(nil, nil).AnyTimes()
+ Return(pools, nil).AnyTimes()
session2.EXPECT().IteratorPools().
- Return(nil, nil).AnyTimes()
+ Return(pools, nil).AnyTimes()
stores := []storage.Storage{
store1, store2,
}
store := NewStorage(stores, filterFunc(output), filterFunc(output),
- filterCompleteTagsFunc(output), instrument.NewOptions())
+ filterCompleteTagsFunc(output), models.NewTagOptions(), instrument.NewOptions())
return store
}
@@ -132,7 +152,7 @@ func setupFanoutWrite(t *testing.T, output bool, errs ...error) storage.Storage
store1, store2,
}
store := NewStorage(stores, filterFunc(output), filterFunc(output),
- filterCompleteTagsFunc(output), instrument.NewOptions())
+ filterCompleteTagsFunc(output), models.NewTagOptions(), instrument.NewOptions())
return store
}
@@ -190,25 +210,33 @@ func TestFanoutWriteError(t *testing.T) {
store := setupFanoutWrite(t, true, fmt.Errorf("write error"))
datapoints := make(ts.Datapoints, 1)
datapoints[0] = ts.Datapoint{Timestamp: time.Now(), Value: 1}
- err := store.Write(context.TODO(), &storage.WriteQuery{
+
+ writeQuery, err := storage.NewWriteQuery(storage.WriteQueryOptions{
Datapoints: datapoints,
- Tags: models.NewTags(0, nil),
+ Tags: models.MustMakeTags("foo", "bar"),
+ Unit: xtime.Second,
})
- assert.Error(t, err)
+ require.NoError(t, err)
+
+ assert.Error(t, store.Write(context.TODO(), writeQuery))
}
func TestFanoutWriteSuccess(t *testing.T) {
store := setupFanoutWrite(t, true, nil)
datapoints := make(ts.Datapoints, 1)
datapoints[0] = ts.Datapoint{Timestamp: time.Now(), Value: 1}
- err := store.Write(context.TODO(), &storage.WriteQuery{
+
+ writeQuery, err := storage.NewWriteQuery(storage.WriteQueryOptions{
Datapoints: datapoints,
- Tags: models.NewTags(0, nil),
- Attributes: storage.Attributes{
- MetricsType: storage.UnaggregatedMetricsType,
+ Tags: models.MustMakeTags("foo", "bar"),
+ Unit: xtime.Second,
+ Attributes: storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
},
})
- assert.NoError(t, err)
+ require.NoError(t, err)
+
+ assert.NoError(t, store.Write(context.TODO(), writeQuery))
}
func TestCompleteTagsError(t *testing.T) {
@@ -283,7 +311,8 @@ func TestFanoutSearchErrorContinues(t *testing.T) {
warnStore.EXPECT().Name().Return("warn").AnyTimes()
stores := []storage.Storage{warnStore, okStore, dupeStore}
- store := NewStorage(stores, filter, filter, tFilter, instrument.NewOptions())
+ store := NewStorage(stores, filter, filter, tFilter,
+ models.NewTagOptions(), instrument.NewOptions())
opts := storage.NewFetchOptions()
result, err := store.SearchSeries(context.TODO(), &storage.FetchQuery{}, opts)
assert.NoError(t, err)
@@ -305,10 +334,10 @@ func TestFanoutCompleteTagsErrorContinues(t *testing.T) {
okStore := storage.NewMockStorage(ctrl)
okStore.EXPECT().CompleteTags(gomock.Any(), gomock.Any(), gomock.Any()).
Return(
- &storage.CompleteTagsResult{
+ &consolidators.CompleteTagsResult{
CompleteNameOnly: true,
- CompletedTags: []storage.CompletedTag{
- storage.CompletedTag{
+ CompletedTags: []consolidators.CompletedTag{
+ consolidators.CompletedTag{
Name: []byte("ok"),
},
},
@@ -319,10 +348,10 @@ func TestFanoutCompleteTagsErrorContinues(t *testing.T) {
warnStore := storage.NewMockStorage(ctrl)
warnStore.EXPECT().CompleteTags(gomock.Any(), gomock.Any(), gomock.Any()).
Return(
- &storage.CompleteTagsResult{
+ &consolidators.CompleteTagsResult{
CompleteNameOnly: true,
- CompletedTags: []storage.CompletedTag{
- storage.CompletedTag{
+ CompletedTags: []consolidators.CompletedTag{
+ consolidators.CompletedTag{
Name: []byte("warn"),
},
},
@@ -333,7 +362,8 @@ func TestFanoutCompleteTagsErrorContinues(t *testing.T) {
warnStore.EXPECT().Name().Return("warn").AnyTimes()
stores := []storage.Storage{warnStore, okStore}
- store := NewStorage(stores, filter, filter, tFilter, instrument.NewOptions())
+ store := NewStorage(stores, filter, filter, tFilter,
+ models.NewTagOptions(), instrument.NewOptions())
opts := storage.NewFetchOptions()
q := &storage.CompleteTagsQuery{CompleteNameOnly: true}
result, err := store.CompleteTags(context.TODO(), q, opts)
@@ -373,7 +403,8 @@ func TestFanoutFetchBlocksErrorContinues(t *testing.T) {
warnStore.EXPECT().Name().Return("warn").AnyTimes()
stores := []storage.Storage{warnStore, okStore}
- store := NewStorage(stores, filter, filter, tFilter, instrument.NewOptions())
+ store := NewStorage(stores, filter, filter, tFilter,
+ models.NewTagOptions(), instrument.NewOptions())
opts := storage.NewFetchOptions()
result, err := store.FetchBlocks(context.TODO(), &storage.FetchQuery{}, opts)
assert.NoError(t, err)
@@ -433,7 +464,8 @@ func TestFanoutFetchErrorContinues(t *testing.T) {
warnStore.EXPECT().Name().Return("warn").AnyTimes()
stores := []storage.Storage{warnStore, okStore}
- store := NewStorage(stores, filter, filter, tFilter, instrument.NewOptions())
+ store := NewStorage(stores, filter, filter, tFilter,
+ models.NewTagOptions(), instrument.NewOptions())
opts := storage.NewFetchOptions()
result, err := store.FetchProm(context.TODO(), &storage.FetchQuery{}, opts)
assert.NoError(t, err)
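The raw WriteQuery struct literal is gone from these tests in favor of a validating constructor; the new pattern looks like this — a sketch that assumes NewWriteQuery returns (*storage.WriteQuery, error), as its use above suggests:

package example

import (
	"time"

	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
	"github.com/m3db/m3/src/query/ts"
	xtime "github.com/m3db/m3/src/x/time"
)

// buildWriteQuery constructs a validated write query. The constructor can
// reject malformed inputs, which the old raw struct literal never could.
func buildWriteQuery(now time.Time) (*storage.WriteQuery, error) {
	return storage.NewWriteQuery(storage.WriteQueryOptions{
		Datapoints: ts.Datapoints{{Timestamp: now, Value: 1}},
		Tags:       models.MustMakeTags("foo", "bar"),
		Unit:       xtime.Second,
		Attributes: storagemetadata.Attributes{
			MetricsType: storagemetadata.UnaggregatedMetricsType,
		},
	})
}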
diff --git a/src/query/storage/fetch.go b/src/query/storage/fetch.go
new file mode 100644
index 0000000000..a20d729eb8
--- /dev/null
+++ b/src/query/storage/fetch.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package storage
+
+import (
+ "time"
+
+ "github.com/m3db/m3/src/query/cost"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
+
+ "github.com/uber-go/tally"
+)
+
+// NewFetchOptions creates a new set of fetch options.
+func NewFetchOptions() *FetchOptions {
+ return &FetchOptions{
+ SeriesLimit: 0,
+ DocsLimit: 0,
+ BlockType: models.TypeSingleBlock,
+ FanoutOptions: &FanoutOptions{
+ FanoutUnaggregated: FanoutDefault,
+ FanoutAggregated: FanoutDefault,
+ FanoutAggregatedOptimized: FanoutDefault,
+ },
+ Enforcer: cost.NoopChainedEnforcer(),
+ Scope: tally.NoopScope,
+ }
+}
+
+// LookbackDurationOrDefault returns the overridden lookback duration if one
+// is set, otherwise the given default.
+func (o *FetchOptions) LookbackDurationOrDefault(
+ defaultValue time.Duration,
+) time.Duration {
+ if o.LookbackDuration == nil {
+ return defaultValue
+ }
+ return *o.LookbackDuration
+}
+
+// QueryFetchOptions returns fetch options for a given query.
+func (o *FetchOptions) QueryFetchOptions(
+ queryCtx *models.QueryContext,
+ blockType models.FetchedBlockType,
+) (*FetchOptions, error) {
+ r := o.Clone()
+ if r.SeriesLimit <= 0 {
+ r.SeriesLimit = queryCtx.Options.LimitMaxTimeseries
+ }
+ if r.DocsLimit <= 0 {
+ r.DocsLimit = queryCtx.Options.LimitMaxDocs
+ }
+
+ // Fall back to the query context's type restriction if none is set directly.
+ if r.RestrictQueryOptions.GetRestrictByType() == nil &&
+ queryCtx.Options.RestrictFetchType != nil {
+ v := queryCtx.Options.RestrictFetchType
+ restrict := &RestrictByType{
+ MetricsType: storagemetadata.MetricsType(v.MetricsType),
+ StoragePolicy: v.StoragePolicy,
+ }
+
+ if err := restrict.Validate(); err != nil {
+ return nil, err
+ }
+
+ if r.RestrictQueryOptions == nil {
+ r.RestrictQueryOptions = &RestrictQueryOptions{}
+ }
+
+ r.RestrictQueryOptions.RestrictByType = restrict
+ }
+
+ return r, nil
+}
+
+// Clone will clone and return the fetch options.
+func (o *FetchOptions) Clone() *FetchOptions {
+ result := *o
+ return &result
+}
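A short sketch of the FetchOptions helpers introduced above; note that Clone copies the struct by value, so scalar fields can diverge while pointer fields such as LookbackDuration stay shared:

package example

import (
	"time"

	"github.com/m3db/m3/src/query/storage"
)

func lookbackExample() {
	opts := storage.NewFetchOptions()

	// No override set: the caller-supplied default wins.
	_ = opts.LookbackDurationOrDefault(5 * time.Minute) // 5m

	// Override set: it takes precedence over the default.
	override := 10 * time.Minute
	opts.LookbackDuration = &override
	_ = opts.LookbackDurationOrDefault(5 * time.Minute) // 10m

	// Clone is a shallow copy: the clone's limits can be changed freely,
	// but pointer fields (like LookbackDuration) remain shared.
	clone := opts.Clone()
	clone.SeriesLimit = 500
}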
diff --git a/src/query/storage/index.go b/src/query/storage/index.go
index 58a2790748..a933fc3413 100644
--- a/src/query/storage/index.go
+++ b/src/query/storage/index.go
@@ -21,17 +21,19 @@
package storage
import (
- "bytes"
"fmt"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/m3ninx/idx"
"github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/x/ident"
)
-var (
- dotStar = []byte(".*")
+const (
+ dot = byte('.')
+ plus = byte('+')
+ star = byte('*')
)
// FromM3IdentToMetric converts an M3 ident metric to a coordinator metric.
@@ -40,7 +42,7 @@ func FromM3IdentToMetric(
iterTags ident.TagIterator,
tagOptions models.TagOptions,
) (models.Metric, error) {
- tags, err := FromIdentTagIteratorToTags(iterTags, tagOptions)
+ tags, err := consolidators.FromIdentTagIteratorToTags(iterTags, tagOptions)
if err != nil {
return models.Metric{}, err
}
@@ -51,27 +53,6 @@ func FromM3IdentToMetric(
}, nil
}
-// FromIdentTagIteratorToTags converts ident tags to coordinator tags.
-func FromIdentTagIteratorToTags(
- identTags ident.TagIterator,
- tagOptions models.TagOptions,
-) (models.Tags, error) {
- tags := models.NewTags(identTags.Remaining(), tagOptions)
- for identTags.Next() {
- identTag := identTags.Current()
- tags = tags.AddTag(models.Tag{
- Name: identTag.Name.Bytes(),
- Value: identTag.Value.Bytes(),
- })
- }
-
- if err := identTags.Err(); err != nil {
- return models.EmptyTags(), err
- }
-
- return tags, nil
-}
-
// TagsToIdentTagIterator converts coordinator tags to ident tags.
func TagsToIdentTagIterator(tags models.Tags) ident.TagIterator {
// TODO: get a tags and tag iterator from an ident.Pool here rather than allocing them here
@@ -89,9 +70,11 @@ func TagsToIdentTagIterator(tags models.Tags) ident.TagIterator {
// FetchOptionsToM3Options converts a set of coordinator options to M3 options.
func FetchOptionsToM3Options(fetchOptions *FetchOptions, fetchQuery *FetchQuery) index.QueryOptions {
return index.QueryOptions{
- Limit: fetchOptions.Limit,
- StartInclusive: fetchQuery.Start,
- EndExclusive: fetchQuery.End,
+ SeriesLimit: fetchOptions.SeriesLimit,
+ DocsLimit: fetchOptions.DocsLimit,
+ RequireExhaustive: fetchOptions.RequireExhaustive,
+ StartInclusive: fetchQuery.Start,
+ EndExclusive: fetchQuery.End,
}
}
@@ -111,7 +94,8 @@ func FetchOptionsToAggregateOptions(
) index.AggregationOptions {
return index.AggregationOptions{
QueryOptions: index.QueryOptions{
- Limit: fetchOptions.Limit,
+ SeriesLimit: fetchOptions.SeriesLimit,
+ DocsLimit: fetchOptions.DocsLimit,
StartInclusive: tagQuery.Start,
EndExclusive: tagQuery.End,
},
@@ -136,6 +120,18 @@ func FetchQueryToM3Query(
// Optimization for single matcher case.
if len(matchers) == 1 {
+ specialCase := isSpecialCaseMatcher(matchers[0])
+ if specialCase.skip {
+ // NB: the sole matcher has no effect; this is synonymous with an AllQuery.
+ return index.Query{
+ Query: idx.NewAllQuery(),
+ }, nil
+ }
+
+ if specialCase.isSpecial {
+ return index.Query{Query: specialCase.query}, nil
+ }
+
q, err := matcherToQuery(matchers[0])
if err != nil {
return index.Query{}, err
@@ -144,13 +140,24 @@ func FetchQueryToM3Query(
return index.Query{Query: q}, nil
}
- idxQueries := make([]idx.Query, len(matchers))
- var err error
- for i, matcher := range matchers {
- idxQueries[i], err = matcherToQuery(matcher)
+ idxQueries := make([]idx.Query, 0, len(matchers))
+ for _, matcher := range matchers {
+ specialCase := isSpecialCaseMatcher(matcher)
+ if specialCase.skip {
+ continue
+ }
+
+ if specialCase.isSpecial {
+ idxQueries = append(idxQueries, specialCase.query)
+ continue
+ }
+
+ q, err := matcherToQuery(matcher)
if err != nil {
return index.Query{}, err
}
+
+ idxQueries = append(idxQueries, q)
}
q := idx.NewConjunctionQuery(idxQueries...)
@@ -158,6 +165,63 @@ func FetchQueryToM3Query(
return index.Query{Query: q}, nil
}
+type specialCase struct {
+ query idx.Query
+ isSpecial bool
+ skip bool
+}
+
+func isSpecialCaseMatcher(matcher models.Matcher) specialCase {
+ if len(matcher.Value) == 0 {
+ if matcher.Type == models.MatchNotRegexp ||
+ matcher.Type == models.MatchNotEqual {
+ query := idx.NewFieldQuery(matcher.Name)
+ return specialCase{query: query, isSpecial: true}
+ }
+
+ if matcher.Type == models.MatchRegexp ||
+ matcher.Type == models.MatchEqual {
+ query := idx.NewNegationQuery(idx.NewFieldQuery(matcher.Name))
+ return specialCase{query: query, isSpecial: true}
+ }
+
+ return specialCase{}
+ }
+
+ // NB: only regexp / negated regexp matchers are special cased beyond here.
+ isNegatedRegex := matcher.Type == models.MatchNotRegexp
+ isRegex := matcher.Type == models.MatchRegexp
+ if !isNegatedRegex && !isRegex {
+ return specialCase{}
+ }
+
+ if len(matcher.Value) != 2 || matcher.Value[0] != dot {
+ return specialCase{}
+ }
+
+ if matcher.Value[1] == star {
+ if isNegatedRegex {
+ // NB: This should match no results.
+ query := idx.NewNegationQuery(idx.NewAllQuery())
+ return specialCase{query: query, isSpecial: true}
+ }
+
+ // NB: this matcher should not affect query results.
+ return specialCase{skip: true}
+ }
+
+ if matcher.Value[1] == plus {
+ query := idx.NewFieldQuery(matcher.Name)
+ if isNegatedRegex {
+ query = idx.NewNegationQuery(query)
+ }
+
+ return specialCase{query: query, isSpecial: true}
+ }
+
+ return specialCase{}
+}
+
func matcherToQuery(matcher models.Matcher) (idx.Query, error) {
negate := false
switch matcher.Type {
@@ -165,38 +229,41 @@ func matcherToQuery(matcher models.Matcher) (idx.Query, error) {
case models.MatchNotRegexp:
negate = true
fallthrough
+
case models.MatchRegexp:
var (
query idx.Query
err error
)
- if bytes.Equal(dotStar, matcher.Value) {
- query = idx.NewFieldQuery(matcher.Name)
- } else {
- query, err = idx.NewRegexpQuery(matcher.Name, matcher.Value)
- }
+
+ query, err = idx.NewRegexpQuery(matcher.Name, matcher.Value)
if err != nil {
return idx.Query{}, err
}
+
if negate {
query = idx.NewNegationQuery(query)
}
+
return query, nil
// Support exact matches
case models.MatchNotEqual:
negate = true
fallthrough
+
case models.MatchEqual:
query := idx.NewTermQuery(matcher.Name, matcher.Value)
if negate {
query = idx.NewNegationQuery(query)
}
+
return query, nil
case models.MatchNotField:
negate = true
fallthrough
+
case models.MatchField:
query := idx.NewFieldQuery(matcher.Name)
if negate {
diff --git a/src/query/storage/index_test.go b/src/query/storage/index_test.go
index 9d7cedbf5d..a71c5b1800 100644
--- a/src/query/storage/index_test.go
+++ b/src/query/storage/index_test.go
@@ -72,15 +72,6 @@ func TestFromM3IdentToMetric(t *testing.T) {
assert.Equal(t, name, metric.Tags.Opts.MetricName())
}
-func TestFromIdentTagIteratorToTags(t *testing.T) {
- tagIters := makeTagIter()
- tags, err := FromIdentTagIteratorToTags(tagIters, nil)
- require.NoError(t, err)
- require.Equal(t, len(testTags), tags.Len())
- assert.Equal(t, testTags, tags.Tags)
- assert.Equal(t, []byte("__name__"), tags.Opts.MetricName())
-}
-
func TestFetchQueryToM3Query(t *testing.T) {
tests := []struct {
name string
@@ -121,8 +112,8 @@ func TestFetchQueryToM3Query(t *testing.T) {
},
},
{
- name: "regexp match -> field",
- expected: "field(t1)",
+ name: "regexp match dot star -> all",
+ expected: "all()",
matchers: models.Matchers{
{
Type: models.MatchRegexp,
@@ -131,6 +122,17 @@ func TestFetchQueryToM3Query(t *testing.T) {
},
},
},
+ {
+ name: "regexp match dot plus -> field",
+ expected: "field(t1)",
+ matchers: models.Matchers{
+ {
+ Type: models.MatchRegexp,
+ Name: []byte("t1"),
+ Value: []byte(".+"),
+ },
+ },
+ },
{
name: "regexp match negated",
expected: "negation(regexp(t1, v1))",
@@ -144,7 +146,7 @@ func TestFetchQueryToM3Query(t *testing.T) {
},
{
name: "regexp match negated",
- expected: "negation(field(t1))",
+ expected: "negation(all())",
matchers: models.Matchers{
{
Type: models.MatchNotRegexp,
@@ -189,6 +191,50 @@ func TestFetchQueryToM3Query(t *testing.T) {
},
},
},
+ {
+ name: "regexp match dot star with trailing characters -> regex",
+ expected: "regexp(t1, .*foo)",
+ matchers: models.Matchers{
+ {
+ Type: models.MatchRegexp,
+ Name: []byte("t1"),
+ Value: []byte(".*foo"),
+ },
+ },
+ },
+ {
+ name: "regexp match dot plus with trailing characters -> regex",
+ expected: "regexp(t1, .+foo)",
+ matchers: models.Matchers{
+ {
+ Type: models.MatchRegexp,
+ Name: []byte("t1"),
+ Value: []byte(".+foo"),
+ },
+ },
+ },
+ {
+ name: "not regexp match dot star with trailing characters -> regex",
+ expected: "negation(regexp(t1, .*foo))",
+ matchers: models.Matchers{
+ {
+ Type: models.MatchNotRegexp,
+ Name: []byte("t1"),
+ Value: []byte(".*foo"),
+ },
+ },
+ },
+ {
+ name: "not regexp match dot plus with trailing characters -> regex",
+ expected: "negation(regexp(t1, .+foo))",
+ matchers: models.Matchers{
+ {
+ Type: models.MatchNotRegexp,
+ Name: []byte("t1"),
+ Value: []byte(".+foo"),
+ },
+ },
+ },
}
for _, test := range tests {
@@ -210,7 +256,7 @@ func TestFetchQueryToM3Query(t *testing.T) {
func TestFetchOptionsToAggregateOptions(t *testing.T) {
fetchOptions := &FetchOptions{
- Limit: 7,
+ SeriesLimit: 7,
}
end := time.Now()
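Taken together, the table-driven cases above pin down the special-case translations as follows — a summary in PromQL-style notation, using the expected query strings from the test:

// Special-case matcher translations, matching the expected strings in the
// table-driven test above:
//
//   {t1 =~ ".*"}    -> all()                         // matcher has no effect
//   {t1 !~ ".*"}    -> negation(all())               // matches nothing
//   {t1 =~ ".+"}    -> field(t1)                     // any value present
//   {t1 !~ ".+"}    -> negation(field(t1))
//   {t1 =~ ".*foo"} -> regexp(t1, .*foo)             // trailing chars: no special case
//   {t1 !~ ".*foo"} -> negation(regexp(t1, .*foo))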
diff --git a/src/query/storage/m3/cluster.go b/src/query/storage/m3/cluster.go
index f75df3b354..e525b9f695 100644
--- a/src/query/storage/m3/cluster.go
+++ b/src/query/storage/m3/cluster.go
@@ -28,7 +28,7 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/client"
- "github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
)
@@ -78,12 +78,12 @@ type ClusterNamespace interface {
type ClusterNamespaceOptions struct {
// Note: Don't allow direct access, as we want to provide defaults
// and/or error if call to access a field is not relevant/correct.
- attributes storage.Attributes
+ attributes storagemetadata.Attributes
downsample *ClusterNamespaceDownsampleOptions
}
// Attributes returns the storage attributes of the cluster namespace.
-func (o ClusterNamespaceOptions) Attributes() storage.Attributes {
+func (o ClusterNamespaceOptions) Attributes() storagemetadata.Attributes {
return o.attributes
}
@@ -93,7 +93,7 @@ func (o ClusterNamespaceOptions) DownsampleOptions() (
ClusterNamespaceDownsampleOptions,
error,
) {
- if o.attributes.MetricsType != storage.AggregatedMetricsType {
+ if o.attributes.MetricsType != storagemetadata.AggregatedMetricsType {
return ClusterNamespaceDownsampleOptions{}, errNotAggregatedClusterNamespace
}
if o.downsample == nil {
@@ -136,7 +136,7 @@ func (a ClusterNamespacesByRetentionAsc) Less(i, j int) bool {
func (n ClusterNamespaces) NumAggregatedClusterNamespaces() int {
count := 0
for _, namespace := range n {
- if namespace.Options().Attributes().MetricsType == storage.AggregatedMetricsType {
+ if namespace.Options().Attributes().MetricsType == storagemetadata.AggregatedMetricsType {
count++
}
}
@@ -317,8 +317,8 @@ func newUnaggregatedClusterNamespace(
return &clusterNamespace{
namespaceID: ns,
options: ClusterNamespaceOptions{
- attributes: storage.Attributes{
- MetricsType: storage.UnaggregatedMetricsType,
+ attributes: storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
Retention: def.Retention,
},
},
@@ -339,8 +339,8 @@ func newAggregatedClusterNamespace(
return &clusterNamespace{
namespaceID: ns,
options: ClusterNamespaceOptions{
- attributes: storage.Attributes{
- MetricsType: storage.AggregatedMetricsType,
+ attributes: storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
Retention: def.Retention,
Resolution: def.Resolution,
},
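Since DownsampleOptions errors for anything but aggregated namespaces, callers check the metrics type first. A sketch within the same package; it assumes ClusterNamespaceDownsampleOptions exposes an All flag corresponding to the namespace's downsample: all: config knob:

// namespaceDownsamplesAll reports whether ns is an aggregated namespace
// that receives downsampled writes for all metrics.
func namespaceDownsamplesAll(ns ClusterNamespace) (bool, error) {
	opts := ns.Options()
	if opts.Attributes().MetricsType != storagemetadata.AggregatedMetricsType {
		// DownsampleOptions would return errNotAggregatedClusterNamespace.
		return false, nil
	}
	downsample, err := opts.DownsampleOptions()
	if err != nil {
		return false, err
	}
	// All is an assumed field name, taken from the `downsample: all:` knob.
	return downsample.All, nil
}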
diff --git a/src/query/storage/m3/cluster_resolver.go b/src/query/storage/m3/cluster_resolver.go
index 4ddecca550..4b4b22839e 100644
--- a/src/query/storage/m3/cluster_resolver.go
+++ b/src/query/storage/m3/cluster_resolver.go
@@ -26,6 +26,8 @@ import (
"time"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
)
type unaggregatedNamespaceType uint8
@@ -77,7 +79,7 @@ func resolveClusterNamespacesForQuery(
clusters Clusters,
opts *storage.FanoutOptions,
restrict *storage.RestrictQueryOptions,
-) (queryFanoutType, ClusterNamespaces, error) {
+) (consolidators.QueryFanoutType, ClusterNamespaces, error) {
if typeRestrict := restrict.GetRestrictByType(); typeRestrict != nil {
// If a specific restriction is set, then attempt to satisfy.
return resolveClusterNamespacesForQueryWithRestrictQueryOptions(now,
@@ -90,18 +92,18 @@ func resolveClusterNamespacesForQuery(
unaggregated := resolveUnaggregatedNamespaceForQuery(now, start,
clusters.UnaggregatedClusterNamespace(), opts)
if unaggregated.satisfies == fullySatisfiesRange {
- return namespaceCoversAllQueryRange,
+ return consolidators.NamespaceCoversAllQueryRange,
ClusterNamespaces{unaggregated.clusterNamespace},
nil
}
if opts.FanoutAggregated == storage.FanoutForceDisable {
if unaggregated.satisfies == partiallySatisfiesRange {
- return namespaceCoversPartialQueryRange,
+ return consolidators.NamespaceCoversPartialQueryRange,
ClusterNamespaces{unaggregated.clusterNamespace}, nil
}
- return namespaceInvalid, nil, errUnaggregatedAndAggregatedDisabled
+ return consolidators.NamespaceInvalid, nil, errUnaggregatedAndAggregatedDisabled
}
// The filter function will drop namespaces which do not cover the entire
@@ -135,7 +137,7 @@ func resolveClusterNamespacesForQuery(
}
}
- return namespaceCoversAllQueryRange, result, nil
+ return consolidators.NamespaceCoversAllQueryRange, result, nil
}
// No complete aggregated namespaces can definitely fulfill the query,
@@ -159,12 +161,12 @@ func resolveClusterNamespacesForQuery(
// range, set query fanout type to namespaceCoversPartialQueryRange.
for _, n := range result {
if !coversRangeFilter(n) {
- return namespaceCoversPartialQueryRange, result, nil
+ return consolidators.NamespaceCoversPartialQueryRange, result, nil
}
}
// Otherwise, all namespaces cover the query range.
- return namespaceCoversAllQueryRange, result, nil
+ return consolidators.NamespaceCoversAllQueryRange, result, nil
}
// Return the longest retention aggregated namespace and
@@ -200,7 +202,7 @@ func resolveClusterNamespacesForQuery(
}
}
- return namespaceCoversPartialQueryRange, result, nil
+ return consolidators.NamespaceCoversPartialQueryRange, result, nil
}
type reusedAggregatedNamespaceSlices struct {
@@ -255,7 +257,7 @@ func aggregatedNamespaces(
// have all the data).
for _, namespace := range all {
nsOpts := namespace.Options()
- if nsOpts.Attributes().MetricsType != storage.AggregatedMetricsType {
+ if nsOpts.Attributes().MetricsType != storagemetadata.AggregatedMetricsType {
// Not an aggregated cluster.
continue
}
@@ -302,7 +304,7 @@ func resolveClusterNamespacesForQueryWithRestrictQueryOptions(
now, start time.Time,
clusters Clusters,
restrict storage.RestrictByType,
-) (queryFanoutType, ClusterNamespaces, error) {
+) (consolidators.QueryFanoutType, ClusterNamespaces, error) {
coversRangeFilter := newCoversRangeFilter(coversRangeFilterOptions{
now: now,
queryStart: start,
@@ -311,24 +313,24 @@ func resolveClusterNamespacesForQueryWithRestrictQueryOptions(
result := func(
namespace ClusterNamespace,
err error,
- ) (queryFanoutType, ClusterNamespaces, error) {
+ ) (consolidators.QueryFanoutType, ClusterNamespaces, error) {
if err != nil {
return 0, nil, err
}
if coversRangeFilter(namespace) {
- return namespaceCoversAllQueryRange,
+ return consolidators.NamespaceCoversAllQueryRange,
ClusterNamespaces{namespace}, nil
}
- return namespaceCoversPartialQueryRange,
+ return consolidators.NamespaceCoversPartialQueryRange,
ClusterNamespaces{namespace}, nil
}
switch restrict.MetricsType {
- case storage.UnaggregatedMetricsType:
+ case storagemetadata.UnaggregatedMetricsType:
return result(clusters.UnaggregatedClusterNamespace(), nil)
- case storage.AggregatedMetricsType:
+ case storagemetadata.AggregatedMetricsType:
ns, ok := clusters.AggregatedClusterNamespace(RetentionResolution{
Retention: restrict.StoragePolicy.Retention().Duration(),
Resolution: restrict.StoragePolicy.Resolution().Window,
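Restricting resolution to one aggregated namespace now pairs the storagemetadata type with an explicit storage policy, as the test cases below do; a sketch within the same package (the leading now/start parameters are assumed from the visible signature fragment):

import (
	"time"

	"github.com/m3db/m3/src/metrics/policy"
	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/query/storage/m3/consolidators"
	"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
)

// resolveAggregatedOnly restricts namespace resolution to the aggregated
// namespace matching the given storage policy.
func resolveAggregatedOnly(
	now, start time.Time,
	clusters Clusters,
	opts *storage.FanoutOptions,
) (consolidators.QueryFanoutType, ClusterNamespaces, error) {
	restrict := &storage.RestrictQueryOptions{
		RestrictByType: &storage.RestrictByType{
			MetricsType:   storagemetadata.AggregatedMetricsType,
			StoragePolicy: policy.MustParseStoragePolicy("1m:120h"),
		},
	}
	return resolveClusterNamespacesForQuery(now, start, clusters, opts, restrict)
}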
diff --git a/src/query/storage/m3/cluster_resolver_test.go b/src/query/storage/m3/cluster_resolver_test.go
index c0fa180971..5db69c7f26 100644
--- a/src/query/storage/m3/cluster_resolver_test.go
+++ b/src/query/storage/m3/cluster_resolver_test.go
@@ -29,6 +29,8 @@ import (
"github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/x/ident"
"github.com/golang/mock/gomock"
@@ -164,7 +166,7 @@ var testCases = []struct {
queryLength time.Duration
opts *storage.FanoutOptions
restrict *storage.RestrictQueryOptions
- expectedType queryFanoutType
+ expectedType consolidators.QueryFanoutType
expectedClusterNames []string
expectedErr error
expectedErrContains string
@@ -176,7 +178,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceDisable,
FanoutAggregatedOptimized: storage.FanoutForceDisable,
},
- expectedType: namespaceInvalid,
+ expectedType: consolidators.NamespaceInvalid,
expectedErr: errUnaggregatedAndAggregatedDisabled,
},
{
@@ -186,7 +188,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceDisable,
FanoutAggregatedOptimized: storage.FanoutForceEnable,
},
- expectedType: namespaceInvalid,
+ expectedType: consolidators.NamespaceInvalid,
expectedErr: errUnaggregatedAndAggregatedDisabled,
},
{
@@ -196,7 +198,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceDisable,
FanoutAggregatedOptimized: storage.FanoutForceDisable,
},
- expectedType: namespaceCoversPartialQueryRange,
+ expectedType: consolidators.NamespaceCoversPartialQueryRange,
expectedClusterNames: []string{"UNAGG"},
},
{
@@ -207,7 +209,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceDisable,
FanoutAggregatedOptimized: storage.FanoutForceDisable,
},
- expectedType: namespaceCoversAllQueryRange,
+ expectedType: consolidators.NamespaceCoversAllQueryRange,
expectedClusterNames: []string{"UNAGG"},
},
{
@@ -217,7 +219,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceDisable,
FanoutAggregatedOptimized: storage.FanoutForceEnable,
},
- expectedType: namespaceCoversPartialQueryRange,
+ expectedType: consolidators.NamespaceCoversPartialQueryRange,
expectedClusterNames: []string{"UNAGG"},
},
{
@@ -228,7 +230,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceDisable,
FanoutAggregatedOptimized: storage.FanoutForceEnable,
},
- expectedType: namespaceCoversAllQueryRange,
+ expectedType: consolidators.NamespaceCoversAllQueryRange,
expectedClusterNames: []string{"UNAGG"},
},
{
@@ -238,7 +240,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceEnable,
FanoutAggregatedOptimized: storage.FanoutForceDisable,
},
- expectedType: namespaceCoversPartialQueryRange,
+ expectedType: consolidators.NamespaceCoversPartialQueryRange,
expectedClusterNames: []string{"AGG_FILTERED", "AGG_NO_FILTER",
"AGG_FILTERED_COMPLETE", "AGG_NO_FILTER_COMPLETE"},
},
@@ -250,7 +252,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceEnable,
FanoutAggregatedOptimized: storage.FanoutForceDisable,
},
- expectedType: namespaceCoversAllQueryRange,
+ expectedType: consolidators.NamespaceCoversAllQueryRange,
expectedClusterNames: []string{"AGG_FILTERED", "AGG_NO_FILTER",
"AGG_FILTERED_COMPLETE", "AGG_NO_FILTER_COMPLETE"},
},
@@ -261,7 +263,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceEnable,
FanoutAggregatedOptimized: storage.FanoutForceDisable,
},
- expectedType: namespaceCoversPartialQueryRange,
+ expectedType: consolidators.NamespaceCoversPartialQueryRange,
expectedClusterNames: []string{"UNAGG", "AGG_FILTERED", "AGG_NO_FILTER",
"AGG_FILTERED_COMPLETE", "AGG_NO_FILTER_COMPLETE"},
},
@@ -273,7 +275,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceEnable,
FanoutAggregatedOptimized: storage.FanoutForceDisable,
},
- expectedType: namespaceCoversAllQueryRange,
+ expectedType: consolidators.NamespaceCoversAllQueryRange,
expectedClusterNames: []string{"UNAGG"},
},
{
@@ -283,7 +285,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceEnable,
FanoutAggregatedOptimized: storage.FanoutForceEnable,
},
- expectedType: namespaceCoversAllQueryRange,
+ expectedType: consolidators.NamespaceCoversAllQueryRange,
expectedClusterNames: []string{"AGG_NO_FILTER", "AGG_NO_FILTER_COMPLETE"},
},
{
@@ -294,7 +296,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceEnable,
FanoutAggregatedOptimized: storage.FanoutForceEnable,
},
- expectedType: namespaceCoversAllQueryRange,
+ expectedType: consolidators.NamespaceCoversAllQueryRange,
expectedClusterNames: []string{"UNAGG"},
},
{
@@ -305,7 +307,7 @@ var testCases = []struct {
FanoutAggregated: storage.FanoutForceEnable,
FanoutAggregatedOptimized: storage.FanoutForceEnable,
},
- expectedType: namespaceCoversPartialQueryRange,
+ expectedType: consolidators.NamespaceCoversPartialQueryRange,
expectedClusterNames: []string{"AGG_NO_FILTER", "AGG_NO_FILTER_COMPLETE"},
},
{
@@ -313,10 +315,10 @@ var testCases = []struct {
queryLength: time.Hour * 1000,
restrict: &storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.UnaggregatedMetricsType,
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
},
},
- expectedType: namespaceCoversPartialQueryRange,
+ expectedType: consolidators.NamespaceCoversPartialQueryRange,
expectedClusterNames: []string{"UNAGG"},
},
{
@@ -324,12 +326,12 @@ var testCases = []struct {
queryLength: time.Hour * 1000,
restrict: &storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.AggregatedMetricsType,
+ MetricsType: storagemetadata.AggregatedMetricsType,
StoragePolicy: policy.MustParseStoragePolicy(
genResolution.String() + ":" + genRetentionFiltered.String()),
},
},
- expectedType: namespaceCoversPartialQueryRange,
+ expectedType: consolidators.NamespaceCoversPartialQueryRange,
expectedClusterNames: []string{"AGG_FILTERED"},
},
{
@@ -337,12 +339,12 @@ var testCases = []struct {
queryLength: time.Hour * 1000,
restrict: &storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.AggregatedMetricsType,
+ MetricsType: storagemetadata.AggregatedMetricsType,
StoragePolicy: policy.MustParseStoragePolicy(
genResolution.String() + ":" + genRetentionUnfiltered.String()),
},
},
- expectedType: namespaceCoversPartialQueryRange,
+ expectedType: consolidators.NamespaceCoversPartialQueryRange,
expectedClusterNames: []string{"AGG_NO_FILTER"},
},
{
@@ -350,7 +352,7 @@ var testCases = []struct {
queryLength: time.Hour * 1000,
restrict: &storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.UnknownMetricsType,
+ MetricsType: storagemetadata.UnknownMetricsType,
},
},
expectedErrContains: "unrecognized metrics type:",
@@ -360,7 +362,7 @@ var testCases = []struct {
queryLength: time.Hour * 1000,
restrict: &storage.RestrictQueryOptions{
RestrictByType: &storage.RestrictByType{
- MetricsType: storage.AggregatedMetricsType,
+ MetricsType: storagemetadata.AggregatedMetricsType,
StoragePolicy: policy.MustParseStoragePolicy("1s:100d"),
},
},
@@ -477,7 +479,7 @@ func TestLongUnaggregatedRetention(t *testing.T) {
sort.Sort(sort.StringSlice(actualNames))
sort.Sort(sort.StringSlice(expected))
assert.Equal(t, expected, actualNames)
- assert.Equal(t, namespaceCoversPartialQueryRange, fanoutType)
+ assert.Equal(t, consolidators.NamespaceCoversPartialQueryRange, fanoutType)
}
func TestExampleCase(t *testing.T) {
@@ -524,6 +526,6 @@ func TestExampleCase(t *testing.T) {
sort.Sort(sort.StringSlice(actualNames))
assert.Equal(t, []string{"metrics_10s_24h",
"metrics_180s_360h", "metrics_600s_17520h"}, actualNames)
- assert.Equal(t, namespaceCoversPartialQueryRange, fanoutType)
+ assert.Equal(t, consolidators.NamespaceCoversPartialQueryRange, fanoutType)
}
}
diff --git a/src/query/storage/m3/cluster_test.go b/src/query/storage/m3/cluster_test.go
index 161345ef1b..da938c95be 100644
--- a/src/query/storage/m3/cluster_test.go
+++ b/src/query/storage/m3/cluster_test.go
@@ -27,7 +27,7 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/client"
- "github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -74,7 +74,7 @@ func TestNewClustersFromConfig(t *testing.T) {
Namespaces: []ClusterStaticNamespaceConfiguration{
ClusterStaticNamespaceConfiguration{
Namespace: "unaggregated",
- Type: storage.UnaggregatedMetricsType,
+ Type: storagemetadata.UnaggregatedMetricsType,
Retention: 7 * 24 * time.Hour,
},
},
@@ -84,13 +84,13 @@ func TestNewClustersFromConfig(t *testing.T) {
Namespaces: []ClusterStaticNamespaceConfiguration{
ClusterStaticNamespaceConfiguration{
Namespace: "aggregated0",
- Type: storage.AggregatedMetricsType,
+ Type: storagemetadata.AggregatedMetricsType,
Retention: 30 * 24 * time.Hour,
Resolution: time.Minute,
},
ClusterStaticNamespaceConfiguration{
Namespace: "aggregated1",
- Type: storage.AggregatedMetricsType,
+ Type: storagemetadata.AggregatedMetricsType,
Retention: 365 * 24 * time.Hour,
Resolution: 10 * time.Minute,
},
@@ -105,8 +105,8 @@ func TestNewClustersFromConfig(t *testing.T) {
// Resolve expected clusters and check attributes
unaggregatedNs := clusters.UnaggregatedClusterNamespace()
assert.Equal(t, "unaggregated", unaggregatedNs.NamespaceID().String())
- assert.Equal(t, storage.Attributes{
- MetricsType: storage.UnaggregatedMetricsType,
+ assert.Equal(t, storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
Retention: 7 * 24 * time.Hour,
}, unaggregatedNs.Options().Attributes())
assert.True(t, mockSession1 == unaggregatedNs.Session())
@@ -117,8 +117,8 @@ func TestNewClustersFromConfig(t *testing.T) {
})
require.True(t, ok)
assert.Equal(t, "aggregated0", aggregated1Month1Minute.NamespaceID().String())
- assert.Equal(t, storage.Attributes{
- MetricsType: storage.AggregatedMetricsType,
+ assert.Equal(t, storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
Retention: 30 * 24 * time.Hour,
Resolution: time.Minute,
}, aggregated1Month1Minute.Options().Attributes())
@@ -130,8 +130,8 @@ func TestNewClustersFromConfig(t *testing.T) {
})
require.True(t, ok)
assert.Equal(t, "aggregated1", aggregated1Year10Minute.NamespaceID().String())
- assert.Equal(t, storage.Attributes{
- MetricsType: storage.AggregatedMetricsType,
+ assert.Equal(t, storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
Retention: 365 * 24 * time.Hour,
Resolution: 10 * time.Minute,
}, aggregated1Year10Minute.Options().Attributes())
@@ -164,7 +164,7 @@ func newTestClientFromConfig(ctrl *gomock.Controller) (
newClientFn := func(
_ client.Configuration,
_ client.ConfigurationParameters,
- _ ...client.CustomOption,
+ _ ...client.CustomAdminOption,
) (client.Client, error) {
return mockClient, nil
}
diff --git a/src/query/storage/m3/config.go b/src/query/storage/m3/config.go
index c12e106ae4..7672adf2c4 100644
--- a/src/query/storage/m3/config.go
+++ b/src/query/storage/m3/config.go
@@ -27,12 +27,10 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/client"
- "github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/stores/m3db"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
-
- "github.com/uber/tchannel-go"
)
var (
@@ -40,12 +38,6 @@ var (
errBothNamespaceTypeNewAndDeprecatedFieldsSet = goerrors.New("cannot specify both deprecated and non-deprecated fields for namespace type")
)
-// TODO(bodu): Could make these configurable at some point.
-const (
- idleCheckInterval = 5 * time.Minute
- maxIdleTime = 5 * time.Minute
-)
-
// ClustersStaticConfiguration is a set of static cluster configurations.
type ClustersStaticConfiguration []ClusterStaticConfiguration
@@ -54,7 +46,7 @@ type ClustersStaticConfiguration []ClusterStaticConfiguration
type NewClientFromConfig func(
cfg client.Configuration,
params client.ConfigurationParameters,
- custom ...client.CustomOption,
+ custom ...client.CustomAdminOption,
) (client.Client, error)
// ClusterStaticConfiguration is a static cluster configuration.
@@ -66,12 +58,12 @@ type ClusterStaticConfiguration struct {
func (c ClusterStaticConfiguration) newClient(
params client.ConfigurationParameters,
- custom ...client.CustomOption,
+ custom ...client.CustomAdminOption,
) (client.Client, error) {
if c.NewClientFromConfig != nil {
return c.NewClientFromConfig(c.Client, params, custom...)
}
- return c.Client.NewClient(params, custom...)
+ return c.Client.NewAdminClient(params, custom...)
}
// ClusterStaticNamespaceConfiguration describes the namespaces in a
@@ -82,7 +74,7 @@ type ClusterStaticNamespaceConfiguration struct {
// Type is the type of values stored by the namespace, current
// supported values are "unaggregated" or "aggregated".
- Type storage.MetricsType `yaml:"type"`
+ Type storagemetadata.MetricsType `yaml:"type"`
// Retention is the length of which values are stored by the namespace.
Retention time.Duration `yaml:"retention" validate:"nonzero"`
@@ -98,11 +90,11 @@ type ClusterStaticNamespaceConfiguration struct {
//
// Deprecated: Use "Type" field when specifying config instead, it is
// invalid to use both.
- StorageMetricsType storage.MetricsType `yaml:"storageMetricsType"`
+ StorageMetricsType storagemetadata.MetricsType `yaml:"storageMetricsType"`
}
-func (c ClusterStaticNamespaceConfiguration) metricsType() (storage.MetricsType, error) {
- unset := storage.MetricsType(0)
+func (c ClusterStaticNamespaceConfiguration) metricsType() (storagemetadata.MetricsType, error) {
+ unset := storagemetadata.MetricsType(0)
if c.Type != unset && c.StorageMetricsType != unset {
// Don't allow both to not be default
return unset, errBothNamespaceTypeNewAndDeprecatedFieldsSet
@@ -119,7 +111,7 @@ func (c ClusterStaticNamespaceConfiguration) metricsType() (storage.MetricsType,
}
// Both are unset
- return storage.DefaultMetricsType, nil
+ return storagemetadata.DefaultMetricsType, nil
}
func (c ClusterStaticNamespaceConfiguration) downsampleOptions() (
@@ -130,7 +122,7 @@ func (c ClusterStaticNamespaceConfiguration) downsampleOptions() (
if err != nil {
return ClusterNamespaceDownsampleOptions{}, err
}
- if nsType != storage.AggregatedMetricsType {
+ if nsType != storagemetadata.AggregatedMetricsType {
return ClusterNamespaceDownsampleOptions{}, errNotAggregatedClusterNamespace
}
if c.Downsample == nil {
@@ -170,8 +162,9 @@ type clusterConnectResult struct {
// ClustersStaticConfigurationOptions are options to use when
// constructing clusters from config.
type ClustersStaticConfigurationOptions struct {
- AsyncSessions bool
- ProvidedSession client.Session
+ AsyncSessions bool
+ ProvidedSession client.Session
+ CustomAdminOptions []client.CustomAdminOption
}
// NewClusters instantiates a new Clusters instance.
@@ -195,16 +188,9 @@ func (c ClustersStaticConfiguration) NewClusters(
if opts.ProvidedSession == nil {
// NB(r): Only create client session if not already provided.
- result, err = clusterCfg.newClient(
- client.ConfigurationParameters{
- InstrumentOptions: instrumentOpts,
- },
- func(opts client.Options) client.Options {
- return opts.SetChannelOptions(&tchannel.ChannelOptions{
- IdleCheckInterval: idleCheckInterval,
- MaxIdleTime: maxIdleTime,
- })
- })
+ result, err = clusterCfg.newClient(client.ConfigurationParameters{
+ InstrumentOptions: instrumentOpts,
+ }, opts.CustomAdminOptions...)
if err != nil {
return nil, err
}
@@ -221,7 +207,7 @@ func (c ClustersStaticConfiguration) NewClusters(
}
switch nsType {
- case storage.UnaggregatedMetricsType:
+ case storagemetadata.UnaggregatedMetricsType:
numUnaggregatedClusterNamespaces++
if numUnaggregatedClusterNamespaces > 1 {
return nil, fmt.Errorf("only one unaggregated cluster namespace "+
@@ -231,7 +217,7 @@ func (c ClustersStaticConfiguration) NewClusters(
unaggregatedClusterNamespaceCfg.client = result
unaggregatedClusterNamespaceCfg.namespace = n
- case storage.AggregatedMetricsType:
+ case storagemetadata.AggregatedMetricsType:
numAggregatedClusterNamespaces++
aggregatedClusterNamespacesCfg.namespaces =
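
With the hard-coded tchannel idle constants removed above, equivalent channel tuning now has to arrive through ClustersStaticConfigurationOptions.CustomAdminOptions. A minimal sketch of restoring the old behavior from the caller's side, assuming client.CustomAdminOption has the signature func(client.AdminOptions) client.AdminOptions and that SetChannelOptions (declared on the embedded Options interface) therefore needs a type assertion back to AdminOptions:

package example

import (
	"time"

	"github.com/m3db/m3/src/dbnode/client"
	"github.com/uber/tchannel-go"
)

// restoreIdleTuning re-creates the deleted idleCheckInterval/maxIdleTime
// tuning as an injectable option; the 5-minute values mirror the removed
// constants.
func restoreIdleTuning() client.CustomAdminOption {
	return func(opts client.AdminOptions) client.AdminOptions {
		return opts.SetChannelOptions(&tchannel.ChannelOptions{
			IdleCheckInterval: 5 * time.Minute,
			MaxIdleTime:       5 * time.Minute,
		}).(client.AdminOptions)
	}
}

A caller would then set CustomAdminOptions: []client.CustomAdminOption{restoreIdleTuning()} on the options passed to NewClusters.
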
diff --git a/src/query/storage/m3/multi_fetch_tags_result.go b/src/query/storage/m3/consolidators/complete_tags_result.go
similarity index 86%
rename from src/query/storage/m3/multi_fetch_tags_result.go
rename to src/query/storage/m3/consolidators/complete_tags_result.go
index 927b81c227..53f296c88b 100644
--- a/src/query/storage/m3/multi_fetch_tags_result.go
+++ b/src/query/storage/m3/consolidators/complete_tags_result.go
@@ -18,13 +18,14 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package m3
+package consolidators
import (
"sync"
"github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/models"
xerrors "github.com/m3db/m3/src/x/errors"
)
@@ -36,13 +37,15 @@ type multiSearchResult struct {
err xerrors.MultiError
seenIters []client.TaggedIDsIterator // track known iterators to avoid leaking
dedupeMap map[string]MultiTagResult
+ filters models.Filters
}
-// NewMultiFetchTagsResult builds a new multi fetch tags result
-func NewMultiFetchTagsResult() MultiFetchTagsResult {
+// NewMultiFetchTagsResult builds a new multi fetch tags result.
+func NewMultiFetchTagsResult(opts models.TagOptions) MultiFetchTagsResult {
return &multiSearchResult{
dedupeMap: make(map[string]MultiTagResult, initSize),
meta: block.NewResultMetadata(),
+ filters: opts.Filters(),
}
}
@@ -110,6 +113,17 @@ func (r *multiSearchResult) Add(
for newIterator.Next() {
_, ident, tagIter := newIterator.Current()
+ shouldFilter, err := filterTagIterator(tagIter, r.filters)
+ if err != nil {
+ r.err = r.err.Add(err)
+ return
+ }
+
+ if shouldFilter {
+ // NB: skip here, the closer will free the tag iterator regardless.
+ continue
+ }
+
id := ident.String()
_, exists := r.dedupeMap[id]
if !exists {
diff --git a/src/query/storage/m3/consolidators/complete_tags_result_test.go b/src/query/storage/m3/consolidators/complete_tags_result_test.go
new file mode 100644
index 0000000000..d04f1359c6
--- /dev/null
+++ b/src/query/storage/m3/consolidators/complete_tags_result_test.go
@@ -0,0 +1,103 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "testing"
+
+ "github.com/m3db/m3/src/dbnode/client"
+ "github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/x/ident"
+ xtest "github.com/m3db/m3/src/x/test"
+
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestExhaustiveTagMerge(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ r := NewMultiFetchTagsResult(models.NewTagOptions())
+ for _, tt := range exhaustTests {
+ t.Run(tt.name, func(t *testing.T) {
+ for _, ex := range tt.exhaustives {
+ it := client.NewMockTaggedIDsIterator(ctrl)
+ it.EXPECT().Next().Return(false)
+ it.EXPECT().Err().Return(nil)
+ it.EXPECT().Finalize().Return()
+ meta := block.NewResultMetadata()
+ meta.Exhaustive = ex
+ r.Add(it, meta, nil)
+ }
+
+ tagResult, err := r.FinalResult()
+ assert.NoError(t, err)
+ assert.Equal(t, tt.expected, tagResult.Metadata.Exhaustive)
+ assert.NoError(t, r.Close())
+ })
+ }
+}
+
+func TestMultiFetchTagsResult(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ iter := client.NewMockTaggedIDsIterator(ctrl)
+ iter.EXPECT().Next().Return(true)
+ iter.EXPECT().Current().Return(
+ ident.StringID("ns"),
+ ident.StringID("id"),
+ ident.MustNewTagStringsIterator("foo", "bar"))
+ iter.EXPECT().Next().Return(true)
+ iter.EXPECT().Current().Return(
+ ident.StringID("ns"),
+ ident.StringID("id"),
+ ident.MustNewTagStringsIterator("foo", "baz"))
+ iter.EXPECT().Next().Return(false)
+ iter.EXPECT().Err()
+
+ opts := models.NewTagOptions().SetFilters(models.Filters{
+ models.Filter{Name: b("foo"), Values: [][]byte{b("baz")}},
+ })
+
+ r := NewMultiFetchTagsResult(opts)
+ r.Add(iter, block.NewResultMetadata(), nil)
+
+ res, err := r.FinalResult()
+ require.NoError(t, err)
+
+ require.Equal(t, 1, len(res.Tags))
+ assert.Equal(t, "id", res.Tags[0].ID.String())
+ it := res.Tags[0].Iter
+
+ // NB: assert tags are still iterable.
+ ex := []tag{tag{name: "foo", value: "bar"}}
+ for i := 0; it.Next(); i++ {
+ tag := it.Current()
+ assert.Equal(t, ex[i].name, tag.Name.String())
+ assert.Equal(t, ex[i].value, tag.Value.String())
+ }
+
+ require.NoError(t, it.Err())
+}
diff --git a/src/query/storage/completed_tags.go b/src/query/storage/m3/consolidators/completed_tags.go
similarity index 94%
rename from src/query/storage/completed_tags.go
rename to src/query/storage/m3/consolidators/completed_tags.go
index 3a937f5337..99bf11faaf 100644
--- a/src/query/storage/completed_tags.go
+++ b/src/query/storage/m3/consolidators/completed_tags.go
@@ -18,7 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package storage
+package consolidators
import (
"bytes"
@@ -27,6 +27,7 @@ import (
"sync"
"github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/models"
)
type completeTagsResultBuilder struct {
@@ -34,15 +35,19 @@ type completeTagsResultBuilder struct {
nameOnly bool
metadata block.ResultMetadata
tagBuilders map[string]completedTagBuilder
+
+ filters models.Filters
}
// NewCompleteTagsResultBuilder creates a new complete tags result builder.
func NewCompleteTagsResultBuilder(
nameOnly bool,
+ opts models.TagOptions,
) CompleteTagsResultBuilder {
return &completeTagsResultBuilder{
nameOnly: nameOnly,
metadata: block.NewResultMetadata(),
+ filters: opts.Filters(),
}
}
@@ -62,6 +67,7 @@ func (b *completeTagsResultBuilder) Add(tagResult *CompleteTagsResult) error {
b.metadata = b.metadata.CombineMetadata(tagResult.Metadata)
if nameOnly {
+ completedTags = filterNames(completedTags, b.filters)
for _, tag := range completedTags {
b.tagBuilders[string(tag.Name)] = completedTagBuilder{}
}
@@ -69,6 +75,7 @@ func (b *completeTagsResultBuilder) Add(tagResult *CompleteTagsResult) error {
return nil
}
+ completedTags = filterTags(completedTags, b.filters)
for _, tag := range completedTags {
if builder, exists := b.tagBuilders[string(tag.Name)]; exists {
builder.add(tag.Values)
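
The two filter hooks above give the builder mode-specific semantics: in name-only mode a valueless filter removes the whole tag name, while in full mode value filters prune matching values (and a valueless filter empties the tag entirely, dropping it). A hedged sketch of the intended call pattern, using this package's types:

filters := models.Filters{
	{Name: []byte("region")},                                  // valueless: drop the whole tag
	{Name: []byte("env"), Values: [][]byte{[]byte("canary")}}, // drop only this value
}

builder := NewCompleteTagsResultBuilder(false, models.NewTagOptions().SetFilters(filters))
_ = builder.Add(&CompleteTagsResult{
	CompleteNameOnly: false,
	Metadata:         block.NewResultMetadata(),
	CompletedTags: []CompletedTag{
		{Name: []byte("env"), Values: [][]byte{[]byte("canary"), []byte("prod")}},
		{Name: []byte("region"), Values: [][]byte{[]byte("us-east")}},
	},
})

res := builder.Build() // expected: env => [prod]; region filtered out entirely
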
diff --git a/src/query/storage/completed_tags_test.go b/src/query/storage/m3/consolidators/completed_tags_test.go
similarity index 66%
rename from src/query/storage/completed_tags_test.go
rename to src/query/storage/m3/consolidators/completed_tags_test.go
index 3f29d6e23f..197034f6fd 100644
--- a/src/query/storage/completed_tags_test.go
+++ b/src/query/storage/m3/consolidators/completed_tags_test.go
@@ -18,7 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package storage
+package consolidators
import (
"fmt"
@@ -26,8 +26,10 @@ import (
"testing"
"github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/models"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func strsToBytes(str []string) [][]byte {
@@ -85,7 +87,7 @@ func TestMergeCompletedTag(t *testing.T) {
func TestMergeCompletedTagResultDifferentNameTypes(t *testing.T) {
nameOnlyVals := []bool{true, false}
for _, nameOnly := range nameOnlyVals {
- builder := NewCompleteTagsResultBuilder(nameOnly)
+ builder := NewCompleteTagsResultBuilder(nameOnly, models.NewTagOptions())
err := builder.Add(&CompleteTagsResult{
CompleteNameOnly: !nameOnly,
})
@@ -97,7 +99,7 @@ func TestMergeCompletedTagResultDifferentNameTypes(t *testing.T) {
func TestMergeEmptyCompletedTagResult(t *testing.T) {
nameOnlyVals := []bool{true, false}
for _, nameOnly := range nameOnlyVals {
- builder := NewCompleteTagsResultBuilder(nameOnly)
+ builder := NewCompleteTagsResultBuilder(nameOnly, models.NewTagOptions())
actual := builder.Build()
expected := CompleteTagsResult{
CompleteNameOnly: nameOnly,
@@ -186,7 +188,7 @@ func TestMergeCompletedTagResult(t *testing.T) {
for _, nameOnly := range nameOnlyVals {
for _, tt := range testMergeCompletedTags {
t.Run(fmt.Sprintf("%s_%t", tt.name, nameOnly), func(t *testing.T) {
- builder := NewCompleteTagsResultBuilder(nameOnly)
+ builder := NewCompleteTagsResultBuilder(nameOnly, models.NewTagOptions())
for _, incoming := range tt.incoming {
result := mapToCompletedTag(nameOnly, incoming)
err := builder.Add(&result)
@@ -206,23 +208,10 @@ func TestMergeCompletedTagResult(t *testing.T) {
}
}
-var exhaustTests = []struct {
- name string
- exhaustives []bool
- expected bool
-}{
- {"single exhaustive", []bool{true}, true},
- {"single non-exhaustive", []bool{false}, false},
- {"multiple exhaustive", []bool{true, true}, true},
- {"multiple non-exhaustive", []bool{false, false}, false},
- {"some exhaustive", []bool{true, false}, false},
- {"mixed", []bool{true, false, true}, false},
-}
-
func TestMetaMerge(t *testing.T) {
for _, nameOnly := range []bool{true, false} {
for _, tt := range exhaustTests {
- builder := NewCompleteTagsResultBuilder(nameOnly)
+ builder := NewCompleteTagsResultBuilder(nameOnly, models.NewTagOptions())
t.Run(fmt.Sprintf("%s_%v", tt.name, nameOnly), func(t *testing.T) {
for _, ex := range tt.exhaustives {
meta := block.NewResultMetadata()
@@ -239,3 +228,84 @@ func TestMetaMerge(t *testing.T) {
}
}
}
+
+func TestCompleteTagNameFilter(t *testing.T) {
+ filters := models.Filters{
+ models.Filter{Name: b("foo")},
+ models.Filter{Name: b("bar"), Values: [][]byte{b("baz"), b("qux")}},
+ }
+
+ opts := models.NewTagOptions().SetFilters(filters)
+ builder := NewCompleteTagsResultBuilder(true, opts)
+ assert.NoError(t, builder.Add(&CompleteTagsResult{
+ CompleteNameOnly: true,
+ Metadata: block.NewResultMetadata(),
+ CompletedTags: []CompletedTag{
+ {Name: b("foo")}, {Name: b("qux")}, {Name: b("bar")},
+ },
+ }))
+
+ assert.NoError(t, builder.Add(&CompleteTagsResult{
+ CompleteNameOnly: true,
+ Metadata: block.NewResultMetadata(),
+ CompletedTags: []CompletedTag{
+ {Name: b("foo")},
+ },
+ }))
+
+ res := builder.Build()
+ require.True(t, res.CompleteNameOnly)
+ require.Equal(t, 2, len(res.CompletedTags))
+ sort.Sort(completedTagsByName(res.CompletedTags))
+
+ assert.Equal(t, b("bar"), res.CompletedTags[0].Name)
+ assert.Equal(t, b("qux"), res.CompletedTags[1].Name)
+}
+
+func TestCompleteTagFilter(t *testing.T) {
+ filters := models.Filters{
+ models.Filter{Name: b("foo"), Values: [][]byte{b("bar")}},
+ models.Filter{Name: b("bar"), Values: [][]byte{b("baz"), b("qux")}},
+ models.Filter{Name: b("qux")},
+ }
+
+ opts := models.NewTagOptions().SetFilters(filters)
+ builder := NewCompleteTagsResultBuilder(false, opts)
+ assert.NoError(t, builder.Add(&CompleteTagsResult{
+ CompleteNameOnly: false,
+ Metadata: block.NewResultMetadata(),
+ CompletedTags: []CompletedTag{
+ {Name: b("foo"), Values: [][]byte{b("bar"), b("foobar")}},
+ {Name: b("bar"), Values: [][]byte{b("qux"), b("baz")}},
+ {Name: b("qux"), Values: [][]byte{b("abc"), b("def")}},
+ },
+ }))
+
+ assert.NoError(t, builder.Add(&CompleteTagsResult{
+ CompleteNameOnly: false,
+ Metadata: block.NewResultMetadata(),
+ CompletedTags: []CompletedTag{
+ {Name: b("foo"), Values: [][]byte{b("bar"), b("foofoo")}},
+ {Name: b("qux"), Values: [][]byte{b("xyz")}},
+ {Name: b("quince"), Values: [][]byte{b("quail"), b("quart")}},
+ },
+ }))
+
+ res := builder.Build()
+ require.False(t, res.CompleteNameOnly)
+ require.Equal(t, 2, len(res.CompletedTags))
+ sort.Sort(completedTagsByName(res.CompletedTags))
+
+ ex := []CompletedTag{
+ CompletedTag{
+ Name: b("foo"),
+ Values: [][]byte{b("foobar"), b("foofoo")},
+ },
+ CompletedTag{
+ Name: b("quince"),
+ Values: [][]byte{b("quail"), b("quart")},
+ },
+ }
+
+ assert.Equal(t, ex, res.CompletedTags)
+}
diff --git a/src/query/storage/m3/consolidators/convert.go b/src/query/storage/m3/consolidators/convert.go
new file mode 100644
index 0000000000..03eb39515b
--- /dev/null
+++ b/src/query/storage/m3/consolidators/convert.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/x/ident"
+)
+
+// FromIdentTagIteratorToTags converts ident tags to coordinator tags.
+func FromIdentTagIteratorToTags(
+ identTags ident.TagIterator,
+ tagOptions models.TagOptions,
+) (models.Tags, error) {
+ tags := models.NewTags(identTags.Remaining(), tagOptions)
+ for identTags.Next() {
+ identTag := identTags.Current()
+ tags = tags.AddTag(models.Tag{
+ Name: identTag.Name.Bytes(),
+ Value: identTag.Value.Bytes(),
+ })
+ }
+
+ if err := identTags.Err(); err != nil {
+ return models.EmptyTags(), err
+ }
+
+ return tags, nil
+}
diff --git a/src/query/storage/m3/consolidators/convert_test.go b/src/query/storage/m3/consolidators/convert_test.go
new file mode 100644
index 0000000000..41b2bd56e9
--- /dev/null
+++ b/src/query/storage/m3/consolidators/convert_test.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "testing"
+
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/x/ident"
+ xtest "github.com/m3db/m3/src/x/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFromIdentTagIteratorToTags(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ tagFn := func(n, v string) ident.Tag {
+ return ident.Tag{
+ Name: ident.StringID(n),
+ Value: ident.StringID(v),
+ }
+ }
+
+ it := ident.NewMockTagIterator(ctrl)
+ it.EXPECT().Remaining().Return(2)
+ it.EXPECT().Next().Return(true)
+ it.EXPECT().Current().Return(tagFn("foo", "bar"))
+ it.EXPECT().Next().Return(true)
+ it.EXPECT().Current().Return(tagFn("baz", "qux"))
+ it.EXPECT().Next().Return(false)
+ it.EXPECT().Err().Return(nil)
+
+ opts := models.NewTagOptions().SetIDSchemeType(models.TypeQuoted)
+ tags, err := FromIdentTagIteratorToTags(it, opts)
+ require.NoError(t, err)
+ require.Equal(t, 2, tags.Len())
+ assert.Equal(t, `{baz="qux",foo="bar"}`, string(tags.ID()))
+ assert.Equal(t, []byte("__name__"), tags.Opts.MetricName())
+}
diff --git a/src/query/storage/m3/consolidators/fetch_result_map_gen.go b/src/query/storage/m3/consolidators/fetch_result_map_gen.go
new file mode 100644
index 0000000000..3ded87a48d
--- /dev/null
+++ b/src/query/storage/m3/consolidators/fetch_result_map_gen.go
@@ -0,0 +1,275 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// This file was automatically generated by genny.
+// Any changes will be lost if this file is regenerated.
+// see https://github.com/mauricelam/genny
+
+package consolidators
+
+import (
+ "github.com/m3db/m3/src/query/models"
+)
+
+// Copyright (c) 2018 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// fetchResultMapHash is the hash for a given map entry, this is public to support
+// iterating over the map using a native Go for loop.
+type fetchResultMapHash uint64
+
+// fetchResultMapHashFn is the hash function to execute when hashing a key.
+type fetchResultMapHashFn func(models.Tags) fetchResultMapHash
+
+// fetchResultMapEqualsFn is the equals key function to execute when detecting equality of a key.
+type fetchResultMapEqualsFn func(models.Tags, models.Tags) bool
+
+// fetchResultMapCopyFn is the copy key function to execute when copying the key.
+type fetchResultMapCopyFn func(models.Tags) models.Tags
+
+// fetchResultMapFinalizeFn is the finalize key function to execute when finished with a key.
+type fetchResultMapFinalizeFn func(models.Tags)
+
+// fetchResultMap uses the genny package to provide a generic hash map that can be specialized
+// by running the following command from this root of the repository:
+// ```
+// make hashmap-gen pkg=outpkg key_type=Type value_type=Type out_dir=/tmp
+// ```
+// Or if you would like to use bytes or ident.ID as keys you can use the
+// partially specialized maps to generate your own maps as well:
+// ```
+// make byteshashmap-gen pkg=outpkg value_type=Type out_dir=/tmp
+// make idhashmap-gen pkg=outpkg value_type=Type out_dir=/tmp
+// ```
+// This will output to stdout the generated source file to use for your map.
+// It uses linear probing by incrementing the number of the hash created when
+// hashing the identifier if there is a collision.
+// fetchResultMap is a value type and not an interface to allow for less painful
+// upgrades when adding/removing methods, it is not likely to need mocking so
+// an interface would not be super useful either.
+type fetchResultMap struct {
+ _fetchResultMapOptions
+
+ // lookup uses hash of the identifier for the key and the MapEntry value
+ // wraps the value type and the key (used to ensure lookup is correct
+ // when dealing with collisions), we use uint64 for the hash partially
+ // because lookups of maps with uint64 keys has a fast path for Go.
+ lookup map[fetchResultMapHash]fetchResultMapEntry
+}
+
+// _fetchResultMapOptions is a set of options used when creating an identifier map, it is kept
+// private so that implementers of the generated map can specify their own options
+// that partially fulfill these options.
+type _fetchResultMapOptions struct {
+ // hash is the hash function to execute when hashing a key.
+ hash fetchResultMapHashFn
+ // equals is the equals key function to execute when detecting equality.
+ equals fetchResultMapEqualsFn
+ // copy is the copy key function to execute when copying the key.
+ copy fetchResultMapCopyFn
+ // finalize is the finalize key function to execute when finished with a
+ // key, this is optional to specify.
+ finalize fetchResultMapFinalizeFn
+ // initialSize is the initial size for the map, use zero to use Go's std map
+ // initial size and consequently is optional to specify.
+ initialSize int
+}
+
+// fetchResultMapEntry is an entry in the map, this is public to support iterating
+// over the map using a native Go for loop.
+type fetchResultMapEntry struct {
+ // key is used to check equality on lookups to resolve collisions
+ key _fetchResultMapKey
+ // value type stored
+ value multiResultSeries
+}
+
+type _fetchResultMapKey struct {
+ key models.Tags
+ finalize bool
+}
+
+// Key returns the map entry key.
+func (e fetchResultMapEntry) Key() models.Tags {
+ return e.key.key
+}
+
+// Value returns the map entry value.
+func (e fetchResultMapEntry) Value() multiResultSeries {
+ return e.value
+}
+
+// _fetchResultMapAlloc is a non-exported function so that when generating the source code
+// for the map you can supply a public constructor that sets the correct
+// hash, equals, copy, finalize options without users of the map needing to
+// implement them themselves.
+func _fetchResultMapAlloc(opts _fetchResultMapOptions) *fetchResultMap {
+ m := &fetchResultMap{_fetchResultMapOptions: opts}
+ m.Reallocate()
+ return m
+}
+
+func (m *fetchResultMap) newMapKey(k models.Tags, opts _fetchResultMapKeyOptions) _fetchResultMapKey {
+ key := _fetchResultMapKey{key: k, finalize: opts.finalizeKey}
+ if !opts.copyKey {
+ return key
+ }
+
+ key.key = m.copy(k)
+ return key
+}
+
+func (m *fetchResultMap) removeMapKey(hash fetchResultMapHash, key _fetchResultMapKey) {
+ delete(m.lookup, hash)
+ if key.finalize {
+ m.finalize(key.key)
+ }
+}
+
+// Get returns a value in the map for an identifier if found.
+func (m *fetchResultMap) Get(k models.Tags) (multiResultSeries, bool) {
+ hash := m.hash(k)
+ for entry, ok := m.lookup[hash]; ok; entry, ok = m.lookup[hash] {
+ if m.equals(entry.key.key, k) {
+ return entry.value, true
+ }
+ // Linear probe to "next" to this entry (really a rehash)
+ hash++
+ }
+ var empty multiResultSeries
+ return empty, false
+}
+
+// Set will set the value for an identifier.
+func (m *fetchResultMap) Set(k models.Tags, v multiResultSeries) {
+ m.set(k, v, _fetchResultMapKeyOptions{
+ copyKey: true,
+ finalizeKey: m.finalize != nil,
+ })
+}
+
+// fetchResultMapSetUnsafeOptions is a set of options to use when setting a value with
+// the SetUnsafe method.
+type fetchResultMapSetUnsafeOptions struct {
+ NoCopyKey bool
+ NoFinalizeKey bool
+}
+
+// SetUnsafe will set the value for an identifier with unsafe options for how
+// the map treats the key.
+func (m *fetchResultMap) SetUnsafe(k models.Tags, v multiResultSeries, opts fetchResultMapSetUnsafeOptions) {
+ m.set(k, v, _fetchResultMapKeyOptions{
+ copyKey: !opts.NoCopyKey,
+ finalizeKey: !opts.NoFinalizeKey,
+ })
+}
+
+type _fetchResultMapKeyOptions struct {
+ copyKey bool
+ finalizeKey bool
+}
+
+func (m *fetchResultMap) set(k models.Tags, v multiResultSeries, opts _fetchResultMapKeyOptions) {
+ hash := m.hash(k)
+ for entry, ok := m.lookup[hash]; ok; entry, ok = m.lookup[hash] {
+ if m.equals(entry.key.key, k) {
+ m.lookup[hash] = fetchResultMapEntry{
+ key: entry.key,
+ value: v,
+ }
+ return
+ }
+ // Linear probe to "next" to this entry (really a rehash)
+ hash++
+ }
+
+ m.lookup[hash] = fetchResultMapEntry{
+ key: m.newMapKey(k, opts),
+ value: v,
+ }
+}
+
+// Iter provides the underlying map to allow for using a native Go for loop
+// to iterate the map, however callers should only ever read and not write
+// the map.
+func (m *fetchResultMap) Iter() map[fetchResultMapHash]fetchResultMapEntry {
+ return m.lookup
+}
+
+// Len returns the number of map entries in the map.
+func (m *fetchResultMap) Len() int {
+ return len(m.lookup)
+}
+
+// Contains returns true if value exists for key, false otherwise, it is
+// shorthand for a call to Get that doesn't return the value.
+func (m *fetchResultMap) Contains(k models.Tags) bool {
+ _, ok := m.Get(k)
+ return ok
+}
+
+// Delete will remove a value set in the map for the specified key.
+func (m *fetchResultMap) Delete(k models.Tags) {
+ hash := m.hash(k)
+ for entry, ok := m.lookup[hash]; ok; entry, ok = m.lookup[hash] {
+ if m.equals(entry.key.key, k) {
+ m.removeMapKey(hash, entry.key)
+ return
+ }
+ // Linear probe to "next" to this entry (really a rehash)
+ hash++
+ }
+}
+
+// Reset will reset the map by simply deleting all keys to avoid
+// allocating a new map.
+func (m *fetchResultMap) Reset() {
+ for hash, entry := range m.lookup {
+ m.removeMapKey(hash, entry.key)
+ }
+}
+
+// Reallocate will avoid deleting all keys and reallocate a new
+// map, this is useful if you believe you have a large map and
+// will not need to grow back to a similar size.
+func (m *fetchResultMap) Reallocate() {
+ if m.initialSize > 0 {
+ m.lookup = make(map[fetchResultMapHash]fetchResultMapEntry, m.initialSize)
+ } else {
+ m.lookup = make(map[fetchResultMapHash]fetchResultMapEntry)
+ }
+}
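
The probing scheme the generated comments describe is easiest to see in isolation: a collision simply advances the hash by one and retries until a matching key or an empty slot is found. A standalone illustrative sketch (string keys here; the generated map uses models.Tags and a real hash function):

type probeEntry struct {
	key   string
	value int
}

// probeSet mirrors the generated set loop: walk occupied slots, replace on
// key match, otherwise keep incrementing the hash until a free slot appears.
func probeSet(lookup map[uint64]probeEntry, hash func(string) uint64, k string, v int) {
	h := hash(k)
	for e, ok := lookup[h]; ok; e, ok = lookup[h] {
		if e.key == k {
			lookup[h] = probeEntry{key: k, value: v}
			return
		}
		h++ // linear probe on collision
	}
	lookup[h] = probeEntry{key: k, value: v}
}
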
diff --git a/src/query/storage/m3/consolidators/fetch_result_map_wrapper.go b/src/query/storage/m3/consolidators/fetch_result_map_wrapper.go
new file mode 100644
index 0000000000..40f910c392
--- /dev/null
+++ b/src/query/storage/m3/consolidators/fetch_result_map_wrapper.go
@@ -0,0 +1,90 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "bytes"
+ "sort"
+
+ "github.com/m3db/m3/src/query/models"
+)
+
+type ascByID []multiResultSeries
+
+func (m ascByID) Len() int { return len(m) }
+func (m ascByID) Less(i, j int) bool {
+ return bytes.Compare(m[i].tags.LastComputedID(),
+ m[j].tags.LastComputedID()) == -1
+}
+func (m ascByID) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+type fetchResultMapWrapper struct {
+ resultMap *fetchResultMap
+}
+
+func (w *fetchResultMapWrapper) len() int {
+ return w.resultMap.Len()
+}
+
+func (w *fetchResultMapWrapper) list() []multiResultSeries {
+ result := make([]multiResultSeries, 0, w.len())
+ for _, results := range w.resultMap.Iter() {
+ result = append(result, results.value)
+ }
+
+ sort.Sort(ascByID(result))
+ return result
+}
+
+func (w *fetchResultMapWrapper) get(tags models.Tags) (multiResultSeries, bool) {
+ return w.resultMap.Get(tags)
+}
+
+func (w *fetchResultMapWrapper) close() {
+ w.resultMap.Reset()
+}
+
+func (w *fetchResultMapWrapper) set(
+ tags models.Tags, series multiResultSeries,
+) {
+ series.tags = tags
+ w.resultMap.SetUnsafe(tags, series, fetchResultMapSetUnsafeOptions{
+ NoCopyKey: true,
+ NoFinalizeKey: true,
+ })
+}
+
+// newFetchResultMapWrapper builds a wrapper on fetchResultMap functions.
+func newFetchResultMapWrapper(size int) *fetchResultMapWrapper {
+ return &fetchResultMapWrapper{
+ resultMap: _fetchResultMapAlloc(_fetchResultMapOptions{
+ hash: func(t models.Tags) fetchResultMapHash {
+ return fetchResultMapHash(t.LastComputedHashedID())
+ },
+ equals: func(x, y models.Tags) bool {
+ // NB: IDs are computed once per tags instance, so subsequent
+ // equals calls reduce to a cached byte comparison.
+ return bytes.Equal(x.LastComputedID(), y.LastComputedID())
+ },
+ initialSize: size,
+ }),
+ }
+}
diff --git a/src/query/storage/m3/consolidators/fetch_result_map_wrapper_test.go b/src/query/storage/m3/consolidators/fetch_result_map_wrapper_test.go
new file mode 100644
index 0000000000..be162b401c
--- /dev/null
+++ b/src/query/storage/m3/consolidators/fetch_result_map_wrapper_test.go
@@ -0,0 +1,88 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/x/ident"
+ xtest "github.com/m3db/m3/src/x/test"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFetchResultMapWrapper(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ size := 4
+ fetchMap := newFetchResultMapWrapper(size)
+ assert.Equal(t, 0, fetchMap.len())
+ assert.Equal(t, 0, len(fetchMap.list()))
+ _, found := fetchMap.get(models.EmptyTags())
+ assert.False(t, found)
+
+ tagName := "tag"
+ tags := func(i int) models.Tags {
+ return models.MustMakeTags(tagName, fmt.Sprint(i))
+ }
+
+ series := func(i int) encoding.SeriesIterator {
+ it := encoding.NewMockSeriesIterator(ctrl)
+ it.EXPECT().ID().Return(ident.StringID(fmt.Sprint(i))).AnyTimes()
+ return it
+ }
+
+ for i := 0; i < size*2; i++ {
+ fetchMap.set(tags(i), multiResultSeries{iter: series(i)})
+ }
+
+ assert.Equal(t, 8, fetchMap.len())
+ assert.Equal(t, 8, len(fetchMap.list()))
+ for i, l := range fetchMap.list() {
+ ex := fmt.Sprint(i)
+ assert.Equal(t, ex, l.iter.ID().String())
+ v, found := l.tags.Get([]byte(tagName))
+ require.True(t, found)
+ assert.Equal(t, fmt.Sprint(i), string(v))
+ }
+
+ // Overwrite tag 7.
+ fetchMap.set(tags(7), multiResultSeries{iter: series(700)})
+
+ assert.Equal(t, 8, fetchMap.len())
+ assert.Equal(t, 8, len(fetchMap.list()))
+ for i, l := range fetchMap.list() {
+ ex := fmt.Sprint(i)
+ if i == 7 {
+ assert.Equal(t, "700", l.iter.ID().String())
+ } else {
+ assert.Equal(t, ex, l.iter.ID().String())
+ }
+
+ v, found := l.tags.Get([]byte(tagName))
+ require.True(t, found)
+ assert.Equal(t, fmt.Sprint(i), string(v))
+ }
+}
diff --git a/src/query/storage/m3/consolidators/filter.go b/src/query/storage/m3/consolidators/filter.go
new file mode 100644
index 0000000000..39f94c75a5
--- /dev/null
+++ b/src/query/storage/m3/consolidators/filter.go
@@ -0,0 +1,149 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "bytes"
+
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/x/ident"
+)
+
+func filterTagIterator(
+ iter ident.TagIterator,
+ filters models.Filters,
+) (bool, error) {
+ shouldFilter := shouldFilterTagIterator(iter, filters)
+ return shouldFilter, iter.Err()
+}
+
+func shouldFilterTagIterator(
+ iter ident.TagIterator,
+ filters models.Filters,
+) bool {
+ if len(filters) == 0 || iter.Remaining() == 0 {
+ return false
+ }
+
+ // NB: rewind iterator for re-use.
+ defer iter.Rewind()
+ for iter.Next() {
+ tag := iter.Current()
+
+ name := tag.Name.Bytes()
+ value := tag.Value.Bytes()
+ for _, f := range filters {
+ if !bytes.Equal(name, f.Name) {
+ continue
+ }
+
+ // Zero-length Values implies filtering out every value for this name.
+ if len(f.Values) == 0 {
+ return true
+ }
+
+ for _, filterValue := range f.Values {
+ if bytes.Equal(filterValue, value) {
+ return true
+ }
+ }
+ }
+ }
+
+ return false
+}
+
+func filterNames(tags []CompletedTag, filters models.Filters) []CompletedTag {
+ if len(filters) == 0 || len(tags) == 0 {
+ return tags
+ }
+
+ filteredTags := tags[:0]
+ for _, tag := range tags {
+ skip := false
+ for _, f := range filters {
+ if len(f.Values) != 0 {
+ // If this has filter values, it is not a name filter, and the result
+ // is valid.
+ continue
+ }
+
+ if bytes.Equal(tag.Name, f.Name) {
+ skip = true
+ break
+ }
+ }
+
+ if !skip {
+ filteredTags = append(filteredTags, tag)
+ }
+ }
+
+ return filteredTags
+}
+
+func filterTags(tags []CompletedTag, filters models.Filters) []CompletedTag {
+ if len(filters) == 0 || len(tags) == 0 {
+ return tags
+ }
+
+ filteredTags := tags[:0]
+ for _, tag := range tags {
+ for _, f := range filters {
+ if !bytes.Equal(tag.Name, f.Name) {
+ continue
+ }
+
+ // NB: Name filter matches.
+ if len(f.Values) == 0 {
+ tag.Values = tag.Values[:0]
+ break
+ }
+
+ filteredValues := tag.Values[:0]
+ for _, value := range tag.Values {
+ skip := false
+ for _, filterValue := range f.Values {
+ if bytes.Equal(filterValue, value) {
+ skip = true
+ break
+ }
+ }
+
+ if !skip {
+ filteredValues = append(filteredValues, value)
+ }
+ }
+
+ tag.Values = filteredValues
+ break
+ }
+
+ if len(tag.Values) == 0 {
+ // NB: all values for this tag are invalid.
+ continue
+ }
+
+ filteredTags = append(filteredTags, tag)
+ }
+
+ return filteredTags
+}
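
Both helpers above filter in place by re-slicing the input to zero length over the same backing array, avoiding an allocation at the cost of consuming the caller's slice. The idiom in isolation:

nums := []int{1, 2, 3, 4}
evens := nums[:0] // shares nums' backing array
for _, n := range nums {
	if n%2 == 0 {
		// Safe: the write index never overtakes the read index.
		evens = append(evens, n)
	}
}
// evens == [2 4]; nums must not be read again after this.
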
diff --git a/src/query/storage/m3/consolidators/filter_test.go b/src/query/storage/m3/consolidators/filter_test.go
new file mode 100644
index 0000000000..10bdcfea79
--- /dev/null
+++ b/src/query/storage/m3/consolidators/filter_test.go
@@ -0,0 +1,124 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "testing"
+
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/x/ident"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type tag struct {
+ name string
+ value string
+}
+
+func b(s string) []byte { return []byte(s) }
+
+func TestFilterTagIterator(t *testing.T) {
+ it := ident.MustNewTagStringsIterator(
+ "foo", "bar",
+ "qux", "qaz",
+ )
+
+ tests := []struct {
+ ex bool
+ filters models.Filters
+ }{
+ {true, models.Filters{{Name: b("foo")}}},
+ {true, models.Filters{{Name: b("qux")}}},
+ {false, models.Filters{{Name: b("bar")}}},
+
+ {true, models.Filters{{Name: b("foo"),
+ Values: [][]byte{b("bar")}}}},
+ {true, models.Filters{{Name: b("foo"),
+ Values: [][]byte{b("qaz"), b("bar")}}}},
+ {false, models.Filters{{Name: b("foo"),
+ Values: [][]byte{b("qaz")}}}},
+ {false, models.Filters{{Name: b("foo"),
+ Values: [][]byte{b("qaz"), b("quince")}}}},
+
+ {true, models.Filters{{Name: b("qux"),
+ Values: [][]byte{b("qaz")}}}},
+ }
+
+ for _, tt := range tests {
+ shouldFilter, err := filterTagIterator(it, tt.filters)
+ assert.NoError(t, err)
+ assert.Equal(t, tt.ex, shouldFilter)
+ }
+
+ ex := []tag{
+ {name: "foo", value: "bar"},
+ {name: "qux", value: "qaz"},
+ }
+
+ // NB: assert the iterator is rewound and iterable as normal.
+ for i := 0; it.Next(); i++ {
+ tag := it.Current()
+ assert.Equal(t, ex[i].name, tag.Name.String())
+ assert.Equal(t, ex[i].value, tag.Value.String())
+ }
+
+ require.NoError(t, it.Err())
+}
+
+func TestFilterTags(t *testing.T) {
+ tags := []CompletedTag{
+ CompletedTag{Name: b("foo"), Values: [][]byte{b("bar"), b("baz")}},
+ CompletedTag{Name: b("qux"), Values: [][]byte{b("quart"), b("quince")}},
+ CompletedTag{Name: b("abc"), Values: [][]byte{b("def")}},
+ }
+
+ filtered := filterTags(tags, models.Filters{
+ {Name: b("foo")},
+ {Name: b("qux"), Values: [][]byte{b("bar"), b("quince")}},
+ {Name: b("abc"), Values: [][]byte{b("def")}},
+ })
+
+ require.Equal(t, 1, len(filtered))
+ assert.Equal(t, b("qux"), filtered[0].Name)
+ require.Equal(t, 1, len(filtered[0].Values))
+ assert.Equal(t, b("quart"), filtered[0].Values[0])
+}
+
+func TestFilterTagNames(t *testing.T) {
+ tags := []CompletedTag{
+ CompletedTag{Name: b("foo")},
+ CompletedTag{Name: b("qux")},
+ CompletedTag{Name: b("quail")},
+ }
+
+ filtered := filterNames(tags, models.Filters{
+ {Name: b("foo"), Values: [][]byte{b("bar")}},
+ {Name: b("qux")},
+ })
+
+ require.Equal(t, 2, len(filtered))
+ assert.Equal(t, b("foo"), filtered[0].Name)
+ require.Equal(t, 0, len(filtered[0].Values))
+ assert.Equal(t, b("quail"), filtered[1].Name)
+ require.Equal(t, 0, len(filtered[1].Values))
+}
diff --git a/src/query/storage/m3/consolidators/id_dedupe_map.go b/src/query/storage/m3/consolidators/id_dedupe_map.go
new file mode 100644
index 0000000000..ba3171724b
--- /dev/null
+++ b/src/query/storage/m3/consolidators/id_dedupe_map.go
@@ -0,0 +1,107 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "fmt"
+
+ "github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
+)
+
+type idDedupeMap struct {
+ fanout QueryFanoutType
+ series map[string]multiResultSeries
+ tagOpts models.TagOptions
+}
+
+func newIDDedupeMap(opts tagMapOpts) fetchDedupeMap {
+ return &idDedupeMap{
+ fanout: opts.fanout,
+ series: make(map[string]multiResultSeries, opts.size),
+ tagOpts: opts.tagOpts,
+ }
+}
+
+func (m *idDedupeMap) close() {}
+
+func (m *idDedupeMap) list() []multiResultSeries {
+ result := make([]multiResultSeries, 0, len(m.series))
+ for _, s := range m.series {
+ result = append(result, s)
+ }
+ return result
+}
+
+func (m *idDedupeMap) add(
+ iter encoding.SeriesIterator,
+ attrs storagemetadata.Attributes,
+) error {
+ id := iter.ID().String()
+
+ tags, err := FromIdentTagIteratorToTags(iter.Tags(), m.tagOpts)
+ if err != nil {
+ return err
+ }
+
+ iter.Tags().Rewind()
+ existing, exists := m.series[id]
+ if !exists {
+ // Does not exist, new addition
+ m.series[id] = multiResultSeries{
+ attrs: attrs,
+ iter: iter,
+ tags: tags,
+ }
+ return nil
+ }
+
+ var existsBetter bool
+ switch m.fanout {
+ case NamespaceCoversAllQueryRange:
+ // Already exists and resolution of result we are adding is not as precise
+ existsBetter = existing.attrs.Resolution <= attrs.Resolution
+ case NamespaceCoversPartialQueryRange:
+ // Already exists and either has longer retention, or the same retention
+ // and result we are adding is not as precise
+ existsLongerRetention := existing.attrs.Retention > attrs.Retention
+ existsSameRetentionEqualOrBetterResolution :=
+ existing.attrs.Retention == attrs.Retention &&
+ existing.attrs.Resolution <= attrs.Resolution
+ existsBetter = existsLongerRetention || existsSameRetentionEqualOrBetterResolution
+ default:
+ return fmt.Errorf("unknown query fanout type: %d", m.fanout)
+ }
+ if existsBetter {
+ // Existing result is already better
+ return nil
+ }
+
+ // Override
+ m.series[id] = multiResultSeries{
+ attrs: attrs,
+ iter: iter,
+ tags: tags,
+ }
+
+ return nil
+}
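
The precedence rules in add read as follows: when one namespace covers the whole query range, only resolution matters (finer wins); for partial coverage, longer retention wins first and resolution only breaks retention ties. A sketch of that comparison extracted into a standalone helper (illustrative; the authoritative logic is the switch above):

// existingWins reports whether the stored series should be kept over the
// incoming one for the given fanout type.
func existingWins(fanout QueryFanoutType, existing, incoming storagemetadata.Attributes) bool {
	switch fanout {
	case NamespaceCoversAllQueryRange:
		return existing.Resolution <= incoming.Resolution
	case NamespaceCoversPartialQueryRange:
		return existing.Retention > incoming.Retention ||
			(existing.Retention == incoming.Retention &&
				existing.Resolution <= incoming.Resolution)
	}
	return false
}

For example, under NamespaceCoversPartialQueryRange a 1-year/10m series is kept over a 30-day/1m one, since retention is compared before resolution.
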
diff --git a/src/query/storage/m3/consolidators/match_type.go b/src/query/storage/m3/consolidators/match_type.go
new file mode 100644
index 0000000000..8574be9e42
--- /dev/null
+++ b/src/query/storage/m3/consolidators/match_type.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ defaultMatchType MatchType = MatchIDs
+)
+
+func (t MatchType) String() string {
+ switch t {
+ case MatchIDs:
+ return "ids"
+ case MatchTags:
+ return "tags"
+ }
+ return "unknown"
+}
+
+var validMatchTypes = []MatchType{
+ MatchIDs,
+ MatchTags,
+}
+
+// UnmarshalYAML unmarshals a MatchType from a string, validating it against the supported values.
+func (t *MatchType) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var str string
+ if err := unmarshal(&str); err != nil {
+ return err
+ }
+
+ if str == "" {
+ *t = defaultMatchType
+ return nil
+ }
+
+ strs := make([]string, 0, len(validMatchTypes))
+ for _, valid := range validMatchTypes {
+ if str == valid.String() {
+ *t = valid
+ return nil
+ }
+
+ strs = append(strs, "'"+valid.String()+"'")
+ }
+
+ return fmt.Errorf("invalid MatchType '%s', valid types are: %s",
+ str, strings.Join(strs, ", "))
+}
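
Because MatchType satisfies the yaml.Unmarshaler signature, it can be embedded directly in a config struct; an empty value falls back to the MatchIDs default and unknown strings produce the error above. A small usage sketch, assuming gopkg.in/yaml.v2:

import (
	"log"

	yaml "gopkg.in/yaml.v2"
)

type exampleConfig struct {
	MatchType MatchType `yaml:"matchType"`
}

func loadExample() MatchType {
	var cfg exampleConfig
	if err := yaml.Unmarshal([]byte("matchType: tags"), &cfg); err != nil {
		log.Fatal(err) // e.g. invalid MatchType 'foo', valid types are: 'ids', 'tags'
	}
	return cfg.MatchType // MatchTags
}
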
diff --git a/src/query/storage/m3/multi_fetch_result.go b/src/query/storage/m3/consolidators/multi_fetch_result.go
similarity index 56%
rename from src/query/storage/m3/multi_fetch_result.go
rename to src/query/storage/m3/consolidators/multi_fetch_result.go
index 9988d4a4c7..4c5741a231 100644
--- a/src/query/storage/m3/multi_fetch_result.go
+++ b/src/query/storage/m3/consolidators/multi_fetch_result.go
@@ -18,46 +18,60 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package m3
+package consolidators
import (
- "fmt"
"sync"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/query/block"
- "github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
xerrors "github.com/m3db/m3/src/x/errors"
)
-// TODO: use a better seriesIterators merge here
+type fetchDedupeMap interface {
+ add(iter encoding.SeriesIterator, attrs storagemetadata.Attributes) error
+ list() []multiResultSeries
+ close()
+}
+
type multiResult struct {
sync.Mutex
metadata block.ResultMetadata
- fanout queryFanoutType
- seenFirstAttrs storage.Attributes
+ fanout QueryFanoutType
+ seenFirstAttrs storagemetadata.Attributes
seenIters []encoding.SeriesIterators // track known iterators to avoid leaking
mergedIterators encoding.MutableSeriesIterators
- dedupeMap map[string]multiResultSeries
+ mergedTags []*models.Tags
+ dedupeMap fetchDedupeMap
err xerrors.MultiError
+ matchOpts MatchOptions
+ tagOpts models.TagOptions
pools encoding.IteratorPools
}
-func newMultiFetchResult(
- fanout queryFanoutType,
+// NewMultiFetchResult builds a new multi fetch result.
+func NewMultiFetchResult(
+ fanout QueryFanoutType,
pools encoding.IteratorPools,
+ opts MatchOptions,
+ tagOpts models.TagOptions,
) MultiFetchResult {
return &multiResult{
- metadata: block.NewResultMetadata(),
- fanout: fanout,
- pools: pools,
+ metadata: block.NewResultMetadata(),
+ fanout: fanout,
+ pools: pools,
+ matchOpts: opts,
+ tagOpts: tagOpts,
}
}
type multiResultSeries struct {
- attrs storage.Attributes
+ attrs storagemetadata.Attributes
iter encoding.SeriesIterator
+ tags models.Tags
}
func (r *multiResult) Close() error {
@@ -65,10 +79,12 @@ func (r *multiResult) Close() error {
defer r.Unlock()
for _, iters := range r.seenIters {
- iters.Close()
+ if iters != nil {
+ iters.Close()
+ }
}
- r.seenIters = nil
+ r.seenIters = nil
if r.mergedIterators != nil {
// NB(r): Since all the series iterators in the final result are held onto
// by the original iters in the seenIters slice we allow those iterators
@@ -86,24 +102,27 @@ func (r *multiResult) Close() error {
return nil
}
-func (r *multiResult) FinalResultWithAttrs() (SeriesFetchResult,
- []storage.Attributes, error) {
+func (r *multiResult) FinalResultWithAttrs() (
+ SeriesFetchResult, []storagemetadata.Attributes, error,
+) {
result, err := r.FinalResult()
if err != nil {
return result, nil, err
}
- attrs := make([]storage.Attributes, result.SeriesIterators.Len())
- // TODO: add testing around here.
- if r.dedupeMap == nil {
- for i := range attrs {
- attrs[i] = r.seenFirstAttrs
- }
- } else {
- i := 0
- for _, res := range r.dedupeMap {
- attrs[i] = res.attrs
- i++
+ var attrs []storagemetadata.Attributes
+ seriesData := result.seriesData
+ if iters := seriesData.seriesIterators; iters != nil {
+ l := iters.Len()
+ attrs = make([]storagemetadata.Attributes, 0, l)
+ if r.dedupeMap == nil {
+ for i := 0; i < l; i++ {
+ attrs = append(attrs, r.seenFirstAttrs)
+ }
+ } else {
+ for _, res := range r.dedupeMap.list() {
+ attrs = append(attrs, res.attrs)
+ }
}
}
@@ -114,46 +133,49 @@ func (r *multiResult) FinalResult() (SeriesFetchResult, error) {
r.Lock()
defer r.Unlock()
- result := SeriesFetchResult{Metadata: r.metadata}
err := r.err.LastError()
if err != nil {
- return result, err
+ return NewEmptyFetchResult(r.metadata), err
}
if r.mergedIterators != nil {
- result.SeriesIterators = r.mergedIterators
- return result, nil
+ return NewSeriesFetchResult(r.mergedIterators, nil, r.metadata)
}
if len(r.seenIters) == 0 {
- result.SeriesIterators = encoding.EmptySeriesIterators
- return result, nil
- }
-
- // can short-cicuit in this case
- if len(r.seenIters) == 1 {
- result.SeriesIterators = r.seenIters[0]
- return result, nil
+ return NewSeriesFetchResult(encoding.EmptySeriesIterators, nil, r.metadata)
}
// otherwise have to create a new seriesiters
- numSeries := len(r.dedupeMap)
+ dedupedList := r.dedupeMap.list()
+ numSeries := len(dedupedList)
r.mergedIterators = r.pools.MutableSeriesIterators().Get(numSeries)
r.mergedIterators.Reset(numSeries)
+ if r.mergedTags == nil {
+ r.mergedTags = make([]*models.Tags, numSeries)
+ }
+
+ lenCurr, lenNext := len(r.mergedTags), len(dedupedList)
+ if lenCurr < lenNext {
+ // If incoming list is longer, expand the stored list.
+ r.mergedTags = append(r.mergedTags, make([]*models.Tags, lenNext-lenCurr)...)
+ } else if lenCurr > lenNext {
+ // If the incoming list is somehow shorter, shrink the stored list.
+ r.mergedTags = r.mergedTags[:lenNext]
+ }
- i := 0
- for _, res := range r.dedupeMap {
+ for i, res := range dedupedList {
r.mergedIterators.SetAt(i, res.iter)
- i++
+ r.mergedTags[i] = &dedupedList[i].tags
}
- result.SeriesIterators = r.mergedIterators
- return result, nil
+ return NewSeriesFetchResult(r.mergedIterators, r.mergedTags, r.metadata)
}
func (r *multiResult) Add(
- fetchResult SeriesFetchResult,
- attrs storage.Attributes,
+ newIterators encoding.SeriesIterators,
+ metadata block.ResultMetadata,
+ attrs storagemetadata.Attributes,
err error,
) {
r.Lock()
@@ -164,17 +186,20 @@ func (r *multiResult) Add(
return
}
+ if newIterators == nil || newIterators.Len() == 0 {
+ return
+ }
+
if len(r.seenIters) == 0 {
// store the first attributes seen
r.seenFirstAttrs = attrs
- r.metadata = fetchResult.Metadata
+ r.metadata = metadata
} else {
// NB: any non-exhaustive result set added makes the entire
// result non-exhaustive
- r.metadata = r.metadata.CombineMetadata(fetchResult.Metadata)
+ r.metadata = r.metadata.CombineMetadata(metadata)
}
- newIterators := fetchResult.SeriesIterators
r.seenIters = append(r.seenIters, newIterators)
// Need to check the error to bail early after accumulating the iterators
// otherwise when we close the multi fetch result
@@ -183,17 +208,23 @@ func (r *multiResult) Add(
return
}
- if len(r.seenIters) < 2 {
- // don't need to create the de-dupe map until we need to actually need to
- // dedupe between two results
- return
- }
-
- if len(r.seenIters) == 2 {
+ if len(r.seenIters) == 1 {
// need to backfill the dedupe map from the first result first
first := r.seenIters[0]
- r.dedupeMap = make(map[string]multiResultSeries, first.Len())
+ opts := tagMapOpts{
+ fanout: r.fanout,
+ size: first.Len(),
+ tagOpts: r.tagOpts,
+ }
+
+ if r.matchOpts.MatchType == MatchIDs {
+ r.dedupeMap = newIDDedupeMap(opts)
+ } else {
+ r.dedupeMap = newTagDedupeMap(opts)
+ }
+
r.addOrUpdateDedupeMap(r.seenFirstAttrs, first)
+ return
}
// Now de-duplicate
@@ -201,48 +232,24 @@ func (r *multiResult) Add(
}
func (r *multiResult) addOrUpdateDedupeMap(
- attrs storage.Attributes,
+ attrs storagemetadata.Attributes,
newIterators encoding.SeriesIterators,
) {
for _, iter := range newIterators.Iters() {
- id := iter.ID().String()
-
- existing, exists := r.dedupeMap[id]
- if !exists {
- // Does not exist, new addition
- r.dedupeMap[id] = multiResultSeries{
- attrs: attrs,
- iter: iter,
- }
- continue
- }
-
- var existsBetter bool
- switch r.fanout {
- case namespaceCoversAllQueryRange:
- // Already exists and resolution of result we are adding is not as precise
- existsBetter = existing.attrs.Resolution <= attrs.Resolution
- case namespaceCoversPartialQueryRange:
- // Already exists and either has longer retention, or the same retention
- // and result we are adding is not as precise
- existsLongerRetention := existing.attrs.Retention > attrs.Retention
- existsSameRetentionEqualOrBetterResolution :=
- existing.attrs.Retention == attrs.Retention &&
- existing.attrs.Resolution <= attrs.Resolution
- existsBetter = existsLongerRetention || existsSameRetentionEqualOrBetterResolution
- default:
- r.err = r.err.Add(fmt.Errorf("unknown query fanout type: %d", r.fanout))
+ tagIter := iter.Tags()
+ shouldFilter, err := filterTagIterator(tagIter, r.tagOpts.Filters())
+ if err != nil {
+ r.err = r.err.Add(err)
return
}
- if existsBetter {
- // Existing result is already better
+
+ if shouldFilter {
+ // NB: skip here, the closer will free the series iterator regardless.
continue
}
- // Override
- r.dedupeMap[id] = multiResultSeries{
- attrs: attrs,
- iter: iter,
+ if err := r.dedupeMap.add(iter, attrs); err != nil {
+ r.err = r.err.Add(err)
}
}
}
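
// NB: a minimal in-package sketch (not part of this change) of the lazy
// dedupe-map selection performed in Add above; it assumes the tagMapOpts,
// newIDDedupeMap and newTagDedupeMap helpers introduced in this diff, and
// newDedupeMapFor is an illustrative name only.
func newDedupeMapFor(match MatchType, opts tagMapOpts) fetchDedupeMap {
    if match == MatchIDs {
        // Cheap comparison on the series ID string alone.
        return newIDDedupeMap(opts)
    }
    // Full comparison on decoded tags, for when IDs are not a reliable
    // identity across namespaces.
    return newTagDedupeMap(opts)
}
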
diff --git a/src/query/storage/m3/consolidators/multi_fetch_result_tag_test.go b/src/query/storage/m3/consolidators/multi_fetch_result_tag_test.go
new file mode 100644
index 0000000000..9778bb7a19
--- /dev/null
+++ b/src/query/storage/m3/consolidators/multi_fetch_result_tag_test.go
@@ -0,0 +1,358 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
+ xtest "github.com/m3db/m3/src/x/test"
+
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type insertEntry struct {
+ iter encoding.SeriesIterators
+ attr storagemetadata.Attributes
+ meta block.ResultMetadata
+ err error
+}
+
+type dedupeTest struct {
+ name string
+ entries []insertEntry
+ expected []expectedSeries
+ exMeta block.ResultMetadata
+ exErr error
+ exAttrs []storagemetadata.Attributes
+}
+
+type expectedSeries struct {
+ tags []string
+ dps []dp
+}
+
+func TestMultiFetchResultTagDedupeMap(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ start := time.Now().Truncate(time.Hour)
+ step := func(i time.Duration) time.Time { return start.Add(time.Minute * i) }
+ unaggHr := storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
+ Resolution: time.Hour,
+ }
+
+ warn1Meta := block.NewResultMetadata()
+ warn1Meta.AddWarning("warn", "1")
+
+ warn2Meta := block.NewResultMetadata()
+ warn2Meta.AddWarning("warn", "2")
+
+ combinedMeta := warn1Meta.CombineMetadata(warn2Meta)
+
+ tests := []dedupeTest{
+ dedupeTest{
+ name: "same tags, same ids",
+ entries: []insertEntry{
+ {
+ attr: unaggHr,
+ meta: warn1Meta,
+ err: nil,
+ iter: encoding.NewSeriesIterators([]encoding.SeriesIterator{
+ it(ctrl, dp{t: step(1), val: 1}, "id1", "foo", "bar"),
+ it(ctrl, dp{t: step(5), val: 6}, "id1", "foo", "bar"),
+ }, nil),
+ },
+ },
+ expected: []expectedSeries{
+ expectedSeries{
+ tags: []string{"foo", "bar"},
+ dps: []dp{dp{t: step(1), val: 1}, dp{t: step(5), val: 6}},
+ },
+ },
+ exMeta: warn1Meta,
+ exErr: nil,
+ exAttrs: []storagemetadata.Attributes{unaggHr},
+ },
+
+ dedupeTest{
+ name: "same tags, different ids",
+ entries: []insertEntry{
+ {
+ attr: unaggHr,
+ meta: warn1Meta,
+ err: nil,
+ iter: encoding.NewSeriesIterators([]encoding.SeriesIterator{
+ it(ctrl, dp{t: step(1), val: 1}, "id1", "foo", "bar"),
+ it(ctrl, dp{t: step(5), val: 6}, "id2", "foo", "bar"),
+ }, nil),
+ },
+ },
+ expected: []expectedSeries{
+ expectedSeries{
+ tags: []string{"foo", "bar"},
+ dps: []dp{dp{t: step(1), val: 1}, dp{t: step(5), val: 6}},
+ },
+ },
+ exMeta: warn1Meta,
+ exErr: nil,
+ exAttrs: []storagemetadata.Attributes{unaggHr},
+ },
+
+ dedupeTest{
+ name: "different tags, same ids",
+ entries: []insertEntry{
+ {
+ attr: unaggHr,
+ meta: warn1Meta,
+ err: nil,
+ iter: encoding.NewSeriesIterators([]encoding.SeriesIterator{
+ it(ctrl, dp{t: step(1), val: 1}, "id1", "foo", "bar"),
+ it(ctrl, dp{t: step(5), val: 6}, "id1", "foo", "baz"),
+ }, nil),
+ },
+ },
+ expected: []expectedSeries{
+ expectedSeries{
+ tags: []string{"foo", "bar"},
+ dps: []dp{dp{t: step(1), val: 1}},
+ },
+ expectedSeries{
+ tags: []string{"foo", "baz"},
+ dps: []dp{dp{t: step(5), val: 6}},
+ },
+ },
+ exMeta: warn1Meta,
+ exErr: nil,
+ exAttrs: []storagemetadata.Attributes{unaggHr, unaggHr},
+ },
+
+ dedupeTest{
+ name: "one iterator, mixed scenario",
+ entries: []insertEntry{
+ {
+ attr: unaggHr,
+ meta: warn1Meta,
+ err: nil,
+ iter: encoding.NewSeriesIterators([]encoding.SeriesIterator{
+ // Same tags, different IDs.
+ it(ctrl, dp{t: step(1), val: 1}, "id1", "foo", "bar", "qux", "quail"),
+ it(ctrl, dp{t: step(2), val: 2}, "id2", "foo", "bar", "qux", "quail"),
+ // Different tags, same IDs.
+ it(ctrl, dp{t: step(1), val: 3}, "id3", "foo", "bar", "qux", "quart"),
+ it(ctrl, dp{t: step(2), val: 4}, "id3", "foo", "bar", "qux", "quz"),
+ // Same tags same IDs.
+ it(ctrl, dp{t: step(1), val: 5}, "id4", "foo", "bar", "qux", "queen"),
+ it(ctrl, dp{t: step(2), val: 6}, "id4", "foo", "bar", "qux", "queen"),
+ }, nil),
+ },
+ },
+ expected: []expectedSeries{
+ expectedSeries{
+ tags: []string{"foo", "bar", "qux", "quail"},
+ dps: []dp{dp{t: step(1), val: 1}, dp{t: step(2), val: 2}},
+ },
+ expectedSeries{
+ tags: []string{"foo", "bar", "qux", "quart"},
+ dps: []dp{dp{t: step(1), val: 3}},
+ },
+ expectedSeries{
+ tags: []string{"foo", "bar", "qux", "queen"},
+ dps: []dp{dp{t: step(1), val: 5}, dp{t: step(2), val: 6}},
+ },
+ expectedSeries{
+ tags: []string{"foo", "bar", "qux", "quz"},
+ dps: []dp{dp{t: step(2), val: 4}},
+ },
+ },
+ exMeta: warn1Meta,
+ exErr: nil,
+ exAttrs: []storagemetadata.Attributes{unaggHr, unaggHr, unaggHr, unaggHr},
+ },
+
+ dedupeTest{
+ name: "multiple iterators, mixed scenario",
+ entries: []insertEntry{
+ insertEntry{
+ attr: unaggHr,
+ meta: warn1Meta,
+ err: nil,
+ iter: encoding.NewSeriesIterators([]encoding.SeriesIterator{
+ it(ctrl, dp{t: step(1), val: 1}, "id1", "foo", "bar", "qux", "quail"),
+ it(ctrl, dp{t: step(2), val: 2}, "id2", "foo", "bar", "qux", "quail"),
+ }, nil),
+ },
+ insertEntry{
+ attr: unaggHr,
+ meta: warn2Meta,
+ err: nil,
+ iter: encoding.NewSeriesIterators([]encoding.SeriesIterator{
+ it(ctrl, dp{t: step(1), val: 3}, "id3", "foo", "bar", "qux", "quart"),
+ it(ctrl, dp{t: step(2), val: 4}, "id3", "foo", "bar", "qux", "quz"),
+ }, nil),
+ },
+ insertEntry{
+ attr: unaggHr,
+ meta: warn1Meta,
+ err: nil,
+ iter: encoding.NewSeriesIterators([]encoding.SeriesIterator{
+ it(ctrl, dp{t: step(1), val: 5}, "id4", "foo", "bar", "qux", "queen"),
+ it(ctrl, dp{t: step(2), val: 6}, "id4", "foo", "bar", "qux", "queen"),
+ }, nil),
+ },
+ },
+ expected: []expectedSeries{
+ expectedSeries{
+ tags: []string{"foo", "bar", "qux", "quail"},
+ dps: []dp{dp{t: step(1), val: 1}, dp{t: step(2), val: 2}},
+ },
+ expectedSeries{
+ tags: []string{"foo", "bar", "qux", "quart"},
+ dps: []dp{dp{t: step(1), val: 3}},
+ },
+ expectedSeries{
+ tags: []string{"foo", "bar", "qux", "queen"},
+ dps: []dp{dp{t: step(1), val: 5}, dp{t: step(2), val: 6}},
+ },
+ expectedSeries{
+ tags: []string{"foo", "bar", "qux", "quz"},
+ dps: []dp{dp{t: step(2), val: 4}},
+ },
+ },
+ exMeta: combinedMeta,
+ exErr: nil,
+ exAttrs: []storagemetadata.Attributes{unaggHr, unaggHr, unaggHr, unaggHr},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ testMultiFetchResultTagDedupeMap(t, ctrl, tt, models.NewTagOptions())
+ })
+ }
+}
+
+func testMultiFetchResultTagDedupeMap(
+ t *testing.T,
+ ctrl *gomock.Controller,
+ test dedupeTest,
+ tagOptions models.TagOptions,
+) {
+ require.True(t, len(test.entries) > 0,
+ "must have at least one entry in testMultiFetchResultTagDedupeMap")
+
+ pools := generateIteratorPools(ctrl)
+ opts := MatchOptions{
+ MatchType: MatchTags,
+ }
+
+ r := NewMultiFetchResult(NamespaceCoversAllQueryRange, pools,
+ opts, tagOptions)
+
+ for _, entry := range test.entries {
+ r.Add(entry.iter, entry.meta, entry.attr, entry.err)
+ }
+
+ result, attrs, err := r.FinalResultWithAttrs()
+ require.NoError(t, err)
+
+ assert.Equal(t, test.exMeta, result.Metadata)
+ require.Equal(t, len(test.exAttrs), len(attrs))
+ for i, ex := range test.exAttrs {
+ assert.Equal(t, ex, attrs[i])
+ }
+
+ c := len(result.SeriesIterators())
+ require.Equal(t, len(test.expected), c)
+
+ for i, ex := range test.expected {
+ iter, tags, err := result.IterTagsAtIndex(i, tagOptions)
+ require.NoError(t, err)
+
+ exTags := models.MustMakeTags(ex.tags...)
+ assert.Equal(t, exTags.String(), tags.String())
+ for j := 0; iter.Next(); j++ {
+ dp, _, _ := iter.Current()
+ exDp := ex.dps[j]
+ assert.Equal(t, exDp.val, dp.Value)
+ assert.Equal(t, exDp.t, dp.Timestamp)
+ }
+
+ assert.NoError(t, iter.Err())
+ }
+
+ assert.NoError(t, r.Close())
+}
+
+func TestFilteredInsert(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ start := time.Now().Truncate(time.Hour)
+ step := func(i time.Duration) time.Time { return start.Add(time.Minute * i) }
+ unaggHr := storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
+ Resolution: time.Hour,
+ }
+
+ warn1Meta := block.NewResultMetadata()
+ warn1Meta.AddWarning("warn", "1")
+
+ warn2Meta := block.NewResultMetadata()
+ warn2Meta.AddWarning("warn", "2")
+
+ dedupe := dedupeTest{
+ name: "same tags, same ids",
+ entries: []insertEntry{
+ {
+ attr: unaggHr,
+ meta: warn1Meta,
+ err: nil,
+ iter: encoding.NewSeriesIterators([]encoding.SeriesIterator{
+ it(ctrl, dp{t: step(1), val: 1}, "id1", "foo", "bar"),
+ notReadIt(ctrl, dp{t: step(5), val: 6}, "id1", "foo", "baz"),
+ }, nil),
+ },
+ },
+ expected: []expectedSeries{
+ expectedSeries{
+ tags: []string{"foo", "bar"},
+ dps: []dp{dp{t: step(1), val: 1}},
+ },
+ },
+ exMeta: warn1Meta,
+ exErr: nil,
+ exAttrs: []storagemetadata.Attributes{unaggHr},
+ }
+
+ opts := models.NewTagOptions().SetFilters(models.Filters{
+ models.Filter{Name: b("foo"), Values: [][]byte{b("baz")}},
+ })
+
+ testMultiFetchResultTagDedupeMap(t, ctrl, dedupe, opts)
+}
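
// Hedged usage sketch of the accumulator lifecycle these tests exercise,
// using only the NewMultiFetchResult/Add/FinalResultWithAttrs/Close surface
// introduced in this diff; pools and perNamespace are hypothetical inputs.
r := NewMultiFetchResult(NamespaceCoversAllQueryRange, pools,
    MatchOptions{MatchType: MatchTags}, models.NewTagOptions())
for _, f := range perNamespace {
    // Errors are accumulated internally and surface from FinalResult*.
    r.Add(f.iters, f.meta, f.attrs, f.err)
}
result, attrs, err := r.FinalResultWithAttrs()
// attrs holds one storagemetadata.Attributes per deduped series, aligned
// by index with the result's series iterators.
_, _, _ = result, attrs, err
defer r.Close() // releases every accumulated iterator
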
diff --git a/src/query/storage/m3/multi_fetch_result_test.go b/src/query/storage/m3/consolidators/multi_fetch_result_test.go
similarity index 79%
rename from src/query/storage/m3/multi_fetch_result_test.go
rename to src/query/storage/m3/consolidators/multi_fetch_result_test.go
index 4775344e2b..efe812348c 100644
--- a/src/query/storage/m3/multi_fetch_result_test.go
+++ b/src/query/storage/m3/consolidators/multi_fetch_result_test.go
@@ -18,7 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package m3
+package consolidators
import (
"fmt"
@@ -27,13 +27,19 @@ import (
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/query/block"
- "github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/x/ident"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
)
+var defaultTestOpts = MatchOptions{
+ MatchType: defaultMatchType,
+}
+
const (
common = "common"
short = "short"
@@ -49,14 +55,16 @@ func generateSeriesIterators(
iter := encoding.NewMockSeriesIterator(ctrl)
iter.EXPECT().ID().Return(ident.StringID(common)).MinTimes(1)
iter.EXPECT().Namespace().Return(ident.StringID(ns)).MaxTimes(1)
+ iter.EXPECT().Tags().Return(ident.EmptyTagIterator).AnyTimes()
unique := encoding.NewMockSeriesIterator(ctrl)
unique.EXPECT().ID().Return(ident.StringID(ns)).MinTimes(1)
unique.EXPECT().Namespace().Return(ident.StringID(ns)).MaxTimes(1)
+ unique.EXPECT().Tags().Return(ident.EmptyTagIterator).AnyTimes()
iters := encoding.NewMockSeriesIterators(ctrl)
iters.EXPECT().Close().Return().Times(1)
- iters.EXPECT().Len().Return(1).MaxTimes(1)
+ iters.EXPECT().Len().Return(1).AnyTimes()
iters.EXPECT().Iters().Return([]encoding.SeriesIterator{iter, unique})
return iters
@@ -80,37 +88,37 @@ func generateIteratorPools(ctrl *gomock.Controller) encoding.IteratorPools {
}
func TestMultiResult(t *testing.T) {
- testMultiResult(t, namespaceCoversPartialQueryRange, long)
- testMultiResult(t, namespaceCoversAllQueryRange, unaggregated)
+ testMultiResult(t, NamespaceCoversPartialQueryRange, long)
+ testMultiResult(t, NamespaceCoversAllQueryRange, unaggregated)
}
-func testMultiResult(t *testing.T, fanoutType queryFanoutType, expected string) {
- ctrl := gomock.NewController(t)
+func testMultiResult(t *testing.T, fanoutType QueryFanoutType, expected string) {
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
namespaces := []struct {
- attrs storage.Attributes
+ attrs storagemetadata.Attributes
ns string
}{
{
- attrs: storage.Attributes{
- MetricsType: storage.UnaggregatedMetricsType,
+ attrs: storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
Retention: 24 * time.Hour,
Resolution: 0 * time.Minute,
},
ns: unaggregated,
},
{
- attrs: storage.Attributes{
- MetricsType: storage.AggregatedMetricsType,
+ attrs: storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
Retention: 360 * time.Hour,
Resolution: 2 * time.Minute,
},
ns: short,
},
{
- attrs: storage.Attributes{
- MetricsType: storage.AggregatedMetricsType,
+ attrs: storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
Retention: 17520 * time.Hour,
Resolution: 10 * time.Minute,
},
@@ -119,16 +127,13 @@ func testMultiResult(t *testing.T, fanoutType queryFanoutType, expected string)
}
pools := generateIteratorPools(ctrl)
- r := newMultiFetchResult(fanoutType, pools)
+ r := NewMultiFetchResult(fanoutType, pools,
+ defaultTestOpts, models.NewTagOptions())
+ meta := block.NewResultMetadata()
for _, ns := range namespaces {
iters := generateSeriesIterators(ctrl, ns.ns)
- seriesFetchResult := SeriesFetchResult{
- Metadata: block.NewResultMetadata(),
- SeriesIterators: iters,
- }
-
- r.Add(seriesFetchResult, ns.attrs, nil)
+ r.Add(iters, meta, ns.attrs, nil)
}
result, err := r.FinalResult()
@@ -138,7 +143,7 @@ func testMultiResult(t *testing.T, fanoutType queryFanoutType, expected string)
assert.True(t, result.Metadata.LocalOnly)
assert.Equal(t, 0, len(result.Metadata.Warnings))
- iters := result.SeriesIterators
+ iters := result.seriesData.seriesIterators
assert.Equal(t, 4, iters.Len())
assert.Equal(t, 4, len(iters.Iters()))
@@ -169,11 +174,12 @@ var exhaustTests = []struct {
}
func TestExhaustiveMerge(t *testing.T) {
- ctrl := gomock.NewController(t)
+ ctrl := xtest.NewController(t)
defer ctrl.Finish()
pools := generateIteratorPools(ctrl)
- r := newMultiFetchResult(namespaceCoversAllQueryRange, pools)
+ r := NewMultiFetchResult(NamespaceCoversAllQueryRange, pools,
+ defaultTestOpts, models.NewTagOptions())
for _, tt := range exhaustTests {
t.Run(tt.name, func(t *testing.T) {
for i, ex := range tt.exhaustives {
@@ -185,12 +191,7 @@ func TestExhaustiveMerge(t *testing.T) {
meta := block.NewResultMetadata()
meta.Exhaustive = ex
- seriesFetchResult := SeriesFetchResult{
- Metadata: meta,
- SeriesIterators: iters,
- }
-
- r.Add(seriesFetchResult, storage.Attributes{}, nil)
+ r.Add(iters, meta, storagemetadata.Attributes{}, nil)
}
result, err := r.FinalResult()
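
// Hedged illustration of the merge rule TestExhaustiveMerge asserts:
// combining metadata ANDs the Exhaustive flags, so a single non-exhaustive
// namespace marks the whole merged result non-exhaustive (NewResultMetadata
// is assumed to default to exhaustive).
a := block.NewResultMetadata()
b := block.NewResultMetadata()
b.Exhaustive = false
merged := a.CombineMetadata(b)
// merged.Exhaustive == false
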
diff --git a/src/query/storage/m3/consolidators/series_fetch_result.go b/src/query/storage/m3/consolidators/series_fetch_result.go
new file mode 100644
index 0000000000..1af58dfd7b
--- /dev/null
+++ b/src/query/storage/m3/consolidators/series_fetch_result.go
@@ -0,0 +1,141 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "fmt"
+
+ "github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/models"
+)
+
+// NewSeriesFetchResult creates a new series fetch result using the given
+// iterators.
+func NewSeriesFetchResult(
+ iters encoding.SeriesIterators,
+ tags []*models.Tags,
+ meta block.ResultMetadata,
+) (SeriesFetchResult, error) {
+ if iters == nil || iters.Len() == 0 {
+ return SeriesFetchResult{
+ Metadata: meta,
+ seriesData: seriesData{
+ seriesIterators: nil,
+ tags: []*models.Tags{},
+ },
+ }, nil
+ }
+
+ if tags == nil {
+ tags = make([]*models.Tags, iters.Len())
+ }
+
+ return SeriesFetchResult{
+ Metadata: meta,
+ seriesData: seriesData{
+ seriesIterators: iters,
+ tags: tags,
+ },
+ }, nil
+}
+
+// NewEmptyFetchResult creates a new empty series fetch result.
+func NewEmptyFetchResult(
+ meta block.ResultMetadata,
+) SeriesFetchResult {
+ return SeriesFetchResult{
+ Metadata: meta,
+ seriesData: seriesData{
+ seriesIterators: nil,
+ tags: []*models.Tags{},
+ },
+ }
+}
+
+// Verify verifies the fetch result is valid.
+func (r *SeriesFetchResult) Verify() error {
+ if r.seriesData.tags == nil || r.seriesData.seriesIterators == nil {
+ return nil
+ }
+
+ tagLen := len(r.seriesData.tags)
+ iterLen := r.seriesData.seriesIterators.Len()
+ if tagLen != iterLen {
+ return fmt.Errorf("tag length %d does not match iterator length %d",
+ tagLen, iterLen)
+ }
+
+ return nil
+}
+
+// Count returns the total number of contained series iterators.
+func (r *SeriesFetchResult) Count() int {
+ if r.seriesData.seriesIterators == nil {
+ return 0
+ }
+
+ return r.seriesData.seriesIterators.Len()
+}
+
+// Close is a no-op; the contained iterators should be closed by the
+// enclosing iterator.
+func (r *SeriesFetchResult) Close() {}
+
+// IterTagsAtIndex returns the tag iterator and tags at the given index.
+func (r *SeriesFetchResult) IterTagsAtIndex(
+ idx int, tagOpts models.TagOptions,
+) (encoding.SeriesIterator, models.Tags, error) {
+ tags := models.EmptyTags()
+ if idx < 0 || idx >= len(r.seriesData.tags) {
+ return nil, tags, fmt.Errorf("series idx(%d) out of "+
+ "bounds (%d)", idx, len(r.seriesData.tags))
+ }
+
+ iters := r.seriesData.seriesIterators.Iters()
+ if idx < len(r.seriesData.tags) {
+ if r.seriesData.tags[idx] == nil {
+ var err error
+ iter := iters[idx].Tags()
+ tags, err = FromIdentTagIteratorToTags(iter, tagOpts)
+ if err != nil {
+ return nil, models.EmptyTags(), err
+ }
+
+ iter.Rewind()
+ r.seriesData.tags[idx] = &tags
+ } else {
+ tags = *r.seriesData.tags[idx]
+ }
+ }
+
+ return iters[idx], tags, nil
+}
+
+// SeriesIterators returns the series iterators.
+func (r *SeriesFetchResult) SeriesIterators() []encoding.SeriesIterator {
+ if r.seriesData.seriesIterators == nil {
+ return []encoding.SeriesIterator{}
+ }
+
+ return r.seriesData.seriesIterators.Iters()
+}
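
// Usage sketch for the lazy tag decoding above: on first access at an index
// the tags are decoded from the series iterator's tag iterator, the tag
// iterator is rewound so the series remains readable, and the decoded
// models.Tags are cached for later calls; result and tagOpts are assumed
// to be in scope.
for i := 0; i < result.Count(); i++ {
    iter, tags, err := result.IterTagsAtIndex(i, tagOpts)
    if err != nil {
        return err
    }
    _ = tags // decoded once, then served from the cached slice
    for iter.Next() {
        // consume datapoints
    }
}
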
diff --git a/src/query/storage/m3/consolidators/tag_dedupe_map.go b/src/query/storage/m3/consolidators/tag_dedupe_map.go
new file mode 100644
index 0000000000..28ad17b568
--- /dev/null
+++ b/src/query/storage/m3/consolidators/tag_dedupe_map.go
@@ -0,0 +1,133 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "fmt"
+
+ "github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
+)
+
+type tagDedupeMap struct {
+ fanout QueryFanoutType
+ mapWrapper *fetchResultMapWrapper
+ tagOpts models.TagOptions
+}
+
+type tagMapOpts struct {
+ size int
+ fanout QueryFanoutType
+ tagOpts models.TagOptions
+}
+
+func newTagDedupeMap(opts tagMapOpts) fetchDedupeMap {
+ return &tagDedupeMap{
+ fanout: opts.fanout,
+ mapWrapper: newFetchResultMapWrapper(opts.size),
+ tagOpts: opts.tagOpts,
+ }
+}
+
+func (m *tagDedupeMap) close() {
+ m.mapWrapper.close()
+}
+
+func (m *tagDedupeMap) list() []multiResultSeries {
+ return m.mapWrapper.list()
+}
+
+func (m *tagDedupeMap) add(
+ iter encoding.SeriesIterator,
+ attrs storagemetadata.Attributes,
+) error {
+ tags, err := FromIdentTagIteratorToTags(iter.Tags(), m.tagOpts)
+ if err != nil {
+ return err
+ }
+
+ iter.Tags().Rewind()
+ series := multiResultSeries{
+ iter: iter,
+ attrs: attrs,
+ tags: tags,
+ }
+
+ existing, exists := m.mapWrapper.get(tags)
+ if !exists {
+ m.mapWrapper.set(tags, series)
+ return nil
+ }
+
+ var existsBetter bool
+ var existsEqual bool
+ switch m.fanout {
+ case NamespaceCoversAllQueryRange:
+ // Already exists and resolution of result we are adding is not as precise
+ existsBetter = existing.attrs.Resolution < attrs.Resolution
+ existsEqual = existing.attrs.Resolution == attrs.Resolution
+ case NamespaceCoversPartialQueryRange:
+ // Already exists and either has longer retention, or the same retention
+ // and result we are adding is not as precise
+ existsLongerRetention := existing.attrs.Retention > attrs.Retention
+ existsSameRetentionEqualOrBetterResolution :=
+ existing.attrs.Retention == attrs.Retention &&
+ existing.attrs.Resolution < attrs.Resolution
+ existsBetter = existsLongerRetention || existsSameRetentionEqualOrBetterResolution
+
+ existsEqual = existing.attrs.Retention == attrs.Retention &&
+ existing.attrs.Resolution == attrs.Resolution
+ default:
+ return fmt.Errorf("unknown query fanout type: %d", m.fanout)
+ }
+
+ if existsEqual {
+ acc, ok := existing.iter.(encoding.SeriesIteratorAccumulator)
+ if !ok {
+ acc, err = encoding.NewSeriesIteratorAccumulator(existing.iter,
+ encoding.SeriesAccumulatorOptions{})
+ if err != nil {
+ return err
+ }
+ }
+
+ if err := acc.Add(iter); err != nil {
+ return err
+ }
+
+ // Update accumulated result series.
+ series.iter = acc
+ m.mapWrapper.set(tags, series)
+ return nil
+ }
+
+ if existsBetter {
+ // Existing result is already better
+ return nil
+ }
+
+ // Override
+ existing.iter.Close()
+ m.mapWrapper.set(tags, series)
+
+ return nil
+}
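
// A condensed restatement (sketch only) of the precedence rule add
// implements for NamespaceCoversPartialQueryRange: an existing entry wins on
// longer retention, or on equal retention with strictly finer (smaller)
// resolution; exact attribute ties instead take the accumulation path above.
func existingWinsPartialRange(existing, incoming storagemetadata.Attributes) bool {
    if existing.Retention != incoming.Retention {
        // Longer retention covers more of the query range.
        return existing.Retention > incoming.Retention
    }
    // Equal retention: finer resolution is more precise.
    return existing.Resolution < incoming.Resolution
}
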
diff --git a/src/query/storage/m3/consolidators/tag_dedupe_map_test.go b/src/query/storage/m3/consolidators/tag_dedupe_map_test.go
new file mode 100644
index 0000000000..929d182d63
--- /dev/null
+++ b/src/query/storage/m3/consolidators/tag_dedupe_map_test.go
@@ -0,0 +1,166 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
+ "github.com/m3db/m3/src/x/ident"
+ xtest "github.com/m3db/m3/src/x/test"
+ xtime "github.com/m3db/m3/src/x/time"
+
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func verifyDedupeMap(
+ t *testing.T,
+ dedupeMap fetchDedupeMap,
+ expected ...ts.Datapoint,
+) {
+ series := dedupeMap.list()
+ require.Equal(t, 1, len(series))
+ val, found := series[0].tags.Get([]byte("foo"))
+ require.True(t, found)
+ assert.Equal(t, "bar", string(val))
+ val, found = series[0].tags.Get([]byte("qux"))
+ require.True(t, found)
+ assert.Equal(t, "quail", string(val))
+
+ iter := series[0].iter
+ i := 0
+ for iter.Next() {
+ dp, _, _ := iter.Current()
+ ex := expected[i]
+ ex.TimestampNanos = xtime.ToUnixNano(ex.Timestamp)
+ assert.Equal(t, ex, dp)
+ i++
+ }
+
+ assert.Equal(t, len(expected), i)
+}
+
+type dp struct {
+ t time.Time
+ val float64
+}
+
+func it(
+ ctrl *gomock.Controller,
+ dp dp,
+ id string,
+ tags ...string,
+) encoding.SeriesIterator {
+ it := encoding.NewMockSeriesIterator(ctrl)
+ it.EXPECT().ID().Return(ident.StringID(id)).AnyTimes()
+
+ it.EXPECT().Namespace().Return(ident.StringID("ns")).AnyTimes()
+ it.EXPECT().Start().Return(dp.t).AnyTimes()
+ it.EXPECT().End().Return(dp.t.Add(time.Hour)).AnyTimes()
+
+ tagIter := ident.MustNewTagStringsIterator(tags...)
+ it.EXPECT().Tags().Return(tagIter).AnyTimes()
+
+ it.EXPECT().Next().Return(true)
+ it.EXPECT().Current().
+ Return(ts.Datapoint{
+ TimestampNanos: xtime.ToUnixNano(dp.t),
+ Timestamp: dp.t,
+ Value: dp.val,
+ }, xtime.Second, nil).AnyTimes()
+ it.EXPECT().Next().Return(false)
+ it.EXPECT().Err().Return(nil).AnyTimes()
+ it.EXPECT().Close().MinTimes(1)
+
+ return it
+}
+
+func notReadIt(
+ ctrl *gomock.Controller,
+ dp dp,
+ id string,
+ tags ...string,
+) encoding.SeriesIterator {
+ it := encoding.NewMockSeriesIterator(ctrl)
+ it.EXPECT().ID().Return(ident.StringID(id)).AnyTimes()
+
+ it.EXPECT().Namespace().Return(ident.StringID("ns")).AnyTimes()
+ it.EXPECT().Start().Return(dp.t).AnyTimes()
+ it.EXPECT().End().Return(dp.t.Add(time.Hour)).AnyTimes()
+
+ tagIter := ident.MustNewTagStringsIterator(tags...)
+ it.EXPECT().Tags().Return(tagIter).AnyTimes()
+
+ it.EXPECT().Err().Return(nil).AnyTimes()
+ it.EXPECT().Close().MinTimes(1)
+
+ return it
+}
+
+func TestTagDedupeMap(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ dedupeMap := newTagDedupeMap(tagMapOpts{
+ size: 8,
+ fanout: NamespaceCoversAllQueryRange,
+ tagOpts: models.NewTagOptions(),
+ })
+
+ start := time.Now().Truncate(time.Hour)
+ attrs := storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
+ Resolution: time.Hour,
+ }
+
+ dedupeMap.add(it(ctrl, dp{t: start, val: 14},
+ "id1", "foo", "bar", "qux", "quail"), attrs)
+ verifyDedupeMap(t, dedupeMap, ts.Datapoint{Timestamp: start, Value: 14})
+
+ // Lower resolution must override.
+ attrs.Resolution = time.Minute
+ dedupeMap.add(it(ctrl, dp{t: start.Add(time.Minute), val: 10},
+ "id1", "foo", "bar", "qux", "quail"), attrs)
+ dedupeMap.add(it(ctrl, dp{t: start.Add(time.Minute * 2), val: 12},
+ "id2", "foo", "bar", "qux", "quail"), attrs)
+
+ verifyDedupeMap(t, dedupeMap,
+ ts.Datapoint{Timestamp: start.Add(time.Minute), Value: 10},
+ ts.Datapoint{Timestamp: start.Add(time.Minute * 2), Value: 12})
+
+ // Lower resolution must override.
+ attrs.Resolution = time.Second
+ dedupeMap.add(it(ctrl, dp{t: start, val: 100},
+ "id1", "foo", "bar", "qux", "quail"), attrs)
+ verifyDedupeMap(t, dedupeMap, ts.Datapoint{Timestamp: start, Value: 100})
+
+ for _, it := range dedupeMap.list() {
+ iter := it.iter
+ require.NoError(t, iter.Err())
+ iter.Close()
+ }
+}
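
// Hedged in-package sketch of the override/accumulate behavior the test
// above verifies; coarseIter, fineIter and fineIter2 are hypothetical
// iterators sharing one tag set.
m := newTagDedupeMap(tagMapOpts{
    size:    4,
    fanout:  NamespaceCoversAllQueryRange,
    tagOpts: models.NewTagOptions(),
})
coarse := storagemetadata.Attributes{Resolution: time.Hour}
fine := storagemetadata.Attributes{Resolution: time.Minute}
_ = m.add(coarseIter, coarse) // first entry for these tags
_ = m.add(fineIter, fine)     // finer resolution overrides the coarse entry
_ = m.add(fineIter2, fine)    // equal attributes, same tags: accumulated
// m.list() now holds a single multiResultSeries backed by an accumulator.
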
diff --git a/src/query/storage/m3/consolidators/types.go b/src/query/storage/m3/consolidators/types.go
new file mode 100644
index 0000000000..e91b46927f
--- /dev/null
+++ b/src/query/storage/m3/consolidators/types.go
@@ -0,0 +1,172 @@
+// Copyright (c) 2018 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package consolidators
+
+import (
+ "github.com/m3db/m3/src/dbnode/client"
+ "github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
+ "github.com/m3db/m3/src/x/ident"
+)
+
+// MatchOptions are multi fetch matching options.
+type MatchOptions struct {
+ // MatchType is the equality matching type by which to compare series.
+ MatchType MatchType
+}
+
+// MatchType is an equality match type.
+type MatchType uint
+
+const (
+ // MatchIDs matches series based on ID only.
+ MatchIDs MatchType = iota
+ // MatchTags matches series based on tags.
+ MatchTags
+)
+
+// QueryFanoutType is a query fanout type.
+type QueryFanoutType uint
+
+const (
+ // NamespaceInvalid indicates there is no valid namespace.
+ NamespaceInvalid QueryFanoutType = iota
+ // NamespaceCoversAllQueryRange indicates the given namespace covers
+ // the entire query range.
+ NamespaceCoversAllQueryRange
+ // NamespaceCoversPartialQueryRange indicates the given namespace covers
+ // a partial query range.
+ NamespaceCoversPartialQueryRange
+)
+
+func (t QueryFanoutType) String() string {
+ switch t {
+ case NamespaceCoversAllQueryRange:
+ return "coversAllQueryRange"
+ case NamespaceCoversPartialQueryRange:
+ return "coversPartialQueryRange"
+ default:
+ return "unknown"
+ }
+}
+
+// MultiFetchResult is a deduping accumulator for series iterators
+// that allows merging using a given strategy.
+type MultiFetchResult interface {
+ // Add appends series fetch results to the accumulator.
+ Add(
+ seriesIterators encoding.SeriesIterators,
+ metadata block.ResultMetadata,
+ attrs storagemetadata.Attributes,
+ err error,
+ )
+
+ // FinalResult returns a series fetch result containing deduplicated series
+ // iterators and their metadata, and any errors encountered.
+ FinalResult() (SeriesFetchResult, error)
+
+ // FinalResultWithAttrs returns a series fetch result containing
+ // deduplicated series iterators and their metadata, as well as any
+ // attributes corresponding to these results, and any errors encountered.
+ FinalResultWithAttrs() (SeriesFetchResult, []storagemetadata.Attributes, error)
+
+ // Close releases all resources held by this accumulator.
+ Close() error
+}
+
+// SeriesFetchResult is a fetch result with associated metadata.
+type SeriesFetchResult struct {
+ // Metadata is the set of metadata associated with the fetch result.
+ Metadata block.ResultMetadata
+ // seriesData is the list of series data for the result.
+ seriesData seriesData
+}
+
+// SeriesData is fetched series data.
+type seriesData struct {
+ // seriesIterators are the series iterators for the series.
+ seriesIterators encoding.SeriesIterators
+ // tags are the decoded tags for the series.
+ tags []*models.Tags
+}
+
+// TagResult is a fetch tag result with associated metadata.
+type TagResult struct {
+ // Metadata is the set of metadata associated with the fetch result.
+ Metadata block.ResultMetadata
+ // Tags is the list of tags for the result.
+ Tags []MultiTagResult
+}
+
+// MultiFetchTagsResult is a deduping accumulator for tag iterators.
+type MultiFetchTagsResult interface {
+ // Add adds tagged ID iterators to the accumulator.
+ Add(
+ newIterator client.TaggedIDsIterator,
+ meta block.ResultMetadata,
+ err error,
+ )
+ // FinalResult returns a deduped list of tag iterators with
+ // corresponding series IDs.
+ FinalResult() (TagResult, error)
+ // Close releases all resources held by this accumulator.
+ Close() error
+}
+
+// CompletedTag represents a tag retrieved by a complete tags query.
+type CompletedTag struct {
+ // Name the name of the tag.
+ Name []byte
+ // Values is a set of possible values for the tag.
+ // NB: if the parent CompleteTagsResult is set to CompleteNameOnly, this is
+ // expected to be empty.
+ Values [][]byte
+}
+
+// CompleteTagsResult represents a set of autocompleted tag names and values.
+type CompleteTagsResult struct {
+ // CompleteNameOnly indicates if the tags in this result are expected to have
+ // both names and values, or only names.
+ CompleteNameOnly bool
+ // CompletedTags is a list of completed tags.
+ CompletedTags []CompletedTag
+ // Metadata describes any metadata for the operation.
+ Metadata block.ResultMetadata
+}
+
+// CompleteTagsResultBuilder is a builder that accumulates and deduplicates
+// incoming CompleteTagsResult values.
+type CompleteTagsResultBuilder interface {
+ // Add appends an incoming CompleteTagsResult.
+ Add(*CompleteTagsResult) error
+ // Build builds a completed tag result.
+ Build() CompleteTagsResult
+}
+
+// MultiTagResult represents a tag iterator with its string ID.
+type MultiTagResult struct {
+ // ID is the series ID.
+ ID ident.ID
+ // Iter is the tag iterator for the series.
+ Iter ident.TagIterator
+}
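
// Usage sketch for CompleteTagsResultBuilder above, assuming the
// NewCompleteTagsResultBuilder constructor used in storage.go later in this
// diff; partials is a hypothetical slice of per-namespace results.
builder := NewCompleteTagsResultBuilder(nameOnly, tagOpts)
for i := range partials {
    if err := builder.Add(&partials[i]); err != nil {
        return err
    }
}
merged := builder.Build() // deduplicated tags across all namespaces
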
diff --git a/src/query/storage/m3/m3_mock.go b/src/query/storage/m3/m3_mock.go
index a990403d77..6ef74722b0 100644
--- a/src/query/storage/m3/m3_mock.go
+++ b/src/query/storage/m3/m3_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/query/storage/m3 (interfaces: Storage)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -30,6 +30,7 @@ import (
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/golang/mock/gomock"
)
@@ -72,10 +73,10 @@ func (mr *MockStorageMockRecorder) Close() *gomock.Call {
}
// CompleteTags mocks base method
-func (m *MockStorage) CompleteTags(arg0 context.Context, arg1 *storage.CompleteTagsQuery, arg2 *storage.FetchOptions) (*storage.CompleteTagsResult, error) {
+func (m *MockStorage) CompleteTags(arg0 context.Context, arg1 *storage.CompleteTagsQuery, arg2 *storage.FetchOptions) (*consolidators.CompleteTagsResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CompleteTags", arg0, arg1, arg2)
- ret0, _ := ret[0].(*storage.CompleteTagsResult)
+ ret0, _ := ret[0].(*consolidators.CompleteTagsResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -87,10 +88,10 @@ func (mr *MockStorageMockRecorder) CompleteTags(arg0, arg1, arg2 interface{}) *g
}
// CompleteTagsCompressed mocks base method
-func (m *MockStorage) CompleteTagsCompressed(arg0 context.Context, arg1 *storage.CompleteTagsQuery, arg2 *storage.FetchOptions) (*storage.CompleteTagsResult, error) {
+func (m *MockStorage) CompleteTagsCompressed(arg0 context.Context, arg1 *storage.CompleteTagsQuery, arg2 *storage.FetchOptions) (*consolidators.CompleteTagsResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CompleteTagsCompressed", arg0, arg1, arg2)
- ret0, _ := ret[0].(*storage.CompleteTagsResult)
+ ret0, _ := ret[0].(*consolidators.CompleteTagsResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -131,10 +132,10 @@ func (mr *MockStorageMockRecorder) FetchBlocks(arg0, arg1, arg2 interface{}) *go
}
// FetchCompressed mocks base method
-func (m *MockStorage) FetchCompressed(arg0 context.Context, arg1 *storage.FetchQuery, arg2 *storage.FetchOptions) (SeriesFetchResult, Cleanup, error) {
+func (m *MockStorage) FetchCompressed(arg0 context.Context, arg1 *storage.FetchQuery, arg2 *storage.FetchOptions) (consolidators.SeriesFetchResult, Cleanup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FetchCompressed", arg0, arg1, arg2)
- ret0, _ := ret[0].(SeriesFetchResult)
+ ret0, _ := ret[0].(consolidators.SeriesFetchResult)
ret1, _ := ret[1].(Cleanup)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
@@ -176,10 +177,10 @@ func (mr *MockStorageMockRecorder) Name() *gomock.Call {
}
// SearchCompressed mocks base method
-func (m *MockStorage) SearchCompressed(arg0 context.Context, arg1 *storage.FetchQuery, arg2 *storage.FetchOptions) (TagResult, Cleanup, error) {
+func (m *MockStorage) SearchCompressed(arg0 context.Context, arg1 *storage.FetchQuery, arg2 *storage.FetchOptions) (consolidators.TagResult, Cleanup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SearchCompressed", arg0, arg1, arg2)
- ret0, _ := ret[0].(TagResult)
+ ret0, _ := ret[0].(consolidators.TagResult)
ret1, _ := ret[1].(Cleanup)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
diff --git a/src/query/storage/m3/storage.go b/src/query/storage/m3/storage.go
index de39418b8d..e3a9084a71 100644
--- a/src/query/storage/m3/storage.go
+++ b/src/query/storage/m3/storage.go
@@ -28,11 +28,14 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/client"
+ "github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/cost"
"github.com/m3db/m3/src/query/errors"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/tracepoint"
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/query/ts/m3db"
@@ -51,25 +54,6 @@ var (
errNoNamespacesConfigured = goerrors.New("no namespaces configured")
)
-type queryFanoutType uint
-
-const (
- namespaceInvalid queryFanoutType = iota
- namespaceCoversAllQueryRange
- namespaceCoversPartialQueryRange
-)
-
-func (t queryFanoutType) String() string {
- switch t {
- case namespaceCoversAllQueryRange:
- return "coversAllQueryRange"
- case namespaceCoversPartialQueryRange:
- return "coversPartialQueryRange"
- default:
- return "unknown"
- }
-}
-
type m3storage struct {
clusters Clusters
opts m3db.Options
@@ -108,7 +92,8 @@ func (s *m3storage) FetchProm(
query *storage.FetchQuery,
options *storage.FetchOptions,
) (storage.PromResult, error) {
- accumulator, err := s.fetchCompressed(ctx, query, options)
+ queryOptions := storage.FetchOptionsToM3Options(options, query)
+ accumulator, err := s.fetchCompressed(ctx, query, options, queryOptions)
if err != nil {
return storage.PromResult{}, err
}
@@ -120,9 +105,8 @@ func (s *m3storage) FetchProm(
}
fetchResult, err := storage.SeriesIteratorsToPromResult(
- result.SeriesIterators,
+ result,
s.opts.ReadWorkerPool(),
- result.Metadata,
options.Enforcer,
s.opts.TagOptions(),
)
@@ -137,7 +121,7 @@ func (s *m3storage) FetchProm(
// FetchResultToBlockResult converts an encoded SeriesIterator fetch result
// into blocks.
func FetchResultToBlockResult(
- result SeriesFetchResult,
+ result consolidators.SeriesFetchResult,
query *storage.FetchQuery,
options *storage.FetchOptions,
opts m3db.Options,
@@ -161,9 +145,8 @@ func FetchResultToBlockResult(
}
blocks, err := m3db.ConvertM3DBSeriesIterators(
- result.SeriesIterators,
+ result,
bounds,
- result.Metadata,
opts,
)
@@ -202,10 +185,11 @@ func (s *m3storage) FetchCompressed(
ctx context.Context,
query *storage.FetchQuery,
options *storage.FetchOptions,
-) (SeriesFetchResult, Cleanup, error) {
- accumulator, err := s.fetchCompressed(ctx, query, options)
+) (consolidators.SeriesFetchResult, Cleanup, error) {
+ queryOptions := storage.FetchOptionsToM3Options(options, query)
+ accumulator, err := s.fetchCompressed(ctx, query, options, queryOptions)
if err != nil {
- return SeriesFetchResult{
+ return consolidators.SeriesFetchResult{
Metadata: block.NewResultMetadata(),
}, noop, err
}
@@ -216,6 +200,24 @@ func (s *m3storage) FetchCompressed(
return result, noop, err
}
+ if processor := s.opts.SeriesIteratorProcessor(); processor != nil {
+ _, span, sampled := xcontext.StartSampledTraceSpan(ctx,
+ tracepoint.FetchCompressedInspectSeries)
+ iters := result.SeriesIterators()
+ if err := processor.InspectSeries(ctx, iters); err != nil {
+ s.logger.Error("error inspecting series", zap.Error(err))
+ }
+ if sampled {
+ span.LogFields(
+ log.String("query", query.Raw),
+ log.String("start", query.Start.String()),
+ log.String("end", query.End.String()),
+ log.String("interval", query.Interval.String()),
+ )
+ }
+ span.Finish()
+ }
+
if options.IncludeResolution {
resolutions := make([]int64, 0, len(attrs))
for _, attr := range attrs {
@@ -233,7 +235,8 @@ func (s *m3storage) fetchCompressed(
ctx context.Context,
query *storage.FetchQuery,
options *storage.FetchOptions,
-) (MultiFetchResult, error) {
+ queryOptions index.QueryOptions,
+) (consolidators.MultiFetchResult, error) {
if err := options.BlockType.Validate(); err != nil {
// This is an invariant error; should not be able to get to here.
return nil, instrument.InvariantErrorf("invalid block type on "+
@@ -268,10 +271,17 @@ func (s *m3storage) fetchCompressed(
return nil, err
}
- debugLog := s.logger.Check(zapcore.DebugLevel,
- "query resolved cluster namespace, will use most granular per result")
- if debugLog != nil {
+ if s.logger.Core().Enabled(zapcore.DebugLevel) {
for _, n := range namespaces {
+ // NB(r): Need to perform log on inner loop, cannot reuse a
+ // checked entry returned from logger.Check(...).
+ // Will see: "Unsafe CheckedEntry re-use near Entry ..." otherwise.
+ debugLog := s.logger.Check(zapcore.DebugLevel,
+ "query resolved cluster namespace, will use most granular per result")
+ if debugLog == nil {
+ continue
+ }
+
debugLog.Write(zap.String("query", query.Raw),
zap.String("m3query", m3query.String()),
zap.Time("start", query.Start),
@@ -281,15 +291,11 @@ func (s *m3storage) fetchCompressed(
zap.String("type", n.Options().Attributes().MetricsType.String()),
zap.String("retention", n.Options().Attributes().Retention.String()),
zap.String("resolution", n.Options().Attributes().Resolution.String()),
- zap.Bool("remote", options.Remote),
- )
+ zap.Bool("remote", options.Remote))
}
}
- var (
- opts = storage.FetchOptionsToM3Options(options, query)
- wg sync.WaitGroup
- )
+ var wg sync.WaitGroup
if len(namespaces) == 0 {
return nil, errNoNamespacesConfigured
}
@@ -299,18 +305,21 @@ func (s *m3storage) fetchCompressed(
return nil, fmt.Errorf("unable to retrieve iterator pools: %v", err)
}
- result := newMultiFetchResult(fanout, pools)
+ matchOpts := s.opts.SeriesConsolidationMatchOptions()
+ tagOpts := s.opts.TagOptions()
+ result := consolidators.NewMultiFetchResult(fanout, pools, matchOpts, tagOpts)
for _, namespace := range namespaces {
namespace := namespace // Capture var
wg.Add(1)
go func() {
+ defer wg.Done()
_, span, sampled := xcontext.StartSampledTraceSpan(ctx,
tracepoint.FetchCompressedFetchTagged)
defer span.Finish()
session := namespace.Session()
namespaceID := namespace.NamespaceID()
- iters, metadata, err := session.FetchTagged(namespaceID, m3query, opts)
+ iters, metadata, err := session.FetchTagged(namespaceID, m3query, queryOptions)
if err == nil && sampled {
span.LogFields(
log.String("namespace", namespaceID.String()),
@@ -323,15 +332,9 @@ func (s *m3storage) fetchCompressed(
blockMeta := block.NewResultMetadata()
blockMeta.Exhaustive = metadata.Exhaustive
- fetchResult := SeriesFetchResult{
- SeriesIterators: iters,
- Metadata: blockMeta,
- }
-
// Ignore error from getting iterator pools, since operation
// will not be dramatically impacted if pools is nil
- result.Add(fetchResult, namespace.Options().Attributes(), err)
- wg.Done()
+ result.Add(iters, blockMeta, namespace.Options().Attributes(), err)
}()
}
@@ -380,7 +383,7 @@ func (s *m3storage) CompleteTagsCompressed(
ctx context.Context,
query *storage.CompleteTagsQuery,
options *storage.FetchOptions,
-) (*storage.CompleteTagsResult, error) {
+) (*consolidators.CompleteTagsResult, error) {
return s.CompleteTags(ctx, query, options)
}
@@ -388,7 +391,7 @@ func (s *m3storage) CompleteTags(
ctx context.Context,
query *storage.CompleteTagsQuery,
options *storage.FetchOptions,
-) (*storage.CompleteTagsResult, error) {
+) (*consolidators.CompleteTagsResult, error) {
// Check if the query was interrupted.
select {
case <-ctx.Done():
@@ -409,7 +412,8 @@ func (s *m3storage) CompleteTags(
var (
nameOnly = query.CompleteNameOnly
namespaces = s.clusters.ClusterNamespaces()
- accumulatedTags = storage.NewCompleteTagsResultBuilder(nameOnly)
+ tagOpts = s.opts.TagOptions()
+ accumulatedTags = consolidators.NewCompleteTagsResultBuilder(nameOnly, tagOpts)
multiErr syncMultiErrs
wg sync.WaitGroup
)
@@ -480,7 +484,7 @@ func (s *m3storage) CompleteTags(
aggIterators = append(aggIterators, aggTagIter)
mu.Unlock()
- completedTags := make([]storage.CompletedTag, 0, aggTagIter.Remaining())
+ completedTags := make([]consolidators.CompletedTag, 0, aggTagIter.Remaining())
for aggTagIter.Next() {
name, values := aggTagIter.Current()
tagValues := make([][]byte, 0, values.Remaining())
@@ -493,7 +497,7 @@ func (s *m3storage) CompleteTags(
return
}
- completedTags = append(completedTags, storage.CompletedTag{
+ completedTags = append(completedTags, consolidators.CompletedTag{
Name: name.Bytes(),
Values: tagValues,
})
@@ -506,7 +510,7 @@ func (s *m3storage) CompleteTags(
blockMeta := block.NewResultMetadata()
blockMeta.Exhaustive = metadata.Exhaustive
- result := &storage.CompleteTagsResult{
+ result := &consolidators.CompleteTagsResult{
CompleteNameOnly: query.CompleteNameOnly,
CompletedTags: completedTags,
Metadata: blockMeta,
@@ -531,9 +535,9 @@ func (s *m3storage) SearchCompressed(
ctx context.Context,
query *storage.FetchQuery,
options *storage.FetchOptions,
-) (TagResult, Cleanup, error) {
+) (consolidators.TagResult, Cleanup, error) {
// Check if the query was interrupted.
- tagResult := TagResult{
+ tagResult := consolidators.TagResult{
Metadata: block.NewResultMetadata(),
}
@@ -551,7 +555,7 @@ func (s *m3storage) SearchCompressed(
var (
namespaces = s.clusters.ClusterNamespaces()
m3opts = storage.FetchOptionsToM3Options(options, query)
- result = NewMultiFetchTagsResult()
+ result = consolidators.NewMultiFetchTagsResult(s.opts.TagOptions())
wg sync.WaitGroup
)
@@ -622,19 +626,20 @@ func (s *m3storage) Write(
var (
// TODO: Pool this once an ident pool is setup. We will have
// to stop calling NoFinalize() below if we do that.
- idBuf = query.Tags.ID()
- id = ident.BytesID(idBuf)
+ tags = query.Tags()
+ datapoints = query.Datapoints()
+ idBuf = tags.ID()
+ id = ident.BytesID(idBuf)
)
// Set id to NoFinalize to avoid cloning it in write operations
id.NoFinalize()
- tagIterator := storage.TagsToIdentTagIterator(query.Tags)
+ tagIterator := storage.TagsToIdentTagIterator(tags)
- if len(query.Datapoints) == 1 {
+ if len(datapoints) == 1 {
// Special case single datapoint because it is common and we
// can avoid the overhead of a waitgroup, goroutine, multierr,
// iterator duplication etc.
- return s.writeSingle(
- ctx, query, query.Datapoints[0], id, tagIterator)
+ return s.writeSingle(ctx, query, datapoints[0], id, tagIterator)
}
var (
@@ -642,7 +647,7 @@ func (s *m3storage) Write(
multiErr syncMultiErrs
)
- for _, datapoint := range query.Datapoints {
+ for _, datapoint := range datapoints {
tagIter := tagIterator.Duplicate()
// capture var
datapoint := datapoint
@@ -681,11 +686,11 @@ func (s *m3storage) writeSingle(
err error
)
- attributes := query.Attributes
+ attributes := query.Attributes()
switch attributes.MetricsType {
- case storage.UnaggregatedMetricsType:
+ case storagemetadata.UnaggregatedMetricsType:
namespace = s.clusters.UnaggregatedClusterNamespace()
- case storage.AggregatedMetricsType:
+ case storagemetadata.AggregatedMetricsType:
attrs := RetentionResolution{
Retention: attributes.Retention,
Resolution: attributes.Resolution,
@@ -708,5 +713,5 @@ func (s *m3storage) writeSingle(
namespaceID := namespace.NamespaceID()
session := namespace.Session()
return session.WriteTagged(namespaceID, identID, iterator,
- datapoint.Timestamp, datapoint.Value, query.Unit, query.Annotation)
+ datapoint.Timestamp, datapoint.Value, query.Unit(), query.Annotation())
}
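
// Minimal sketch of the zap pattern the NB(r) comment in fetchCompressed
// relies on: a zapcore.CheckedEntry is single-use, so logger.Check must be
// called afresh inside the loop for every namespace rather than re-using one
// checked entry across Write calls.
if logger.Core().Enabled(zapcore.DebugLevel) {
    for _, n := range namespaces {
        ce := logger.Check(zapcore.DebugLevel, "per-namespace debug")
        if ce == nil {
            continue
        }
        ce.Write(zap.String("namespace", n.NamespaceID().String()))
    }
}
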
diff --git a/src/query/storage/m3/storage_test.go b/src/query/storage/m3/storage_test.go
index 566142a6d3..3fcb2ae847 100644
--- a/src/query/storage/m3/storage_test.go
+++ b/src/query/storage/m3/storage_test.go
@@ -32,6 +32,8 @@ import (
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/test/seriesiter"
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/query/ts/m3db"
@@ -134,7 +136,7 @@ func newTestStorage(t *testing.T, clusters Clusters) storage.Storage {
SetWriteWorkerPool(writePool).
SetLookbackDuration(time.Minute).
SetTagOptions(tagOpts)
- storage, err := NewStorage(clusters, opts, instrument.NewOptions())
+ storage, err := NewStorage(clusters, opts, instrument.NewTestOptions(t))
require.NoError(t, err)
return storage
}
@@ -159,28 +161,32 @@ func newFetchReq() *storage.FetchQuery {
}
}
-func newWriteQuery() *storage.WriteQuery {
+func newWriteQuery(t *testing.T) *storage.WriteQuery {
tags := models.EmptyTags().AddTags([]models.Tag{
{Name: []byte("foo"), Value: []byte("bar")},
{Name: []byte("biz"), Value: []byte("baz")},
})
- datapoints := ts.Datapoints{{
- Timestamp: time.Now(),
- Value: 1.0,
- },
- {
- Timestamp: time.Now().Add(-10 * time.Second),
- Value: 2.0,
- }}
- return &storage.WriteQuery{
- Tags: tags,
- Unit: xtime.Millisecond,
- Datapoints: datapoints,
- Attributes: storage.Attributes{
- MetricsType: storage.UnaggregatedMetricsType,
+ q, err := storage.NewWriteQuery(storage.WriteQueryOptions{
+ Tags: tags,
+ Unit: xtime.Millisecond,
+ Datapoints: ts.Datapoints{
+ {
+ Timestamp: time.Now(),
+ Value: 1.0,
+ },
+ {
+ Timestamp: time.Now().Add(-10 * time.Second),
+ Value: 2.0,
+ },
},
- }
+ Attributes: storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
+ },
+ })
+ require.NoError(t, err)
+
+ return q
}
func setupLocalWrite(t *testing.T, ctrl *gomock.Controller) storage.Storage {
@@ -203,7 +209,7 @@ func TestLocalWriteSuccess(t *testing.T) {
ctrl := xtest.NewController(t)
defer ctrl.Finish()
store := setupLocalWrite(t, ctrl)
- writeQuery := newWriteQuery()
+ writeQuery := newWriteQuery(t)
err := store.Write(context.TODO(), writeQuery)
assert.NoError(t, err)
assert.NoError(t, store.Close())
@@ -213,14 +219,20 @@ func TestLocalWriteAggregatedNoClusterNamespaceError(t *testing.T) {
ctrl := xtest.NewController(t)
defer ctrl.Finish()
store, _ := setup(t, ctrl)
- writeQuery := newWriteQuery()
+
+ opts := newWriteQuery(t).Options()
+
// Use unsupported retention/resolution
- writeQuery.Attributes = storage.Attributes{
- MetricsType: storage.AggregatedMetricsType,
+ opts.Attributes = storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
Retention: 1234,
Resolution: 5678,
}
- err := store.Write(context.TODO(), writeQuery)
+
+ writeQuery, err := storage.NewWriteQuery(opts)
+ require.NoError(t, err)
+
+ err = store.Write(context.TODO(), writeQuery)
assert.Error(t, err)
assert.True(t, strings.Contains(err.Error(), "no configured cluster namespace"),
fmt.Sprintf("unexpected error string: %v", err.Error()))
@@ -230,13 +242,19 @@ func TestLocalWriteAggregatedInvalidMetricsTypeError(t *testing.T) {
ctrl := xtest.NewController(t)
defer ctrl.Finish()
store, _ := setup(t, ctrl)
- writeQuery := newWriteQuery()
+
+ opts := newWriteQuery(t).Options()
+
// Use unsupported retention/resolution
- writeQuery.Attributes = storage.Attributes{
- MetricsType: storage.MetricsType(math.MaxUint64),
+ opts.Attributes = storagemetadata.Attributes{
+ MetricsType: storagemetadata.MetricsType(math.MaxUint64),
Retention: 30 * 24 * time.Hour,
}
- err := store.Write(context.TODO(), writeQuery)
+
+ writeQuery, err := storage.NewWriteQuery(opts)
+ require.NoError(t, err)
+
+ err = store.Write(context.TODO(), writeQuery)
assert.Error(t, err)
assert.True(t, strings.Contains(err.Error(), "invalid write request"),
fmt.Sprintf("unexpected error string: %v", err.Error()))
@@ -247,18 +265,23 @@ func TestLocalWriteAggregatedSuccess(t *testing.T) {
defer ctrl.Finish()
store, sessions := setup(t, ctrl)
- writeQuery := newWriteQuery()
- writeQuery.Attributes = storage.Attributes{
- MetricsType: storage.AggregatedMetricsType,
+ opts := newWriteQuery(t).Options()
+
+ // Use supported retention/resolution
+ opts.Attributes = storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
Retention: 30 * 24 * time.Hour,
Resolution: time.Minute,
}
+ writeQuery, err := storage.NewWriteQuery(opts)
+ require.NoError(t, err)
+
session := sessions.aggregated1MonthRetention1MinuteResolution
session.EXPECT().WriteTagged(gomock.Any(), gomock.Any(), gomock.Any(),
- gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(len(writeQuery.Datapoints))
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(len(writeQuery.Datapoints()))
- err := store.Write(context.TODO(), writeQuery)
+ err = store.Write(context.TODO(), writeQuery)
assert.NoError(t, err)
assert.NoError(t, store.Close())
}
@@ -266,6 +289,7 @@ func TestLocalWriteAggregatedSuccess(t *testing.T) {
func TestLocalRead(t *testing.T) {
ctrl := xtest.NewController(t)
defer ctrl.Finish()
+
store, sessions := setup(t, ctrl)
testTags := seriesiter.GenerateTag()
@@ -292,7 +316,8 @@ func TestLocalReadExceedsRetention(t *testing.T) {
session.EXPECT().FetchTagged(gomock.Any(), gomock.Any(), gomock.Any()).
Return(seriesiter.NewMockSeriesIters(ctrl, testTag, 1, 2),
testFetchResponseMetadata, nil)
- session.EXPECT().IteratorPools().Return(nil, nil).AnyTimes()
+ session.EXPECT().IteratorPools().
+ Return(newTestIteratorPools(ctrl), nil).AnyTimes()
searchReq := newFetchReq()
searchReq.Start = time.Now().Add(-2 * testLongestRetention)
@@ -304,7 +329,7 @@ func TestLocalReadExceedsRetention(t *testing.T) {
func buildFetchOpts() *storage.FetchOptions {
opts := storage.NewFetchOptions()
- opts.Limit = 100
+ opts.SeriesLimit = 100
return opts
}
@@ -685,7 +710,7 @@ func TestLocalCompleteTagsSuccess(t *testing.T) {
require.False(t, result.CompleteNameOnly)
require.Equal(t, 3, len(result.CompletedTags))
// NB: expected will be sorted alphabetically
- expected := []storage.CompletedTag{
+ expected := []consolidators.CompletedTag{
{
Name: []byte("aba"),
Values: [][]byte{[]byte("quz")},
@@ -744,7 +769,7 @@ func TestLocalCompleteTagsSuccessFinalize(t *testing.T) {
require.False(t, result.CompleteNameOnly)
require.Equal(t, 1, len(result.CompletedTags))
// NB: expected will be sorted alphabetically
- expected := []storage.CompletedTag{
+ expected := []consolidators.CompletedTag{
{
Name: []byte("name"),
Values: [][]byte{[]byte("value")},
@@ -764,11 +789,12 @@ func TestInvalidBlockTypes(t *testing.T) {
s, err := NewStorage(nil, opts, instrument.NewOptions())
require.NoError(t, err)
+ query := &storage.FetchQuery{}
fetchOpts := &storage.FetchOptions{BlockType: models.TypeDecodedBlock}
- _, err = s.FetchBlocks(context.TODO(), nil, fetchOpts)
+ _, err = s.FetchBlocks(context.TODO(), query, fetchOpts)
assert.Error(t, err)
fetchOpts.BlockType = models.TypeMultiBlock
- _, err = s.FetchBlocks(context.TODO(), nil, fetchOpts)
+ _, err = s.FetchBlocks(context.TODO(), query, fetchOpts)
assert.Error(t, err)
}
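
NB: the tests above no longer set WriteQuery fields directly; they copy the options, tweak them, and rebuild the query so validation runs again. A minimal standalone sketch of that pattern (values are illustrative, not from this PR):

```go
package main

import (
	"fmt"
	"time"

	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
	"github.com/m3db/m3/src/query/ts"
	xtime "github.com/m3db/m3/src/x/time"
)

func main() {
	base, err := storage.NewWriteQuery(storage.WriteQueryOptions{
		Tags:       models.MustMakeTags("foo", "bar"),
		Datapoints: ts.Datapoints{{Timestamp: time.Now(), Value: 1}},
		Unit:       xtime.Millisecond,
		Attributes: storagemetadata.Attributes{
			MetricsType: storagemetadata.UnaggregatedMetricsType,
		},
	})
	if err != nil {
		panic(err)
	}

	// Copy the options, retarget them at an aggregated namespace, and build
	// a fresh query so validation runs again over the mutated options.
	opts := base.Options()
	opts.Attributes = storagemetadata.Attributes{
		MetricsType: storagemetadata.AggregatedMetricsType,
		Retention:   30 * 24 * time.Hour,
		Resolution:  time.Minute,
	}
	rebuilt, err := storage.NewWriteQuery(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(rebuilt.Attributes().MetricsType)
}
```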
diff --git a/src/query/storage/config.go b/src/query/storage/m3/storagemetadata/config.go
similarity index 98%
rename from src/query/storage/config.go
rename to src/query/storage/m3/storagemetadata/config.go
index f16c83f7a2..2e7a96eff4 100644
--- a/src/query/storage/config.go
+++ b/src/query/storage/m3/storagemetadata/config.go
@@ -18,9 +18,11 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package storage
+package storagemetadata
-import "fmt"
+import (
+ "fmt"
+)
var (
validMetricsTypes = []MetricsType{
diff --git a/src/query/storage/config_test.go b/src/query/storage/m3/storagemetadata/config_test.go
similarity index 98%
rename from src/query/storage/config_test.go
rename to src/query/storage/m3/storagemetadata/config_test.go
index 8b73a7e351..53e75b9241 100644
--- a/src/query/storage/config_test.go
+++ b/src/query/storage/m3/storagemetadata/config_test.go
@@ -18,7 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package storage
+package storagemetadata
import (
"fmt"
diff --git a/src/query/storage/m3/storagemetadata/types.go b/src/query/storage/m3/storagemetadata/types.go
new file mode 100644
index 0000000000..563f04ead2
--- /dev/null
+++ b/src/query/storage/m3/storagemetadata/types.go
@@ -0,0 +1,57 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package storagemetadata
+
+import (
+ "time"
+)
+
+// MetricsType is a type of stored metrics.
+type MetricsType uint
+
+const (
+ // UnknownMetricsType is the unknown metrics type and is invalid.
+ UnknownMetricsType MetricsType = iota
+ // UnaggregatedMetricsType is an unaggregated metrics type.
+ UnaggregatedMetricsType
+ // AggregatedMetricsType is an aggregated metrics type.
+ AggregatedMetricsType
+
+ // DefaultMetricsType is the default metrics type value.
+ DefaultMetricsType = UnaggregatedMetricsType
+)
+
+// Attributes is a set of stored metrics attributes.
+type Attributes struct {
+ // MetricsType indicates the type of namespace this metric originated from.
+ MetricsType MetricsType
+ // Retention indicates the retention of the namespace this metric originated
+ // from.
+ Retention time.Duration
+ // Resolution indicates the resolution of the namespace this metric originated
+ // from.
+ Resolution time.Duration
+}
+
+// Validate validates the storage attributes.
+func (a Attributes) Validate() error {
+ return ValidateMetricsType(a.MetricsType)
+}
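
NB: a small standalone sketch of the relocated types in use, constructing and validating Attributes from the new storagemetadata package:

```go
package main

import (
	"fmt"
	"time"

	"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
)

func main() {
	// Attributes for a namespace holding one month of 1-minute data.
	attrs := storagemetadata.Attributes{
		MetricsType: storagemetadata.AggregatedMetricsType,
		Retention:   30 * 24 * time.Hour,
		Resolution:  time.Minute,
	}

	// Validate only checks the metrics type; retention and resolution are
	// checked by the consumers that require them.
	fmt.Println(attrs.Validate()) // <nil>
}
```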
diff --git a/src/query/storage/m3/types.go b/src/query/storage/m3/types.go
index 5548ded95a..be9affa3d3 100644
--- a/src/query/storage/m3/types.go
+++ b/src/query/storage/m3/types.go
@@ -23,11 +23,8 @@ package m3
import (
"context"
- "github.com/m3db/m3/src/dbnode/client"
- "github.com/m3db/m3/src/dbnode/encoding"
- "github.com/m3db/m3/src/query/block"
genericstorage "github.com/m3db/m3/src/query/storage"
- "github.com/m3db/m3/src/x/ident"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
)
// Cleanup is a cleanup function to be called after resources are freed.
@@ -50,81 +47,19 @@ type Querier interface {
ctx context.Context,
query *genericstorage.FetchQuery,
options *genericstorage.FetchOptions,
- ) (SeriesFetchResult, Cleanup, error)
+ ) (consolidators.SeriesFetchResult, Cleanup, error)
// SearchCompressed fetches matching tags based on a query.
SearchCompressed(
ctx context.Context,
query *genericstorage.FetchQuery,
options *genericstorage.FetchOptions,
- ) (TagResult, Cleanup, error)
+ ) (consolidators.TagResult, Cleanup, error)
// CompleteTagsCompressed returns autocompleted tag results.
CompleteTagsCompressed(
ctx context.Context,
query *genericstorage.CompleteTagsQuery,
options *genericstorage.FetchOptions,
- ) (*genericstorage.CompleteTagsResult, error)
-}
-
-// SeriesFetchResult is a fetch result with associated metadata.
-type SeriesFetchResult struct {
- // Metadata is the set of metadata associated with the fetch result.
- Metadata block.ResultMetadata
- // SeriesIterators is the list of series iterators for the result.
- SeriesIterators encoding.SeriesIterators
-}
-
-// MultiFetchResult is a deduping accumalator for series iterators
-// that allows merging using a given strategy.
-type MultiFetchResult interface {
- // Add appends series fetch results to the accumulator.
- Add(
- fetchResult SeriesFetchResult,
- attrs genericstorage.Attributes,
- err error,
- )
-
- // FinalResult returns a series fetch result containing deduplicated series
- // iterators and their metadata, and any errors encountered.
- FinalResult() (SeriesFetchResult, error)
-
- // FinalResult returns a series fetch result containing deduplicated series
- // iterators and their metadata, as well as any attributes corresponding to
- // these results, and any errors encountered.
- FinalResultWithAttrs() (SeriesFetchResult, []genericstorage.Attributes, error)
-
- // Close releases all resources held by this accumulator.
- Close() error
-}
-
-// TagResult is a fetch tag result with associated metadata.
-type TagResult struct {
- // Metadata is the set of metadata associated with the fetch result.
- Metadata block.ResultMetadata
- // Tags is the list of tags for the result.
- Tags []MultiTagResult
-}
-
-// MultiFetchTagsResult is a deduping accumalator for tag iterators.
-type MultiFetchTagsResult interface {
- // Add adds tagged ID iterators to the accumulator.
- Add(
- newIterator client.TaggedIDsIterator,
- meta block.ResultMetadata,
- err error,
- )
- // FinalResult returns a deduped list of tag iterators with
- // corresponding series IDs.
- FinalResult() (TagResult, error)
- // Close releases all resources held by this accumulator.
- Close() error
-}
-
-// MultiTagResult represents a tag iterator with its string ID.
-type MultiTagResult struct {
- // ID is the series ID.
- ID ident.ID
- // Iter is the tag iterator for the series.
- Iter ident.TagIterator
+ ) (*consolidators.CompleteTagsResult, error)
}
diff --git a/src/query/storage/mock/storage.go b/src/query/storage/mock/storage.go
index ec3dd005ce..eb150cd0bf 100644
--- a/src/query/storage/mock/storage.go
+++ b/src/query/storage/mock/storage.go
@@ -27,6 +27,7 @@ import (
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
)
// Storage implements storage.Storage and provides methods to help
@@ -40,7 +41,7 @@ type Storage interface {
SetFetchResult(*storage.FetchResult, error)
SetFetchResults(...*storage.FetchResult)
SetSearchSeriesResult(*storage.SearchResults, error)
- SetCompleteTagsResult(*storage.CompleteTagsResult, error)
+ SetCompleteTagsResult(*consolidators.CompleteTagsResult, error)
SetWriteResult(error)
SetFetchBlocksResult(block.Result, error)
SetCloseResult(error)
@@ -71,7 +72,7 @@ type mockStorage struct {
err error
}
completeTagsResult struct {
- result *storage.CompleteTagsResult
+ result *consolidators.CompleteTagsResult
err error
}
closeResult struct {
@@ -133,7 +134,7 @@ func (s *mockStorage) SetFetchBlocksResult(result block.Result, err error) {
s.fetchBlocksResult.err = err
}
-func (s *mockStorage) SetCompleteTagsResult(result *storage.CompleteTagsResult, err error) {
+func (s *mockStorage) SetCompleteTagsResult(result *consolidators.CompleteTagsResult, err error) {
s.Lock()
defer s.Unlock()
s.completeTagsResult.result = result
@@ -209,7 +210,7 @@ func (s *mockStorage) CompleteTags(
ctx context.Context,
query *storage.CompleteTagsQuery,
opts *storage.FetchOptions,
-) (*storage.CompleteTagsResult, error) {
+) (*consolidators.CompleteTagsResult, error) {
s.RLock()
defer s.RUnlock()
s.lastFetchOptions = opts
diff --git a/src/query/storage/noop_storage.go b/src/query/storage/noop_storage.go
new file mode 100644
index 0000000000..9a19367f82
--- /dev/null
+++ b/src/query/storage/noop_storage.go
@@ -0,0 +1,89 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package storage
+
+import (
+ "context"
+ "errors"
+
+ "github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+)
+
+var errNoopClient = errors.New("operation not valid for noop client")
+
+// NewNoopStorage returns a fake implementation of Storage that rejects all
+// writes and returns errors for all queries.
+func NewNoopStorage() Storage {
+ return noopStorage{}
+}
+
+type noopStorage struct{}
+
+func (noopStorage) Fetch(ctx context.Context, query *FetchQuery, options *FetchOptions) (*FetchResult, error) {
+ return nil, errNoopClient
+}
+
+func (noopStorage) FetchProm(ctx context.Context, query *FetchQuery, options *FetchOptions) (PromResult, error) {
+ return PromResult{}, errNoopClient
+}
+
+// FetchBlocks fetches timeseries as blocks based on a query.
+func (noopStorage) FetchBlocks(ctx context.Context, query *FetchQuery, options *FetchOptions) (block.Result, error) {
+ return block.Result{}, errNoopClient
+}
+
+// SearchSeries returns series IDs matching the current query.
+func (noopStorage) SearchSeries(ctx context.Context, query *FetchQuery, options *FetchOptions) (*SearchResults, error) {
+ return nil, errNoopClient
+}
+
+// CompleteTags returns autocompleted tag results.
+func (noopStorage) CompleteTags(ctx context.Context, query *CompleteTagsQuery, options *FetchOptions) (*consolidators.CompleteTagsResult, error) {
+ return nil, errNoopClient
+}
+
+// Write writes a batched set of datapoints to storage based on the provided
+// query.
+func (noopStorage) Write(ctx context.Context, query *WriteQuery) error {
+ return errNoopClient
+}
+
+// Type identifies the type of the underlying storage.
+func (noopStorage) Type() Type {
+ return TypeLocalDC
+}
+
+// Close is used to close the underlying storage and free up resources.
+func (noopStorage) Close() error {
+ return errNoopClient
+}
+
+// ErrorBehavior dictates what fanout storage should do when this storage
+// encounters an error.
+func (noopStorage) ErrorBehavior() ErrorBehavior {
+ return BehaviorWarn
+}
+
+// Name gives the plaintext name for this storage, used for logging purposes.
+func (noopStorage) Name() string {
+ return "noopStorage"
+}
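
NB: an illustrative use of the new noop storage. Every operation fails with a sentinel error, and fanout treats the storage as warn-only, which makes it a convenient placeholder backend:

```go
package main

import (
	"context"
	"fmt"

	"github.com/m3db/m3/src/query/storage"
)

func main() {
	store := storage.NewNoopStorage()

	// Every query surface returns the noop sentinel error.
	_, err := store.Fetch(context.TODO(), nil, nil)
	fmt.Println(err) // operation not valid for noop client

	// Fanout downgrades this storage's errors to warnings.
	fmt.Println(store.ErrorBehavior() == storage.BehaviorWarn) // true
}
```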
diff --git a/src/query/storage/prom_converter.go b/src/query/storage/prom_converter.go
index 9dc9a70ed0..4089dd7746 100644
--- a/src/query/storage/prom_converter.go
+++ b/src/query/storage/prom_converter.go
@@ -24,51 +24,23 @@ import (
"sync"
"github.com/m3db/m3/src/dbnode/encoding"
- "github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/cost"
"github.com/m3db/m3/src/query/generated/proto/prompb"
"github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
xcost "github.com/m3db/m3/src/x/cost"
xerrors "github.com/m3db/m3/src/x/errors"
- "github.com/m3db/m3/src/x/ident"
xsync "github.com/m3db/m3/src/x/sync"
)
const initRawFetchAllocSize = 32
-func cloneBytes(b []byte) []byte {
- return append(make([]byte, 0, len(b)), b...)
-}
-
-func tagIteratorToLabels(
- identTags ident.TagIterator,
-) ([]prompb.Label, error) {
- labels := make([]prompb.Label, 0, identTags.Remaining())
- for identTags.Next() {
- identTag := identTags.Current()
- labels = append(labels, prompb.Label{
- Name: cloneBytes(identTag.Name.Bytes()),
- Value: cloneBytes(identTag.Value.Bytes()),
- })
- }
-
- if err := identTags.Err(); err != nil {
- return nil, err
- }
-
- return labels, nil
-}
-
func iteratorToPromResult(
iter encoding.SeriesIterator,
+ tags models.Tags,
enforcer cost.ChainedEnforcer,
tagOptions models.TagOptions,
) (*prompb.TimeSeries, error) {
- labels, err := tagIteratorToLabels(iter.Tags())
- if err != nil {
- return nil, err
- }
-
samples := make([]prompb.Sample, 0, initRawFetchAllocSize)
for iter.Next() {
dp, _, _ := iter.Current()
@@ -88,21 +60,26 @@ func iteratorToPromResult(
}
return &prompb.TimeSeries{
- Labels: labels,
+ Labels: TagsToPromLabels(tags),
Samples: samples,
}, nil
}
// Fall back to sequential decompression if unable to decompress concurrently.
func toPromSequentially(
- iters []encoding.SeriesIterator,
+ fetchResult consolidators.SeriesFetchResult,
enforcer cost.ChainedEnforcer,
tagOptions models.TagOptions,
) (PromResult, error) {
- seriesList := make([]*prompb.TimeSeries, 0, len(iters))
+ count := fetchResult.Count()
+ seriesList := make([]*prompb.TimeSeries, 0, count)
+ for i := 0; i < count; i++ {
+ iter, tags, err := fetchResult.IterTagsAtIndex(i, tagOptions)
+ if err != nil {
+ return PromResult{}, err
+ }
- for _, iter := range iters {
- series, err := iteratorToPromResult(iter, enforcer, tagOptions)
+ series, err := iteratorToPromResult(iter, tags, enforcer, tagOptions)
if err != nil {
return PromResult{}, err
}
@@ -120,25 +97,31 @@ func toPromSequentially(
}
func toPromConcurrently(
- iters []encoding.SeriesIterator,
+ fetchResult consolidators.SeriesFetchResult,
readWorkerPool xsync.PooledWorkerPool,
enforcer cost.ChainedEnforcer,
tagOptions models.TagOptions,
) (PromResult, error) {
+ count := fetchResult.Count()
var (
- seriesList = make([]*prompb.TimeSeries, len(iters))
+ seriesList = make([]*prompb.TimeSeries, count)
wg sync.WaitGroup
multiErr xerrors.MultiError
mu sync.Mutex
)
- for i, iter := range iters {
- i, iter := i, iter
+ for i := 0; i < count; i++ {
+ i := i
+ iter, tags, err := fetchResult.IterTagsAtIndex(i, tagOptions)
+ if err != nil {
+ return PromResult{}, err
+ }
+
wg.Add(1)
readWorkerPool.Go(func() {
defer wg.Done()
- series, err := iteratorToPromResult(iter, enforcer, tagOptions)
+ series, err := iteratorToPromResult(iter, tags, enforcer, tagOptions)
if err != nil {
mu.Lock()
multiErr = multiErr.Add(err)
@@ -170,36 +153,37 @@ func toPromConcurrently(
}
func seriesIteratorsToPromResult(
- seriesIterators encoding.SeriesIterators,
+ fetchResult consolidators.SeriesFetchResult,
readWorkerPool xsync.PooledWorkerPool,
enforcer cost.ChainedEnforcer,
tagOptions models.TagOptions,
) (PromResult, error) {
- defer seriesIterators.Close()
-
- iters := seriesIterators.Iters()
if readWorkerPool == nil {
- return toPromSequentially(iters, enforcer, tagOptions)
+ return toPromSequentially(fetchResult, enforcer, tagOptions)
}
- return toPromConcurrently(iters, readWorkerPool, enforcer, tagOptions)
+ return toPromConcurrently(fetchResult, readWorkerPool, enforcer, tagOptions)
}
// SeriesIteratorsToPromResult converts raw series iterators directly to a
// Prometheus-compatible result.
func SeriesIteratorsToPromResult(
- seriesIterators encoding.SeriesIterators,
+ fetchResult consolidators.SeriesFetchResult,
readWorkerPool xsync.PooledWorkerPool,
- metadata block.ResultMetadata,
enforcer cost.ChainedEnforcer,
tagOptions models.TagOptions,
) (PromResult, error) {
+ defer fetchResult.Close()
+ if err := fetchResult.Verify(); err != nil {
+ return PromResult{}, err
+ }
+
if enforcer == nil {
enforcer = cost.NoopChainedEnforcer()
}
- promResult, err := seriesIteratorsToPromResult(seriesIterators,
+ promResult, err := seriesIteratorsToPromResult(fetchResult,
readWorkerPool, enforcer, tagOptions)
- promResult.Metadata = metadata
+ promResult.Metadata = fetchResult.Metadata
return promResult, err
}
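
NB: toPromConcurrently above follows a common fan-out shape: results land in pre-sized slots (so the slice needs no lock), errors are folded into a mutex-guarded list, and a WaitGroup joins the workers. A generic sketch of that shape, with plain goroutines standing in for the pooled worker pool:

```go
package main

import (
	"fmt"
	"sync"
)

// convertAll runs convert over every input concurrently. Each goroutine
// writes to its own pre-allocated slot, so only the error slice needs a lock.
func convertAll(inputs []int, convert func(int) (string, error)) ([]string, error) {
	var (
		results = make([]string, len(inputs))
		wg      sync.WaitGroup
		mu      sync.Mutex
		errs    []error
	)
	for i, in := range inputs {
		i, in := i, in // per-iteration copies for the closure
		wg.Add(1)
		go func() {
			defer wg.Done()
			out, err := convert(in)
			if err != nil {
				mu.Lock()
				errs = append(errs, err)
				mu.Unlock()
				return
			}
			results[i] = out
		}()
	}
	wg.Wait()
	if len(errs) > 0 {
		return nil, errs[0]
	}
	return results, nil
}

func main() {
	out, err := convertAll([]int{1, 2, 3}, func(n int) (string, error) {
		return fmt.Sprint(n * n), nil
	})
	fmt.Println(out, err) // [1 4 9] <nil>
}
```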
diff --git a/src/query/storage/prom_converter_test.go b/src/query/storage/prom_converter_test.go
index e640cb59dd..cb290c966d 100644
--- a/src/query/storage/prom_converter_test.go
+++ b/src/query/storage/prom_converter_test.go
@@ -30,6 +30,7 @@ import (
"github.com/m3db/m3/src/query/cost"
"github.com/m3db/m3/src/query/generated/proto/prompb"
"github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/test/seriesiter"
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/x/checked"
@@ -45,6 +46,27 @@ import (
"github.com/stretchr/testify/require"
)
+func fr(
+ t *testing.T,
+ its encoding.SeriesIterators,
+ tags ...*models.Tags,
+) consolidators.SeriesFetchResult {
+ result, err := consolidators.
+ NewSeriesFetchResult(its, tags, block.NewResultMetadata())
+ assert.NoError(t, err)
+ return result
+}
+
+func makeTag(n, v string, count int) []*models.Tags {
+ tags := make([]*models.Tags, 0, count)
+ for i := 0; i < count; i++ {
+ t := models.EmptyTags().AddTag(models.Tag{Name: []byte(n), Value: []byte(v)})
+ tags = append(tags, &t)
+ }
+
+ return tags
+}
+
func verifyExpandPromSeries(
t *testing.T,
ctrl *gomock.Controller,
@@ -52,17 +74,17 @@ func verifyExpandPromSeries(
ex bool,
pools xsync.PooledWorkerPool,
) {
- testTags := seriesiter.GenerateTag()
- iters := seriesiter.NewMockSeriesIters(ctrl, testTags, num, 2)
-
+ iters := seriesiter.NewMockSeriesIters(ctrl, ident.Tag{}, num, 2)
+ fetchResult := fr(t, iters, makeTag("foo", "bar", num)...)
enforcer := cost.NewMockChainedEnforcer(ctrl)
enforcer.EXPECT().Add(xcost.Cost(2)).Times(num)
- results, err := SeriesIteratorsToPromResult(iters, pools,
- block.ResultMetadata{
- Exhaustive: ex,
- LocalOnly: true,
- Warnings: []block.Warning{block.Warning{Name: "foo", Message: "bar"}},
- }, enforcer, nil)
+ fetchResult.Metadata = block.ResultMetadata{
+ Exhaustive: ex,
+ LocalOnly: true,
+ Warnings: []block.Warning{block.Warning{Name: "foo", Message: "bar"}},
+ }
+
+ results, err := SeriesIteratorsToPromResult(fetchResult, pools, enforcer, nil)
assert.NoError(t, err)
require.NotNil(t, results)
@@ -74,8 +96,8 @@ func verifyExpandPromSeries(
require.Equal(t, len(ts), num)
expectedTags := []prompb.Label{
prompb.Label{
- Name: testTags.Name.Bytes(),
- Value: testTags.Value.Bytes(),
+ Name: []byte("foo"),
+ Value: []byte("bar"),
},
}
@@ -208,36 +230,13 @@ func setupTags(name, value string) (ident.Tags, overwrite) {
return tags, overwrite
}
-func TestTagIteratorToLabels(t *testing.T) {
- name := "foo"
- value := "bar"
- tags, overwrite := setupTags(name, value)
- tagIter := ident.NewTagsIterator(tags)
- labels, err := tagIteratorToLabels(tagIter)
- require.NoError(t, err)
-
- verifyTags := func() {
- require.Equal(t, 3, len(labels))
- assert.Equal(t, name, string(labels[0].GetName()))
- assert.Equal(t, value, string(labels[0].GetValue()))
- assert.Equal(t, name, string(labels[1].GetName()))
- assert.Equal(t, "", string(labels[1].GetValue()))
- assert.Equal(t, "", string(labels[2].GetName()))
- assert.Equal(t, value, string(labels[2].GetValue()))
- }
-
- verifyTags()
- overwrite()
- verifyTags()
-}
-
func TestDecodeIteratorsWithEmptySeries(t *testing.T) {
ctrl := xtest.NewController(t)
defer ctrl.Finish()
name := "name"
now := time.Now()
- buildIter := func(val string, hasVal bool) encoding.SeriesIterator {
+ buildIter := func(val string, hasVal bool) *encoding.MockSeriesIterator {
iter := encoding.NewMockSeriesIterator(ctrl)
if hasVal {
@@ -255,13 +254,21 @@ func TestDecodeIteratorsWithEmptySeries(t *testing.T) {
}
tags := ident.NewMockTagIterator(ctrl)
- tags.EXPECT().Remaining().Return(1)
- tags.EXPECT().Next().Return(true)
- tags.EXPECT().Current().Return(tag)
- tags.EXPECT().Next().Return(false)
- tags.EXPECT().Err().Return(nil)
+ populateIter := func() {
+ gomock.InOrder(
+ tags.EXPECT().Remaining().Return(1),
+ tags.EXPECT().Next().Return(true),
+ tags.EXPECT().Current().Return(tag),
+ tags.EXPECT().Next().Return(false),
+ tags.EXPECT().Err().Return(nil),
+ tags.EXPECT().Rewind(),
+ )
+ }
+ populateIter()
iter.EXPECT().Tags().Return(tags)
+ iter.EXPECT().Close().MaxTimes(1)
+
return iter
}
@@ -283,7 +290,7 @@ func TestDecodeIteratorsWithEmptySeries(t *testing.T) {
}
}
- buildIters := func() encoding.SeriesIterators {
+ buildIters := func() consolidators.SeriesFetchResult {
iters := []encoding.SeriesIterator{
buildIter("foo", false),
buildIter("bar", true),
@@ -293,15 +300,13 @@ func TestDecodeIteratorsWithEmptySeries(t *testing.T) {
}
it := encoding.NewMockSeriesIterators(ctrl)
- it.EXPECT().Iters().Return(iters)
- it.EXPECT().Close()
- return it
+ it.EXPECT().Iters().Return(iters).AnyTimes()
+ it.EXPECT().Len().Return(len(iters)).AnyTimes()
+ return fr(t, it)
}
- md := block.NewResultMetadata()
opts := models.NewTagOptions()
-
- res, err := SeriesIteratorsToPromResult(buildIters(), nil, md, nil, opts)
+ res, err := SeriesIteratorsToPromResult(buildIters(), nil, nil, opts)
require.NoError(t, err)
verifyResult(t, res)
@@ -309,7 +314,7 @@ func TestDecodeIteratorsWithEmptySeries(t *testing.T) {
require.NoError(t, err)
pool.Init()
- res, err = SeriesIteratorsToPromResult(buildIters(), pool, md, nil, opts)
+ res, err = SeriesIteratorsToPromResult(buildIters(), pool, nil, opts)
require.NoError(t, err)
verifyResult(t, res)
}
diff --git a/src/query/storage/prometheus/context.go b/src/query/storage/prometheus/context.go
new file mode 100644
index 0000000000..4c1b205e1e
--- /dev/null
+++ b/src/query/storage/prometheus/context.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package prometheus
+
+import (
+ "context"
+ "errors"
+
+ "github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/storage"
+)
+
+// ContextKey is the context key type.
+type ContextKey string
+
+const (
+ // FetchOptionsContextKey is the context key for fetch options.
+ FetchOptionsContextKey ContextKey = "fetch-options"
+
+ // BlockResultMetadataKey is the context key for the block result metadata.
+ BlockResultMetadataKey ContextKey = "block-meta-result"
+)
+
+// RemoteReadFlags is a set of flags for storage remote read requests.
+type RemoteReadFlags struct {
+ Limited bool
+}
+
+func fetchOptions(ctx context.Context) (*storage.FetchOptions, error) {
+ fetchOptions := ctx.Value(FetchOptionsContextKey)
+ if f, ok := fetchOptions.(*storage.FetchOptions); ok {
+ return f, nil
+ }
+ return nil, errors.New("fetch options not available")
+}
+
+func resultMetadata(ctx context.Context) (*block.ResultMetadata, error) {
+ value := ctx.Value(BlockResultMetadataKey)
+ if v, ok := value.(*block.ResultMetadata); ok {
+ return v, nil
+ }
+ return nil, errors.New("block result metadata not available")
+}
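
NB: a sketch of how a caller is expected to seed the context before handing it to the queryable. The wiring around it is assumed; the keys are the ones defined above:

```go
package main

import (
	"context"

	"github.com/m3db/m3/src/query/block"
	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/query/storage/prometheus"
)

// withQueryContext stashes fetch options and a result-metadata pointer in the
// context; the querier reads both back out, since it never sees the request.
func withQueryContext(ctx context.Context) (context.Context, *block.ResultMetadata) {
	fetchOpts := storage.NewFetchOptions()
	resultMeta := block.NewResultMetadata()
	ctx = context.WithValue(ctx, prometheus.FetchOptionsContextKey, fetchOpts)
	ctx = context.WithValue(ctx, prometheus.BlockResultMetadataKey, &resultMeta)
	return ctx, &resultMeta
}

func main() {
	ctx, meta := withQueryContext(context.Background())
	_, _ = ctx, meta // pass ctx to the queryable; read *meta after Select.
}
```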
diff --git a/src/query/storage/prometheus/prometheus_storage.go b/src/query/storage/prometheus/prometheus_storage.go
new file mode 100644
index 0000000000..f74706aed9
--- /dev/null
+++ b/src/query/storage/prometheus/prometheus_storage.go
@@ -0,0 +1,323 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package prometheus
+
+import (
+ "context"
+ "sort"
+ "time"
+
+ "github.com/m3db/m3/src/query/generated/proto/prompb"
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/parser/promql"
+ "github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/pkg/errors"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/pkg/labels"
+ promstorage "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb/chunkenc"
+ "github.com/uber-go/tally"
+ "go.uber.org/zap"
+)
+
+type prometheusQueryable struct {
+ storage storage.Storage
+ scope tally.Scope
+ logger *zap.Logger
+}
+
+// PrometheusOptions are options to create a Prometheus queryable backed by
+// an M3 storage.
+type PrometheusOptions struct {
+ Storage storage.Storage
+ InstrumentOptions instrument.Options
+}
+
+// NewPrometheusQueryable returns a new Prometheus queryable backed by an M3
+// storage.
+func NewPrometheusQueryable(opts PrometheusOptions) promstorage.Queryable {
+ scope := opts.InstrumentOptions.MetricsScope().Tagged(map[string]string{"storage": "prometheus_storage"})
+ return &prometheusQueryable{
+ storage: opts.Storage,
+ scope: scope,
+ logger: opts.InstrumentOptions.Logger(),
+ }
+}
+
+func (o PrometheusOptions) validate() error {
+ if o.Storage == nil {
+ return errors.New("storage is not set")
+ }
+ if o.InstrumentOptions == nil {
+ return errors.New("instrument options not set")
+ }
+ return nil
+}
+
+// Querier returns a prometheus storage Querier.
+func (q *prometheusQueryable) Querier(
+ ctx context.Context,
+ mint, maxt int64,
+) (promstorage.Querier, error) {
+ return newQuerier(ctx, q.storage, q.logger), nil
+}
+
+type querier struct {
+ ctx context.Context
+ storage storage.Storage
+ logger *zap.Logger
+}
+
+func newQuerier(
+ ctx context.Context,
+ storage storage.Storage,
+ logger *zap.Logger,
+) promstorage.Querier {
+ return &querier{
+ ctx: ctx,
+ storage: storage,
+ logger: logger,
+ }
+}
+
+func (q *querier) Select(
+ sortSeries bool,
+ hints *promstorage.SelectHints,
+ labelMatchers ...*labels.Matcher,
+) (promstorage.SeriesSet, promstorage.Warnings, error) {
+ matchers, err := promql.LabelMatchersToModelMatcher(labelMatchers, models.NewTagOptions())
+ if err != nil {
+ return nil, nil, err
+ }
+
+ query := &storage.FetchQuery{
+ TagMatchers: matchers,
+ Start: time.Unix(0, hints.Start*int64(time.Millisecond)),
+ End: time.Unix(0, hints.End*int64(time.Millisecond)),
+ Interval: time.Duration(hints.Step) * time.Millisecond,
+ }
+
+ // NB (@shreyas): The fetch options builder sets these up from the request,
+ // which we do not have access to here.
+ fetchOptions, err := fetchOptions(q.ctx)
+ if err != nil {
+ q.logger.Error("fetch options not provided in context", zap.Error(err))
+ return nil, nil, err
+ }
+
+ result, err := q.storage.FetchProm(q.ctx, query, fetchOptions)
+ if err != nil {
+ return nil, nil, err
+ }
+ seriesSet := fromQueryResult(sortSeries, result.PromResult)
+ warnings := fromWarningStrings(result.Metadata.WarningStrings())
+
+ resultMetadataPtr, err := resultMetadata(q.ctx)
+ if err != nil {
+ q.logger.Error("result metadata not set in context")
+ return nil, nil, err
+ }
+ if resultMetadataPtr == nil {
+ err := errors.New("result metadata nil for context")
+ q.logger.Error(err.Error())
+ return nil, nil, err
+ }
+
+ *resultMetadataPtr = result.Metadata
+
+ return seriesSet, warnings, err
+}
+
+func (q *querier) LabelValues(name string) ([]string, promstorage.Warnings, error) {
+ // TODO (@shreyas): Implement this.
+ q.logger.Warn("calling unsupported LabelValues method")
+ return nil, nil, errors.New("not implemented")
+}
+
+func (q *querier) LabelNames() ([]string, promstorage.Warnings, error) {
+ // TODO (@shreyas): Implement this.
+ q.logger.Warn("calling unsupported LabelNames method")
+ return nil, nil, errors.New("not implemented")
+}
+
+func (q *querier) Close() error {
+ return nil
+}
+
+func fromWarningStrings(warnings []string) []error {
+ errs := make([]error, 0, len(warnings))
+ for _, warning := range warnings {
+ errs = append(errs, errors.New(warning))
+ }
+ return errs
+}
+
+// This is a copy of the Prometheus remote.FromQueryResult method, copied so
+// that it can operate on the M3 prompb structs.
+func fromQueryResult(sortSeries bool, res *prompb.QueryResult) promstorage.SeriesSet {
+ series := make([]promstorage.Series, 0, len(res.Timeseries))
+ for _, ts := range res.Timeseries {
+ labels := labelProtosToLabels(ts.Labels)
+ if err := validateLabelsAndMetricName(labels); err != nil {
+ return errSeriesSet{err: err}
+ }
+
+ series = append(series, &concreteSeries{
+ labels: labels,
+ samples: ts.Samples,
+ })
+ }
+
+ if sortSeries {
+ sort.Sort(byLabel(series))
+ }
+ return &concreteSeriesSet{
+ series: series,
+ }
+}
+
+type byLabel []promstorage.Series
+
+func (a byLabel) Len() int { return len(a) }
+func (a byLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 }
+
+func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels {
+ result := make(labels.Labels, 0, len(labelPairs))
+ for _, l := range labelPairs {
+ result = append(result, labels.Label{
+ Name: string(l.Name),
+ Value: string(l.Value),
+ })
+ }
+ sort.Sort(result)
+ return result
+}
+
+// errSeriesSet implements storage.SeriesSet, just returning an error.
+type errSeriesSet struct {
+ err error
+}
+
+func (errSeriesSet) Next() bool {
+ return false
+}
+
+func (errSeriesSet) At() promstorage.Series {
+ return nil
+}
+
+func (e errSeriesSet) Err() error {
+ return e.err
+}
+
+// concreteSeriesSet implements storage.SeriesSet.
+type concreteSeriesSet struct {
+ cur int
+ series []promstorage.Series
+}
+
+func (c *concreteSeriesSet) Next() bool {
+ c.cur++
+ return c.cur-1 < len(c.series)
+}
+
+func (c *concreteSeriesSet) At() promstorage.Series {
+ return c.series[c.cur-1]
+}
+
+func (c *concreteSeriesSet) Err() error {
+ return nil
+}
+
+// concreteSeries implements storage.Series.
+type concreteSeries struct {
+ labels labels.Labels
+ samples []prompb.Sample
+}
+
+func (c *concreteSeries) Labels() labels.Labels {
+ return labels.New(c.labels...)
+}
+
+func (c *concreteSeries) Iterator() chunkenc.Iterator {
+ return newConcreteSeriesIterator(c)
+}
+
+// concreteSeriesIterator implements storage.SeriesIterator.
+type concreteSeriesIterator struct {
+ cur int
+ series *concreteSeries
+}
+
+func newConcreteSeriesIterator(series *concreteSeries) chunkenc.Iterator {
+ return &concreteSeriesIterator{
+ cur: -1,
+ series: series,
+ }
+}
+
+// Seek implements storage.SeriesIterator.
+func (c *concreteSeriesIterator) Seek(t int64) bool {
+ c.cur = sort.Search(len(c.series.samples), func(n int) bool {
+ return c.series.samples[n].Timestamp >= t
+ })
+ return c.cur < len(c.series.samples)
+}
+
+// At implements storage.SeriesIterator.
+func (c *concreteSeriesIterator) At() (t int64, v float64) {
+ s := c.series.samples[c.cur]
+ return s.Timestamp, s.Value
+}
+
+// Next implements storage.SeriesIterator.
+func (c *concreteSeriesIterator) Next() bool {
+ c.cur++
+ return c.cur < len(c.series.samples)
+}
+
+// Err implements storage.SeriesIterator.
+func (c *concreteSeriesIterator) Err() error {
+ return nil
+}
+
+// validateLabelsAndMetricName validates the label names/values and metric names
+// returned from remote read, also ensuring there are no duplicate label names.
+func validateLabelsAndMetricName(ls labels.Labels) error {
+ for i, l := range ls {
+ if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) {
+ return errors.Errorf("invalid metric name: %v", l.Value)
+ }
+ if !model.LabelName(l.Name).IsValid() {
+ return errors.Errorf("invalid label name: %v", l.Name)
+ }
+ if !model.LabelValue(l.Value).IsValid() {
+ return errors.Errorf("invalid label value: %v", l.Value)
+ }
+ if i > 0 && l.Name == ls[i-1].Name {
+ return errors.Errorf("duplicate label with name: %v", l.Name)
+ }
+ }
+ return nil
+}
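
NB: an illustrative wiring of the new queryable, backed here by the noop storage for brevity; a real caller would pass an M3-backed storage.Storage:

```go
package main

import (
	"context"

	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/query/storage/prometheus"
	"github.com/m3db/m3/src/x/instrument"
)

func main() {
	queryable := prometheus.NewPrometheusQueryable(prometheus.PrometheusOptions{
		Storage:           storage.NewNoopStorage(),
		InstrumentOptions: instrument.NewOptions(),
	})

	// The Prometheus engine asks for one Querier per evaluation window
	// (bounds are millisecond timestamps); Select then drives FetchProm.
	querier, err := queryable.Querier(context.Background(), 0, 0)
	if err == nil {
		defer querier.Close()
	}
}
```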
diff --git a/src/query/storage/remote/storage.go b/src/query/storage/remote/storage.go
index 81af826719..f8f420a9d1 100644
--- a/src/query/storage/remote/storage.go
+++ b/src/query/storage/remote/storage.go
@@ -28,6 +28,7 @@ import (
"github.com/m3db/m3/src/query/errors"
"github.com/m3db/m3/src/query/remote"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
)
// Options contains options for remote clients.
@@ -76,7 +77,7 @@ func (s *remoteStorage) CompleteTags(
ctx context.Context,
query *storage.CompleteTagsQuery,
options *storage.FetchOptions,
-) (*storage.CompleteTagsResult, error) {
+) (*consolidators.CompleteTagsResult, error) {
return s.client.CompleteTags(ctx, query, options)
}
diff --git a/src/query/storage/restrict_query_options.go b/src/query/storage/restrict_query_options.go
index 19aa847fa7..5675378280 100644
--- a/src/query/storage/restrict_query_options.go
+++ b/src/query/storage/restrict_query_options.go
@@ -26,6 +26,7 @@ import (
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
)
// Validate will validate the restrict fetch options.
@@ -68,13 +69,13 @@ func (o *RestrictByTag) GetMatchers() models.Matchers {
// Validate will validate the restrict type restrictions.
func (o *RestrictByType) Validate() error {
switch o.MetricsType {
- case UnaggregatedMetricsType:
+ case storagemetadata.UnaggregatedMetricsType:
if o.StoragePolicy != policy.EmptyStoragePolicy {
return fmt.Errorf(
"expected no storage policy for unaggregated metrics type, "+
"instead got: %v", o.StoragePolicy.String())
}
- case AggregatedMetricsType:
+ case storagemetadata.AggregatedMetricsType:
if v := o.StoragePolicy.Resolution().Window; v <= 0 {
return fmt.Errorf(
"expected positive resolution window, instead got: %v", v)
diff --git a/src/query/storage/storage_mock.go b/src/query/storage/storage_mock.go
index 4a9ec1c58d..acc653de15 100644
--- a/src/query/storage/storage_mock.go
+++ b/src/query/storage/storage_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/query/storage (interfaces: Storage)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -29,6 +29,7 @@ import (
"reflect"
"github.com/m3db/m3/src/query/block"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/golang/mock/gomock"
)
@@ -71,10 +72,10 @@ func (mr *MockStorageMockRecorder) Close() *gomock.Call {
}
// CompleteTags mocks base method
-func (m *MockStorage) CompleteTags(arg0 context.Context, arg1 *CompleteTagsQuery, arg2 *FetchOptions) (*CompleteTagsResult, error) {
+func (m *MockStorage) CompleteTags(arg0 context.Context, arg1 *CompleteTagsQuery, arg2 *FetchOptions) (*consolidators.CompleteTagsResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CompleteTags", arg0, arg1, arg2)
- ret0, _ := ret[0].(*CompleteTagsResult)
+ ret0, _ := ret[0].(*consolidators.CompleteTagsResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
diff --git a/src/query/storage/types.go b/src/query/storage/types.go
index 93935be85a..425c41b03e 100644
--- a/src/query/storage/types.go
+++ b/src/query/storage/types.go
@@ -22,6 +22,7 @@ package storage
import (
"context"
+ "errors"
"fmt"
"time"
@@ -30,12 +31,18 @@ import (
"github.com/m3db/m3/src/query/cost"
"github.com/m3db/m3/src/query/generated/proto/prompb"
"github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/ts"
xtime "github.com/m3db/m3/src/x/time"
"github.com/uber-go/tally"
)
+var (
+ errWriteQueryNoDatapoints = errors.New("write query with no datapoints")
+)
+
// Type describes the type of storage.
type Type int
@@ -106,8 +113,12 @@ type FetchQuery struct {
type FetchOptions struct {
// Remote is set when this fetch is originated by a remote grpc call.
Remote bool
- // Limit is the maximum number of series to return.
- Limit int
+ // SeriesLimit is the maximum number of series to return.
+ SeriesLimit int
+ // DocsLimit is the maximum number of docs to return.
+ DocsLimit int
+ // RequireExhaustive results in an error if the query exceeds the series limit.
+ RequireExhaustive bool
// BlockType is the block type that the fetch function returns.
BlockType models.FetchedBlockType
// FanoutOptions are the options for the fetch namespace fanout.
@@ -127,6 +138,8 @@ type FetchOptions struct {
// IncludeResolution if set, appends resolution information to fetch results.
// Currently only used for graphite queries.
IncludeResolution bool
+ // Timeout is the timeout for the request.
+ Timeout time.Duration
}
// FanoutOptions describes which namespaces should be fanned out to for
@@ -155,75 +168,10 @@ const (
FanoutForceEnable
)
-// NewFetchOptions creates a new fetch options.
-func NewFetchOptions() *FetchOptions {
- return &FetchOptions{
- Limit: 0,
- BlockType: models.TypeSingleBlock,
- FanoutOptions: &FanoutOptions{
- FanoutUnaggregated: FanoutDefault,
- FanoutAggregated: FanoutDefault,
- FanoutAggregatedOptimized: FanoutDefault,
- },
- Enforcer: cost.NoopChainedEnforcer(),
- Scope: tally.NoopScope,
- }
-}
-
-// LookbackDurationOrDefault returns either the default lookback duration or
-// overridden lookback duration if set.
-func (o *FetchOptions) LookbackDurationOrDefault(
- defaultValue time.Duration,
-) time.Duration {
- if o.LookbackDuration == nil {
- return defaultValue
- }
- return *o.LookbackDuration
-}
-
-// QueryFetchOptions returns fetch options for a given query.
-func (o *FetchOptions) QueryFetchOptions(
- queryCtx *models.QueryContext,
- blockType models.FetchedBlockType,
-) (*FetchOptions, error) {
- r := o.Clone()
- if r.Limit <= 0 {
- r.Limit = queryCtx.Options.LimitMaxTimeseries
- }
-
- // Use inbuilt options for type restriction if none found.
- if r.RestrictQueryOptions.GetRestrictByType() == nil &&
- queryCtx.Options.RestrictFetchType != nil {
- v := queryCtx.Options.RestrictFetchType
- restrict := &RestrictByType{
- MetricsType: MetricsType(v.MetricsType),
- StoragePolicy: v.StoragePolicy,
- }
-
- if err := restrict.Validate(); err != nil {
- return nil, err
- }
-
- if r.RestrictQueryOptions == nil {
- r.RestrictQueryOptions = &RestrictQueryOptions{}
- }
-
- r.RestrictQueryOptions.RestrictByType = restrict
- }
-
- return r, nil
-}
-
-// Clone will clone and return the fetch options.
-func (o *FetchOptions) Clone() *FetchOptions {
- result := *o
- return &result
-}
-
// RestrictByType are specific restrictions to stick to a single data type.
type RestrictByType struct {
// MetricsType restricts the type of metrics being returned.
- MetricsType MetricsType
+ MetricsType storagemetadata.MetricsType
// StoragePolicy is required if metrics type is not unaggregated
// to specify which storage policy metrics should be returned from.
StoragePolicy policy.StoragePolicy
@@ -283,21 +231,26 @@ type Querier interface {
ctx context.Context,
query *CompleteTagsQuery,
options *FetchOptions,
- ) (*CompleteTagsResult, error)
+ ) (*consolidators.CompleteTagsResult, error)
}
// WriteQuery represents the input timeseries that is written to the database.
// TODO: rename WriteQuery to WriteRequest or something similar.
type WriteQuery struct {
+ // opts is kept as a field so that the options remain unexported
+ // and the Validate method on WriteQueryOptions can be reused.
+ opts WriteQueryOptions
+}
+
+// WriteQueryOptions is a set of options used to construct a write query.
+// They are passed as options so that they can be validated when creating
+// a write query, which guarantees that a constructed write query is valid.
+type WriteQueryOptions struct {
Tags models.Tags
Datapoints ts.Datapoints
Unit xtime.Unit
Annotation []byte
- Attributes Attributes
-}
-
-func (q *WriteQuery) String() string {
- return string(q.Tags.ID())
+ Attributes storagemetadata.Attributes
}
// CompleteTagsQuery represents a query that returns an autocompleted
@@ -336,36 +289,6 @@ func (q *CompleteTagsQuery) String() string {
return fmt.Sprintf("completing tag values for query %s", q.TagMatchers)
}
-// CompletedTag represents a tag retrieved by a complete tags query.
-type CompletedTag struct {
- // Name the name of the tag.
- Name []byte
- // Values is a set of possible values for the tag.
- // NB: if the parent CompleteTagsResult is set to CompleteNameOnly, this is
- // expected to be empty.
- Values [][]byte
-}
-
-// CompleteTagsResult represents a set of autocompleted tag names and values
-type CompleteTagsResult struct {
- // CompleteNameOnly indicates if the tags in this result are expected to have
- // both names and values, or only names.
- CompleteNameOnly bool
- // CompletedTag is a list of completed tags.
- CompletedTags []CompletedTag
- // Metadata describes any metadata for the operation.
- Metadata block.ResultMetadata
-}
-
-// CompleteTagsResultBuilder is a builder that accumulates and deduplicates
-// incoming CompleteTagsResult values.
-type CompleteTagsResultBuilder interface {
- // Add appends an incoming CompleteTagsResult.
- Add(*CompleteTagsResult) error
- // Build builds a completed tag result.
- Build() CompleteTagsResult
-}
-
// Appender provides batched appends against a storage.
type Appender interface {
// Write writes a batched set of datapoints to storage based on the provided
@@ -397,30 +320,3 @@ type PromResult struct {
// ResultMetadata is the metadata for the result.
Metadata block.ResultMetadata
}
-
-// MetricsType is a type of stored metrics.
-type MetricsType uint
-
-const (
- // UnknownMetricsType is the unknown metrics type and is invalid.
- UnknownMetricsType MetricsType = iota
- // UnaggregatedMetricsType is an unaggregated metrics type.
- UnaggregatedMetricsType
- // AggregatedMetricsType is an aggregated metrics type.
- AggregatedMetricsType
-
- // DefaultMetricsType is the default metrics type value.
- DefaultMetricsType = UnaggregatedMetricsType
-)
-
-// Attributes is a set of stored metrics attributes.
-type Attributes struct {
- // MetricsType indicates the type of namespace this metric originated from.
- MetricsType MetricsType
- // Retention indicates the retention of the namespace this metric originated
- // from.
- Retention time.Duration
- // Resolution indicates the retention of the namespace this metric originated
- // from.
- Resolution time.Duration
-}
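
NB: with the Limit field split above, callers now size series and document results independently. A minimal sketch of populating the renamed fields:

```go
package main

import (
	"fmt"
	"time"

	"github.com/m3db/m3/src/query/storage"
)

func main() {
	opts := storage.NewFetchOptions()
	opts.SeriesLimit = 100        // was the single Limit field
	opts.DocsLimit = 1000         // index documents, limited separately
	opts.RequireExhaustive = true // error instead of partial results
	opts.Timeout = 30 * time.Second

	fmt.Println(opts.SeriesLimit, opts.DocsLimit)
}
```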
diff --git a/src/query/storage/write.go b/src/query/storage/write.go
new file mode 100644
index 0000000000..131b18c5a3
--- /dev/null
+++ b/src/query/storage/write.go
@@ -0,0 +1,110 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package storage
+
+import (
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
+ "github.com/m3db/m3/src/query/ts"
+ xerrors "github.com/m3db/m3/src/x/errors"
+ xtime "github.com/m3db/m3/src/x/time"
+)
+
+// Validate will validate the write query options.
+func (o WriteQueryOptions) Validate() error {
+ if err := o.validate(); err != nil {
+ // NB(r): Always make sure returns invalid params error
+ // here so that 4XX is returned to client on remote write endpoint.
+ return xerrors.NewInvalidParamsError(err)
+ }
+ return nil
+}
+
+func (o WriteQueryOptions) validate() error {
+ if len(o.Datapoints) == 0 {
+ return errWriteQueryNoDatapoints
+ }
+ if err := o.Unit.Validate(); err != nil {
+ return err
+ }
+ // Note: expensive check last.
+ if err := o.Tags.Validate(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// NewWriteQuery returns a new write query after validating the options.
+func NewWriteQuery(opts WriteQueryOptions) (*WriteQuery, error) {
+ q := &WriteQuery{}
+ if err := q.Reset(opts); err != nil {
+ return nil, err
+ }
+ return q, nil
+}
+
+// Reset resets the write query for reuse.
+func (q *WriteQuery) Reset(opts WriteQueryOptions) error {
+ if err := opts.Validate(); err != nil {
+ return err
+ }
+ q.opts = opts
+ return nil
+}
+
+// Tags returns the tags.
+func (q WriteQuery) Tags() models.Tags {
+ return q.opts.Tags
+}
+
+// Datapoints returns the datapoints.
+func (q WriteQuery) Datapoints() ts.Datapoints {
+ return q.opts.Datapoints
+}
+
+// Unit returns the unit.
+func (q WriteQuery) Unit() xtime.Unit {
+ return q.opts.Unit
+}
+
+// Annotation returns the annotation.
+func (q WriteQuery) Annotation() []byte {
+ return q.opts.Annotation
+}
+
+// Attributes returns the attributes.
+func (q WriteQuery) Attributes() storagemetadata.Attributes {
+ return q.opts.Attributes
+}
+
+// Validate validates the write query.
+func (q *WriteQuery) Validate() error {
+ return q.opts.Validate()
+}
+
+// Options returns the options used to create the write query.
+func (q WriteQuery) Options() WriteQueryOptions {
+ return q.opts
+}
+
+func (q *WriteQuery) String() string {
+ return string(q.opts.Tags.ID())
+}
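
NB: a usage sketch of the new constructor and Reset path. Options are validated up front, so a held WriteQuery is always well-formed and can be reused:

```go
package main

import (
	"fmt"
	"time"

	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
	"github.com/m3db/m3/src/query/ts"
	xtime "github.com/m3db/m3/src/x/time"
)

func main() {
	var q storage.WriteQuery

	// Reset validates the options before adopting them, so a zero value can
	// be reused for successive writes without reallocating.
	err := q.Reset(storage.WriteQueryOptions{
		Tags:       models.MustMakeTags("foo", "bar"),
		Datapoints: ts.Datapoints{{Timestamp: time.Now(), Value: 42}},
		Unit:       xtime.Second,
		Attributes: storagemetadata.Attributes{
			MetricsType: storagemetadata.UnaggregatedMetricsType,
		},
	})
	fmt.Println(err, q.Unit(), len(q.Datapoints()))
}
```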
diff --git a/src/query/storage/write_test.go b/src/query/storage/write_test.go
new file mode 100644
index 0000000000..e5f945ad00
--- /dev/null
+++ b/src/query/storage/write_test.go
@@ -0,0 +1,92 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package storage
+
+import (
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
+ "github.com/m3db/m3/src/query/ts"
+ xerrors "github.com/m3db/m3/src/x/errors"
+ xtime "github.com/m3db/m3/src/x/time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewWriteQueryValidateTags(t *testing.T) {
+ // Create known bad tags.
+ tags := models.NewTags(0, models.NewTagOptions().
+ SetIDSchemeType(models.TypeQuoted))
+ tags = tags.AddTag(models.Tag{Name: []byte(""), Value: []byte("foo")})
+ require.Error(t, tags.Validate())
+
+ // Try to create a write query.
+ _, err := NewWriteQuery(WriteQueryOptions{
+ Tags: tags,
+ Datapoints: ts.Datapoints{
+ {
+ Timestamp: time.Now(),
+ Value: 42,
+ },
+ },
+ Unit: xtime.Second,
+ Attributes: storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
+ },
+ })
+ require.Error(t, err)
+ require.True(t, xerrors.IsInvalidParams(err))
+}
+
+func TestNewWriteQueryValidateDatapoints(t *testing.T) {
+ // Try to create a write query.
+ _, err := NewWriteQuery(WriteQueryOptions{
+ Tags: models.MustMakeTags("foo", "bar"),
+ Datapoints: ts.Datapoints{},
+ Unit: xtime.Second,
+ Attributes: storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
+ },
+ })
+ require.Error(t, err)
+ require.True(t, xerrors.IsInvalidParams(err))
+}
+
+func TestNewWriteQueryValidateUnit(t *testing.T) {
+ // Try to create a write query.
+ _, err := NewWriteQuery(WriteQueryOptions{
+ Tags: models.MustMakeTags("foo", "bar"),
+ Datapoints: ts.Datapoints{
+ {
+ Timestamp: time.Now(),
+ Value: 42,
+ },
+ },
+ Unit: xtime.Unit(999),
+ Attributes: storagemetadata.Attributes{
+ MetricsType: storagemetadata.UnaggregatedMetricsType,
+ },
+ })
+ require.Error(t, err)
+ require.True(t, xerrors.IsInvalidParams(err))
+}
diff --git a/src/query/test/block.go b/src/query/test/block.go
index bd960480be..47d5f3f238 100644
--- a/src/query/test/block.go
+++ b/src/query/test/block.go
@@ -35,12 +35,15 @@ type multiSeriesBlock struct {
lookbackDuration time.Duration
meta block.Metadata
seriesList ts.SeriesList
+ query *storage.FetchQuery
+ enableBatched bool
}
func newMultiSeriesBlock(
fetchResult *storage.FetchResult,
query *storage.FetchQuery,
lookbackDuration time.Duration,
+ enableBatched bool,
) block.Block {
meta := block.Metadata{
Bounds: models.Bounds{
@@ -56,6 +59,8 @@ func newMultiSeriesBlock(
seriesList: fetchResult.SeriesList,
meta: meta,
lookbackDuration: lookbackDuration,
+ enableBatched: enableBatched,
+ query: query,
}
}
@@ -79,7 +84,39 @@ func (m multiSeriesBlock) SeriesIter() (block.SeriesIter, error) {
func (m multiSeriesBlock) MultiSeriesIter(
concurrency int,
) ([]block.SeriesIterBatch, error) {
- return nil, errors.New("batched iterator is not supported by test block")
+ if !m.enableBatched {
+ return nil,
+ errors.New("batched iterator is not supported by this test block")
+ }
+
+ batches := make([]ts.SeriesList, 0, concurrency)
+ for i := 0; i < concurrency; i++ {
+ batches = append(batches, make(ts.SeriesList, 0, 10))
+ }
+
+	// Round-robin series across the batches.
+	for i, series := range m.seriesList {
+		batches[i%concurrency] = append(batches[i%concurrency], series)
+ }
+
+ seriesIterBatches := make([]block.SeriesIterBatch, 0, concurrency)
+ for _, batch := range batches {
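+		// Wrap each batch in a non-batched block so we can reuse its plain
+		// series iterator.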
+ insideBlock := newMultiSeriesBlock(&storage.FetchResult{
+ SeriesList: batch,
+ Metadata: m.meta.ResultMetadata,
+ }, m.query, m.lookbackDuration, false)
+ it, err := insideBlock.SeriesIter()
+ if err != nil {
+ return nil, err
+ }
+
+ seriesIterBatches = append(seriesIterBatches, block.SeriesIterBatch{
+ Iter: it,
+ Size: len(batch),
+ })
+ }
+
+ return seriesIterBatches, nil
}
func (m multiSeriesBlock) SeriesMeta() []block.SeriesMeta {
diff --git a/src/query/test/builder.go b/src/query/test/builder.go
index 066589ccb5..ef4a40a315 100644
--- a/src/query/test/builder.go
+++ b/src/query/test/builder.go
@@ -28,6 +28,8 @@ import (
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/ts"
+
+ "github.com/prometheus/common/model"
)
// ValueMod can be used to modify provided values for testing.
@@ -54,6 +56,7 @@ func NewUnconsolidatedBlockFromDatapointsWithMeta(
bounds models.Bounds,
meta []block.SeriesMeta,
seriesValues [][]float64,
+ enableBatched bool,
) block.Block {
seriesList := make(ts.SeriesList, len(seriesValues))
for i, values := range seriesValues {
@@ -70,21 +73,7 @@ func NewUnconsolidatedBlockFromDatapointsWithMeta(
Start: bounds.Start,
End: bounds.End(),
Interval: bounds.StepSize,
- }, time.Minute)
-}
-
-// NewUnconsolidatedBlockFromDatapoints creates a new unconsolidated block
-// using the provided values.
-func NewUnconsolidatedBlockFromDatapoints(
- bounds models.Bounds,
- seriesValues [][]float64,
-) block.Block {
- meta := NewSeriesMeta("dummy", len(seriesValues))
- return NewUnconsolidatedBlockFromDatapointsWithMeta(
- bounds,
- meta,
- seriesValues,
- )
+ }, time.Minute, enableBatched)
}
func seriesValuesToDatapoints(
@@ -121,7 +110,10 @@ func newSeriesMeta(tagPrefix string, count int, name bool) []block.SeriesMeta {
tags := models.EmptyTags()
st := []byte(fmt.Sprintf("%s%d", tagPrefix, i))
if name {
- tags = tags.AddTag(models.Tag{Name: []byte("__name__"), Value: st})
+ tags = tags.AddTag(models.Tag{
+ Name: []byte(model.MetricNameLabel),
+ Value: st,
+ })
}
tags = tags.AddTag(models.Tag{Name: st, Value: st})
diff --git a/src/query/test/comparison.go b/src/query/test/comparison.go
index bc469d7b5a..2ee146dacb 100644
--- a/src/query/test/comparison.go
+++ b/src/query/test/comparison.go
@@ -122,25 +122,27 @@ func (m matches) Less(i, j int) bool {
) == -1
}
-// CompareLists compares series meta / index pairs
-func CompareLists(t *testing.T, meta, exMeta []block.SeriesMeta, index, exIndex [][]int) {
+// CompareListsInOrder compares series meta / index pairs (order sensitive)
+func CompareListsInOrder(t *testing.T, meta, exMeta []block.SeriesMeta, index, exIndex [][]int) {
require.Equal(t, len(exIndex), len(exMeta))
- require.Equal(t, len(exMeta), len(meta))
- require.Equal(t, len(exIndex), len(index))
- ex := make(matches, len(meta))
- actual := make(matches, len(meta))
- // build matchers
- for i := range meta {
- ex[i] = match{exIndex[i], exMeta[i].Tags.Tags, exMeta[i].Name, []float64{}}
- actual[i] = match{index[i], meta[i].Tags.Tags, meta[i].Name, []float64{}}
+ assert.Equal(t, exMeta, meta)
+ assert.Equal(t, exIndex, index)
+}
+
+// CompareValuesInOrder compares series meta / value pairs (order sensitive)
+func CompareValuesInOrder(t *testing.T, meta, exMeta []block.SeriesMeta, vals, exVals [][]float64) {
+ require.Equal(t, len(exVals), len(exMeta))
+	require.Equal(t, len(exVals), len(vals), "Vals is", vals, "ExVals is", exVals)
+
+ assert.Equal(t, exMeta, meta)
+
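+	// Values are compared with a small delta to tolerate floating point
+	// error; NaNs in matching positions compare as equal.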
+ for i := range exVals {
+ EqualsWithNansWithDelta(t, exVals[i], vals[i], 0.00001)
}
- sort.Sort(ex)
- sort.Sort(actual)
- assert.Equal(t, ex, actual)
}
-// CompareValues compares series meta / value pairs
+// CompareValues compares series meta / value pairs (order insensitive)
func CompareValues(t *testing.T, meta, exMeta []block.SeriesMeta, vals, exVals [][]float64) {
require.Equal(t, len(exVals), len(exMeta))
require.Equal(t, len(exMeta), len(meta), "Meta is", meta, "ExMeta is", exMeta)
diff --git a/src/query/test/compatibility/m3comparator_client.go b/src/query/test/compatibility/m3comparator_client.go
new file mode 100644
index 0000000000..58819d853b
--- /dev/null
+++ b/src/query/test/compatibility/m3comparator_client.go
@@ -0,0 +1,74 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package compatibility
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+)
+
+type m3comparatorClient struct {
+ host string
+ port int
+}
+
+func newM3ComparatorClient(host string, port int) *m3comparatorClient {
+ return &m3comparatorClient{
+ host: host,
+ port: port,
+ }
+}
+
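+// clear deletes all series previously loaded into the m3comparator instance.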
+func (c *m3comparatorClient) clear() error {
+ comparatorURL := fmt.Sprintf("http://%s:%d", c.host, c.port)
+ req, err := http.NewRequest(http.MethodDelete, comparatorURL, nil)
+ if err != nil {
+ return err
+ }
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+	// Close the response body so the underlying connection can be reused.
+	resp.Body.Close()
+	return nil
+}
+
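+// load posts JSON-encoded series to the m3comparator instance for ingestion.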
+func (c *m3comparatorClient) load(data []byte) error {
+ comparatorURL := fmt.Sprintf("http://%s:%d", c.host, c.port)
+ resp, err := http.Post(comparatorURL, "application/json", bytes.NewReader(data))
+ if err != nil {
+		return fmt.Errorf("error loading data to comparator: %v", err)
+ }
+ defer resp.Body.Close()
+
+	// Treat only 2xx responses as success.
+	if resp.StatusCode/100 == 2 {
+		return nil
+	}
+
+ bodyBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("load status code %d. Error: %v", resp.StatusCode, err)
+ }
+
+ return fmt.Errorf("load status code %d. Response: %s", resp.StatusCode, string(bodyBytes))
+}
diff --git a/src/query/test/compatibility/m3query_client.go b/src/query/test/compatibility/m3query_client.go
new file mode 100644
index 0000000000..067f8faa57
--- /dev/null
+++ b/src/query/test/compatibility/m3query_client.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package compatibility
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+type m3queryClient struct {
+ host string
+ port int
+}
+
+func newM3QueryClient(host string, port int) *m3queryClient {
+ return &m3queryClient{
+ host: host,
+ port: port,
+ }
+}
+
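+// query evaluates an instant PromQL expression against the m3query API at
+// the given time.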
+func (c *m3queryClient) query(expr string, t time.Time) ([]byte, error) {
+ url := fmt.Sprintf("http://%s:%d/m3query/api/v1/query", c.host, c.port)
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ q := req.URL.Query()
+ q.Add("query", expr)
+ q.Add("time", fmt.Sprint(t.Unix()))
+ req.URL.RawQuery = q.Encode()
+ resp, err := http.DefaultClient.Do(req)
+
+ if err != nil {
+ return nil, errors.Wrapf(err, "error evaluating query %s", expr)
+ }
+
+ defer resp.Body.Close()
+	if resp.StatusCode/100 != 2 {
+		return nil, fmt.Errorf("invalid status %d received sending query: %+v", resp.StatusCode, req)
+ }
+
+ return ioutil.ReadAll(resp.Body)
+}
diff --git a/src/query/test/compatibility/promql_test.go b/src/query/test/compatibility/promql_test.go
new file mode 100644
index 0000000000..d5f756800f
--- /dev/null
+++ b/src/query/test/compatibility/promql_test.go
@@ -0,0 +1,59 @@
+// +build compatibility
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This code was taken from prometheus repo: https://github.com/prometheus/prometheus/blob/master/promql/promql_test.go
+
+package compatibility
+
+import (
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestEvaluations(t *testing.T) {
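+	// Each .test file is executed against running m3query and m3comparator
+	// instances (see test.go for the expected endpoints).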
+ files, err := filepath.Glob("testdata/*.test")
+ require.NoError(t, err)
+
+ for _, fn := range files {
+ test, err := newTestFromFile(t, fn)
+ require.NoError(t, err)
+
+ require.NoError(t, test.Run())
+
+ test.Close()
+ }
+}
diff --git a/src/query/test/compatibility/test.go b/src/query/test/compatibility/test.go
new file mode 100644
index 0000000000..f67600c8f3
--- /dev/null
+++ b/src/query/test/compatibility/test.go
@@ -0,0 +1,580 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Parts of code were taken from prometheus repo: https://github.com/prometheus/prometheus/blob/master/promql/test.go
+
+package compatibility
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "math"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ cparser "github.com/m3db/m3/src/cmd/services/m3comparator/main/parser"
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus"
+
+ "github.com/pkg/errors"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/pkg/labels"
+ "github.com/prometheus/prometheus/promql"
+ "github.com/prometheus/prometheus/promql/parser"
+ "github.com/prometheus/prometheus/util/testutil"
+)
+
+var (
+ minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64.
+
+ patSpace = regexp.MustCompile("[\t ]+")
+ patLoad = regexp.MustCompile(`^load\s+(.+?)$`)
+ patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
+)
+
+const (
+ epsilon = 0.000001 // Relative error allowed for sample values.
+	startingTime = 1587393285000000000 // 2020-04-20 14:34:45 UTC
+)
+
+var testStartTime = time.Unix(0, 0).UTC()
+
+// Test is a sequence of read and write commands that are run
+// against a test storage.
+type Test struct {
+ testutil.T
+
+ cmds []testCommand
+
+ context context.Context
+
+ m3comparator *m3comparatorClient
+}
+
+// NewTest returns an initialized empty Test.
+func NewTest(t testutil.T, input string) (*Test, error) {
+ test := &Test{
+ T: t,
+ cmds: []testCommand{},
+ m3comparator: newM3ComparatorClient("localhost", 9001),
+ }
+ err := test.parse(input)
+ if err != nil {
+ return test, err
+ }
+ err = test.clear()
+ return test, err
+}
+
+func newTestFromFile(t testutil.T, filename string) (*Test, error) {
+ content, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return NewTest(t, string(content))
+}
+
+func raise(line int, format string, v ...interface{}) error {
+ return &parser.ParseErr{
+ LineOffset: line,
+ Err: errors.Errorf(format, v...),
+ }
+}
+
+func (t *Test) parseLoad(lines []string, i int) (int, *loadCmd, error) {
+ if !patLoad.MatchString(lines[i]) {
+		return i, nil, raise(i, "invalid load command. (load <step:duration>)")
+ }
+ parts := patLoad.FindStringSubmatch(lines[i])
+
+ gap, err := model.ParseDuration(parts[1])
+ if err != nil {
+ return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
+ }
+ cmd := newLoadCmd(t.m3comparator, time.Duration(gap))
+ for i+1 < len(lines) {
+ i++
+ defLine := lines[i]
+ if len(defLine) == 0 {
+ i--
+ break
+ }
+ metric, vals, err := parser.ParseSeriesDesc(defLine)
+ if err != nil {
+ if perr, ok := err.(*parser.ParseErr); ok {
+ perr.LineOffset = i
+ }
+ return i, nil, err
+ }
+ cmd.set(metric, vals...)
+ }
+ return i, cmd, nil
+}
+
+func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) {
+ if !patEvalInstant.MatchString(lines[i]) {
+		return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>)")
+ }
+ parts := patEvalInstant.FindStringSubmatch(lines[i])
+ var (
+ mod = parts[1]
+ at = parts[2]
+ expr = parts[3]
+ )
+ _, err := parser.ParseExpr(expr)
+ if err != nil {
+ if perr, ok := err.(*parser.ParseErr); ok {
+ perr.LineOffset = i
+ posOffset := parser.Pos(strings.Index(lines[i], expr))
+ perr.PositionRange.Start += posOffset
+ perr.PositionRange.End += posOffset
+ perr.Query = lines[i]
+ }
+ return i, nil, err
+ }
+
+ offset, err := model.ParseDuration(at)
+ if err != nil {
+		return i, nil, raise(i, "invalid timestamp definition %q: %s", at, err)
+ }
+ ts := testStartTime.Add(time.Duration(offset))
+
+ cmd := newEvalCmd(expr, ts, i+1)
+ switch mod {
+ case "ordered":
+ cmd.ordered = true
+ case "fail":
+ cmd.fail = true
+ }
+
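+	// Expected series positions are 1-indexed; position 0 is reserved for
+	// scalar expectations (see expect).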
+ for j := 1; i+1 < len(lines); j++ {
+ i++
+ defLine := lines[i]
+ if len(defLine) == 0 {
+ i--
+ break
+ }
+ if f, err := parseNumber(defLine); err == nil {
+ cmd.expect(0, nil, parser.SequenceValue{Value: f})
+ break
+ }
+ metric, vals, err := parser.ParseSeriesDesc(defLine)
+ if err != nil {
+ if perr, ok := err.(*parser.ParseErr); ok {
+ perr.LineOffset = i
+ }
+ return i, nil, err
+ }
+
+ // Currently, we are not expecting any matrices.
+ if len(vals) > 1 {
+			return i, nil, raise(i, "expecting multiple values in instant evaluation is not allowed")
+ }
+ cmd.expect(j, metric, vals...)
+ }
+ return i, cmd, nil
+}
+
+// getLines returns trimmed lines after removing the comments.
+func getLines(input string) []string {
+ lines := strings.Split(input, "\n")
+ for i, l := range lines {
+ l = strings.TrimSpace(l)
+ if strings.HasPrefix(l, "#") {
+ l = ""
+ }
+ lines[i] = l
+ }
+ return lines
+}
+
+// parse parses the given command sequence and appends it to the test.
+func (t *Test) parse(input string) error {
+ lines := getLines(input)
+ var err error
+ // Scan for steps line by line.
+ for i := 0; i < len(lines); i++ {
+ l := lines[i]
+ if len(l) == 0 {
+ continue
+ }
+ var cmd testCommand
+
+ switch c := strings.ToLower(patSpace.Split(l, 2)[0]); {
+ case c == "clear":
+ cmd = &clearCmd{}
+ case c == "load":
+ i, cmd, err = t.parseLoad(lines, i)
+ case strings.HasPrefix(c, "eval"):
+ i, cmd, err = t.parseEval(lines, i)
+ default:
+ return raise(i, "invalid command %q", l)
+ }
+ if err != nil {
+ return err
+ }
+ t.cmds = append(t.cmds, cmd)
+ }
+ return nil
+}
+
+// testCommand is an interface that ensures that only the package internal
+// types can be a valid command for a test.
+type testCommand interface {
+ testCmd()
+}
+
+func (*clearCmd) testCmd() {}
+func (*loadCmd) testCmd() {}
+func (*evalCmd) testCmd() {}
+
+// loadCmd is a command that loads sequences of sample values for specific
+// metrics into the storage.
+type loadCmd struct {
+ gap time.Duration
+ metrics map[uint64]labels.Labels
+ defs map[uint64][]promql.Point
+ m3compClient *m3comparatorClient
+}
+
+func newLoadCmd(m3compClient *m3comparatorClient, gap time.Duration) *loadCmd {
+ return &loadCmd{
+ gap: gap,
+ metrics: map[uint64]labels.Labels{},
+ defs: map[uint64][]promql.Point{},
+ m3compClient: m3compClient,
+ }
+}
+
+func (cmd loadCmd) String() string {
+ return "load"
+}
+
+// set a sequence of sample values for the given metric.
+func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) {
+ h := m.Hash()
+
+ samples := make([]promql.Point, 0, len(vals))
+ ts := testStartTime
+ for _, v := range vals {
+ if !v.Omitted {
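+			// Sample timestamps are stored in milliseconds, as in Prometheus.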
+ samples = append(samples, promql.Point{
+ T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond),
+ V: v.Value,
+ })
+ }
+ ts = ts.Add(cmd.gap)
+ }
+ cmd.defs[h] = samples
+ cmd.metrics[h] = m
+}
+
+// append the defined time series to the storage.
+func (cmd *loadCmd) append() error {
+ series := make([]cparser.Series, 0, len(cmd.defs))
+
+ for h, smpls := range cmd.defs {
+ m := cmd.metrics[h]
+ start := time.Unix(0, startingTime)
+
+ ser := cparser.Series{
+ Tags: make(cparser.Tags, 0, len(m)),
+ Start: start,
+ Datapoints: make(cparser.Datapoints, 0, len(smpls)),
+ }
+ for _, l := range m {
+ ser.Tags = append(ser.Tags, cparser.NewTag(l.Name, l.Value))
+ }
+
+ for _, s := range smpls {
+ ts := start.Add(time.Duration(s.T) * time.Millisecond)
+ ser.Datapoints = append(ser.Datapoints, cparser.Datapoint{
+ Timestamp: ts,
+ Value: cparser.Value(s.V),
+ })
+
+			// Set the series end one step past the final datapoint; gap is
+			// already a time.Duration, so no unit conversion is needed.
+			ser.End = ts.Add(cmd.gap)
+ }
+ series = append(series, ser)
+ }
+
+ j, err := json.Marshal(series)
+ if err != nil {
+ return err
+ }
+
+ return cmd.m3compClient.load(j)
+}
+
+// evalCmd is a command that evaluates an expression for the given time (range)
+// and expects a specific result.
+type evalCmd struct {
+ expr string
+ start time.Time
+ line int
+
+ fail, ordered bool
+
+ metrics map[uint64]labels.Labels
+ expected map[uint64]entry
+ m3query *m3queryClient
+}
+
+type entry struct {
+ pos int
+ vals []parser.SequenceValue
+}
+
+func (e entry) String() string {
+ return fmt.Sprintf("%d: %s", e.pos, e.vals)
+}
+
+func newEvalCmd(expr string, start time.Time, line int) *evalCmd {
+ return &evalCmd{
+ expr: expr,
+ start: start,
+ line: line,
+
+ metrics: map[uint64]labels.Labels{},
+ expected: map[uint64]entry{},
+ m3query: newM3QueryClient("localhost", 7201),
+ }
+}
+
+func (ev *evalCmd) String() string {
+ return "eval"
+}
+
+// expect adds a new metric with a sequence of values to the set of expected
+// results for the query.
+func (ev *evalCmd) expect(pos int, m labels.Labels, vals ...parser.SequenceValue) {
+ if m == nil {
+ ev.expected[0] = entry{pos: pos, vals: vals}
+ return
+ }
+ h := m.Hash()
+ ev.metrics[h] = m
+ ev.expected[h] = entry{pos: pos, vals: vals}
+}
+
+// Hash returns a hash value for the label set.
+func hash(ls prometheus.Tags) uint64 {
+ lbs := make(labels.Labels, 0, len(ls))
+ for k, v := range ls {
+ lbs = append(lbs, labels.Label{
+ Name: k,
+ Value: v,
+ })
+ }
+
+	sort.Slice(lbs, func(i, j int) bool {
+ return lbs[i].Name < lbs[j].Name
+ })
+
+ return lbs.Hash()
+}
+
+// compareResult compares the result value with the defined expectation.
+func (ev *evalCmd) compareResult(j []byte) error {
+ var response prometheus.Response
+ err := json.Unmarshal(j, &response)
+ if err != nil {
+ return err
+ }
+
+ if response.Status != "success" {
+		return fmt.Errorf("unsuccessful status received: %s", response.Status)
+ }
+
+ result := response.Data.Result
+
+ switch result := result.(type) {
+ case *prometheus.MatrixResult:
+ return errors.New("received range result on instant evaluation")
+
+ case *prometheus.VectorResult:
+ seen := map[uint64]bool{}
+ for pos, v := range result.Result {
+ fp := hash(v.Metric)
+ if _, ok := ev.metrics[fp]; !ok {
+ return errors.Errorf("unexpected metric %s in result", v.Metric)
+ }
+
+ exp := ev.expected[fp]
+ if ev.ordered && exp.pos != pos+1 {
+ return errors.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1)
+ }
+ val, err := parseNumber(fmt.Sprint(v.Value[1]))
+ if err != nil {
+ return err
+ }
+ if !almostEqual(exp.vals[0].Value, val) {
+ return errors.Errorf("expected %v for %s but got %v", exp.vals[0].Value, v.Metric, val)
+ }
+ seen[fp] = true
+ }
+
+ for fp, expVals := range ev.expected {
+ if !seen[fp] {
+ fmt.Println("vector result", len(result.Result), ev.expr)
+ for _, ss := range result.Result {
+ fmt.Println(" ", ss.Metric, ss.Value)
+ }
+ return errors.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals)
+ }
+ }
+
+ case *prometheus.ScalarResult:
+ v, err := parseNumber(fmt.Sprint(result.Result[1]))
+ if err != nil {
+ return err
+ }
+ if len(ev.expected) == 0 || len(ev.expected[0].vals) == 0 {
+ return errors.Errorf("expected no Scalar value but got %v", v)
+ }
+ expected := ev.expected[0].vals[0].Value
+ if !almostEqual(expected, v) {
+ return errors.Errorf("expected Scalar %v but got %v", expected, v)
+ }
+
+ default:
+ panic(errors.Errorf("promql.Test.compareResult: unexpected result type %T", result))
+ }
+
+ return nil
+}
+
+// clearCmd is a command that wipes the test's storage state.
+type clearCmd struct{}
+
+func (cmd clearCmd) String() string {
+ return "clear"
+}
+
+// Run executes the command sequence of the test, stopping at the first
+// evaluation error.
+func (t *Test) Run() error {
+ for _, cmd := range t.cmds {
+ // TODO(fabxc): aggregate command errors, yield diffs for result
+ // comparison errors.
+ if err := t.exec(cmd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// exec processes a single step of the test.
+func (t *Test) exec(tc testCommand) error {
+ switch cmd := tc.(type) {
+ case *clearCmd:
+ return t.clear()
+
+ case *loadCmd:
+ return cmd.append()
+
+ case *evalCmd:
+ expr, err := parser.ParseExpr(cmd.expr)
+ if err != nil {
+ return err
+ }
+
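+		// Offset the test-relative evaluation time onto the fixed starting
+		// time that the comparator data was loaded at.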
+ t := time.Unix(0, startingTime+(cmd.start.Unix()*1000000000))
+ bodyBytes, err := cmd.m3query.query(expr.String(), t)
+ if err != nil {
+ if cmd.fail {
+ return nil
+ }
+ return errors.Wrapf(err, "error in %s %s, line %d", cmd, cmd.expr, cmd.line)
+ }
+ if cmd.fail {
+ return fmt.Errorf("expected to fail at %s %s, line %d", cmd, cmd.expr, cmd.line)
+ }
+
+ err = cmd.compareResult(bodyBytes)
+ if err != nil {
+ return errors.Wrapf(err, "error in %s %s, line %d. m3query response: %s", cmd, cmd.expr, cmd.line, string(bodyBytes))
+ }
+
+ default:
+ panic("promql.Test.exec: unknown test command type")
+ }
+ return nil
+}
+
+// clear the current test storage of all inserted samples.
+func (t *Test) clear() error {
+ return t.m3comparator.clear()
+}
+
+// Close closes resources associated with the Test.
+func (t *Test) Close() {
+}
+
+// almostEqual returns true if the two sample values only differ by a
+// small relative error.
+func almostEqual(a, b float64) bool {
+ // NaN has no equality but for testing we still want to know whether both values
+ // are NaN.
+ if math.IsNaN(a) && math.IsNaN(b) {
+ return true
+ }
+
+ // Cf. http://floating-point-gui.de/errors/comparison/
+ if a == b {
+ return true
+ }
+
+ diff := math.Abs(a - b)
+
+ if a == 0 || b == 0 || diff < minNormal {
+ return diff < epsilon
+ }
+ return diff/(math.Abs(a)+math.Abs(b)) < epsilon
+}
+
+func parseNumber(s string) (float64, error) {
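+	// Parse as an integer first (base 0 also accepts prefixed forms such
+	// as 0x10), falling back to a float on failure.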
+ n, err := strconv.ParseInt(s, 0, 64)
+ f := float64(n)
+ if err != nil {
+ f, err = strconv.ParseFloat(s, 64)
+ }
+ if err != nil {
+ return 0, errors.Wrap(err, "error parsing number")
+ }
+ return f, nil
+}
diff --git a/src/query/test/compatibility/testdata/LICENSE.txt b/src/query/test/compatibility/testdata/LICENSE.txt
new file mode 100644
index 0000000000..6133dce0f9
--- /dev/null
+++ b/src/query/test/compatibility/testdata/LICENSE.txt
@@ -0,0 +1,34 @@
+// Modifications Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This code was taken from prometheus repo: https://github.com/prometheus/prometheus/tree/master/promql/testdata
diff --git a/src/query/test/compatibility/testdata/aggregators.test b/src/query/test/compatibility/testdata/aggregators.test
new file mode 100644
index 0000000000..716395e8b7
--- /dev/null
+++ b/src/query/test/compatibility/testdata/aggregators.test
@@ -0,0 +1,305 @@
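+# Series notation: "0+10x10" starts at 0 and adds 10 at each 5m step.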
+load 5m
+ http_requests{job="api-server", instance="0", group="production"} 0+10x10
+ http_requests{job="api-server", instance="1", group="production"} 0+20x10
+ http_requests{job="api-server", instance="0", group="canary"} 0+30x10
+ http_requests{job="api-server", instance="1", group="canary"} 0+40x10
+ http_requests{job="app-server", instance="0", group="production"} 0+50x10
+ http_requests{job="app-server", instance="1", group="production"} 0+60x10
+ http_requests{job="app-server", instance="0", group="canary"} 0+70x10
+ http_requests{job="app-server", instance="1", group="canary"} 0+80x10
+
+load 5m
+ foo{job="api-server", instance="0", region="europe"} 0+90x10
+ foo{job="api-server"} 0+100x10
+
+# Simple sum.
+eval instant at 50m SUM BY (group) (http_requests{job="api-server"})
+ {group="canary"} 700
+ {group="production"} 300
+
+eval instant at 50m SUM BY (group) (((http_requests{job="api-server"})))
+ {group="canary"} 700
+ {group="production"} 300
+
+# Test alternative "by"-clause order.
+eval instant at 50m sum by (group) (http_requests{job="api-server"})
+ {group="canary"} 700
+ {group="production"} 300
+
+# Simple average.
+eval instant at 50m avg by (group) (http_requests{job="api-server"})
+ {group="canary"} 350
+ {group="production"} 150
+
+# Simple count.
+eval instant at 50m count by (group) (http_requests{job="api-server"})
+ {group="canary"} 2
+ {group="production"} 2
+
+# Simple without.
+eval instant at 50m sum without (instance) (http_requests{job="api-server"})
+ {group="canary",job="api-server"} 700
+ {group="production",job="api-server"} 300
+
+# Empty by.
+eval instant at 50m sum by () (http_requests{job="api-server"})
+ {} 1000
+
+# No by/without.
+eval instant at 50m sum(http_requests{job="api-server"})
+ {} 1000
+
+# Empty without.
+eval instant at 50m sum without () (http_requests{job="api-server",group="production"})
+ {group="production",job="api-server",instance="0"} 100
+ {group="production",job="api-server",instance="1"} 200
+
+# Without with mismatched and missing labels. Do not do this.
+eval instant at 50m sum without (instance) (http_requests{job="api-server"} or foo)
+ {group="canary",job="api-server"} 700
+ {group="production",job="api-server"} 300
+ {region="europe",job="api-server"} 900
+ {job="api-server"} 1000
+
+# Lower-cased aggregation operators should work too.
+eval instant at 50m sum(http_requests) by (job) + min(http_requests) by (job) + max(http_requests) by (job) + avg(http_requests) by (job)
+ {job="app-server"} 4550
+ {job="api-server"} 1750
+
+# Test alternative "by"-clause order.
+eval instant at 50m sum by (group) (http_requests{job="api-server"})
+ {group="canary"} 700
+ {group="production"} 300
+
+# Test both alternative "by"-clause orders in one expression.
+# Public health warning: stick to one form within an expression (or even
+# in an organization), or risk serious user confusion.
+eval instant at 50m sum(sum by (group) (http_requests{job="api-server"})) by (job)
+ {} 1000
+
+
+
+# Standard deviation and variance.
+eval instant at 50m stddev(http_requests)
+ {} 229.12878474779
+
+eval instant at 50m stddev by (instance)(http_requests)
+ {instance="0"} 223.60679774998
+ {instance="1"} 223.60679774998
+
+eval instant at 50m stdvar(http_requests)
+ {} 52500
+
+eval instant at 50m stdvar by (instance)(http_requests)
+ {instance="0"} 50000
+ {instance="1"} 50000
+
+# Float precision test for standard deviation and variance
+clear
+load 5m
+ http_requests{job="api-server", instance="0", group="production"} 0+1.33x10
+ http_requests{job="api-server", instance="1", group="production"} 0+1.33x10
+ http_requests{job="api-server", instance="0", group="canary"} 0+1.33x10
+
+# FAILING issue #10. (it is almost zero)
+#eval instant at 50m stddev(http_requests)
+# {} 0.0
+
+# FAILING issue #11. (it is almost zero)
+#eval instant at 50m stdvar(http_requests)
+# {} 0.0
+
+
+
+# Regression test for missing separator byte in labelsToGroupingKey.
+clear
+load 5m
+ label_grouping_test{a="aa", b="bb"} 0+10x10
+ label_grouping_test{a="a", b="abb"} 0+20x10
+
+eval instant at 50m sum(label_grouping_test) by (a, b)
+ {a="a", b="abb"} 200
+ {a="aa", b="bb"} 100
+
+
+
+# Tests for min/max.
+clear
+load 5m
+ http_requests{job="api-server", instance="0", group="production"} 1
+ http_requests{job="api-server", instance="1", group="production"} 2
+ http_requests{job="api-server", instance="0", group="canary"} NaN
+ http_requests{job="api-server", instance="1", group="canary"} 3
+ http_requests{job="api-server", instance="2", group="canary"} 4
+
+eval instant at 0m max(http_requests)
+ {} 4
+
+eval instant at 0m min(http_requests)
+ {} 1
+
+eval instant at 0m max by (group) (http_requests)
+ {group="production"} 2
+ {group="canary"} 4
+
+eval instant at 0m min by (group) (http_requests)
+ {group="production"} 1
+ {group="canary"} 3
+
+clear
+
+# Tests for topk/bottomk.
+load 5m
+ http_requests{job="api-server", instance="0", group="production"} 0+10x10
+ http_requests{job="api-server", instance="1", group="production"} 0+20x10
+ http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
+ http_requests{job="api-server", instance="0", group="canary"} 0+30x10
+ http_requests{job="api-server", instance="1", group="canary"} 0+40x10
+ http_requests{job="app-server", instance="0", group="production"} 0+50x10
+ http_requests{job="app-server", instance="1", group="production"} 0+60x10
+ http_requests{job="app-server", instance="0", group="canary"} 0+70x10
+ http_requests{job="app-server", instance="1", group="canary"} 0+80x10
+ foo 3+0x10
+
+# FAILING issue #12. All topk and bottomk tests are failing.
+#eval_ordered instant at 50m topk(3, http_requests)
+# http_requests{group="canary", instance="1", job="app-server"} 800
+# http_requests{group="canary", instance="0", job="app-server"} 700
+# http_requests{group="production", instance="1", job="app-server"} 600
+
+#eval_ordered instant at 50m topk((3), (http_requests))
+# http_requests{group="canary", instance="1", job="app-server"} 800
+# http_requests{group="canary", instance="0", job="app-server"} 700
+# http_requests{group="production", instance="1", job="app-server"} 600
+
+#eval_ordered instant at 50m topk(5, http_requests{group="canary",job="app-server"})
+# http_requests{group="canary", instance="1", job="app-server"} 800
+# http_requests{group="canary", instance="0", job="app-server"} 700
+
+#eval_ordered instant at 50m bottomk(3, http_requests)
+# http_requests{group="production", instance="0", job="api-server"} 100
+# http_requests{group="production", instance="1", job="api-server"} 200
+# http_requests{group="canary", instance="0", job="api-server"} 300
+
+#eval_ordered instant at 50m bottomk(5, http_requests{group="canary",job="app-server"})
+# http_requests{group="canary", instance="0", job="app-server"} 700
+# http_requests{group="canary", instance="1", job="app-server"} 800
+
+eval instant at 50m topk by (group) (1, http_requests)
+ http_requests{group="production", instance="1", job="app-server"} 600
+ http_requests{group="canary", instance="1", job="app-server"} 800
+
+#eval instant at 50m bottomk by (group) (2, http_requests)
+# http_requests{group="canary", instance="0", job="api-server"} 300
+# http_requests{group="canary", instance="1", job="api-server"} 400
+# http_requests{group="production", instance="0", job="api-server"} 100
+# http_requests{group="production", instance="1", job="api-server"} 200
+
+#eval_ordered instant at 50m bottomk by (group) (2, http_requests{group="production"})
+# http_requests{group="production", instance="0", job="api-server"} 100
+# http_requests{group="production", instance="1", job="api-server"} 200
+
+# Test NaN is sorted away from the top/bottom.
+#eval_ordered instant at 50m topk(3, http_requests{job="api-server",group="production"})
+# http_requests{job="api-server", instance="1", group="production"} 200
+# http_requests{job="api-server", instance="0", group="production"} 100
+# http_requests{job="api-server", instance="2", group="production"} NaN
+
+#eval_ordered instant at 50m bottomk(3, http_requests{job="api-server",group="production"})
+# http_requests{job="api-server", instance="0", group="production"} 100
+# http_requests{job="api-server", instance="1", group="production"} 200
+# http_requests{job="api-server", instance="2", group="production"} NaN
+
+# Test topk and bottomk allocate min(k, input_vector) for results vector
+#eval_ordered instant at 50m bottomk(9999999999, http_requests{job="app-server",group="canary"})
+# http_requests{group="canary", instance="0", job="app-server"} 700
+# http_requests{group="canary", instance="1", job="app-server"} 800
+
+#eval_ordered instant at 50m topk(9999999999, http_requests{job="api-server",group="production"})
+# http_requests{job="api-server", instance="1", group="production"} 200
+# http_requests{job="api-server", instance="0", group="production"} 100
+# http_requests{job="api-server", instance="2", group="production"} NaN
+
+# Bug #5276.
+#eval_ordered instant at 50m topk(scalar(foo), http_requests)
+# http_requests{group="canary", instance="1", job="app-server"} 800
+# http_requests{group="canary", instance="0", job="app-server"} 700
+# http_requests{group="production", instance="1", job="app-server"} 600
+
+clear
+
+# Tests for count_values.
+load 5m
+ version{job="api-server", instance="0", group="production"} 6
+ version{job="api-server", instance="1", group="production"} 6
+ version{job="api-server", instance="2", group="production"} 6
+ version{job="api-server", instance="0", group="canary"} 8
+ version{job="api-server", instance="1", group="canary"} 8
+ version{job="app-server", instance="0", group="production"} 6
+ version{job="app-server", instance="1", group="production"} 6
+ version{job="app-server", instance="0", group="canary"} 7
+ version{job="app-server", instance="1", group="canary"} 7
+
+# FAILING issue #14 (count_values)
+#eval instant at 5m count_values("version", version)
+# {version="6"} 5
+# {version="7"} 2
+# {version="8"} 2
+
+
+#eval instant at 5m count_values(((("version"))), version)
+# {version="6"} 5
+# {version="7"} 2
+# {version="8"} 2
+
+
+#eval instant at 5m count_values without (instance)("version", version)
+# {job="api-server", group="production", version="6"} 3
+# {job="api-server", group="canary", version="8"} 2
+# {job="app-server", group="production", version="6"} 2
+# {job="app-server", group="canary", version="7"} 2
+
+# Overwrite label with output. Don't do this.
+#eval instant at 5m count_values without (instance)("job", version)
+# {job="6", group="production"} 5
+# {job="8", group="canary"} 2
+# {job="7", group="canary"} 2
+
+# Overwrite label with output. Don't do this.
+#eval instant at 5m count_values by (job, group)("job", version)
+# {job="6", group="production"} 5
+# {job="8", group="canary"} 2
+# {job="7", group="canary"} 2
+
+
+# Tests for quantile.
+clear
+
+load 10s
+ data{test="two samples",point="a"} 0
+ data{test="two samples",point="b"} 1
+ data{test="three samples",point="a"} 0
+ data{test="three samples",point="b"} 1
+ data{test="three samples",point="c"} 2
+ data{test="uneven samples",point="a"} 0
+ data{test="uneven samples",point="b"} 1
+ data{test="uneven samples",point="c"} 4
+ foo .8
+
+# FAILING issue #8 (quantile)
+#eval instant at 1m quantile without(point)(0.8, data)
+# {test="two samples"} 0.8
+# {test="three samples"} 1.6
+# {test="uneven samples"} 2.8
+
+# Bug #5276.
+#eval instant at 1m quantile without(point)(scalar(foo), data)
+# {test="two samples"} 0.8
+# {test="three samples"} 1.6
+# {test="uneven samples"} 2.8
+
+
+#eval instant at 1m quantile without(point)((scalar(foo)), data)
+# {test="two samples"} 0.8
+# {test="three samples"} 1.6
+# {test="uneven samples"} 2.8
\ No newline at end of file
diff --git a/src/query/test/compatibility/testdata/functions.test b/src/query/test/compatibility/testdata/functions.test
new file mode 100644
index 0000000000..bef714ed38
--- /dev/null
+++ b/src/query/test/compatibility/testdata/functions.test
@@ -0,0 +1,643 @@
+# Testdata for resets() and changes().
+load 5m
+ http_requests{path="/foo"} 1 2 3 0 1 0 0 1 2 0
+ http_requests{path="/bar"} 1 2 3 4 5 1 2 3 4 5
+ http_requests{path="/biz"} 0 0 0 0 0 1 1 1 1 1
+
+# Tests for resets().
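+# resets() counts how many times the counter value decreases within the range.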
+# FAILING issue #16
+#eval instant at 50m resets(http_requests[5m])
+# {path="/foo"} 0
+# {path="/bar"} 0
+# {path="/biz"} 0
+
+eval instant at 50m resets(http_requests[20m])
+ {path="/foo"} 1
+ {path="/bar"} 0
+ {path="/biz"} 0
+
+eval instant at 50m resets(http_requests[30m])
+ {path="/foo"} 2
+ {path="/bar"} 1
+ {path="/biz"} 0
+
+eval instant at 50m resets(http_requests[50m])
+ {path="/foo"} 3
+ {path="/bar"} 1
+ {path="/biz"} 0
+
+eval instant at 50m resets(nonexistent_metric[50m])
+
+# Tests for changes().
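+# changes() counts how many times the value changes within the range.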
+# FAILING issue #17
+#eval instant at 50m changes(http_requests[5m])
+# {path="/foo"} 0
+# {path="/bar"} 0
+# {path="/biz"} 0
+
+eval instant at 50m changes(http_requests[20m])
+ {path="/foo"} 3
+ {path="/bar"} 3
+ {path="/biz"} 0
+
+eval instant at 50m changes(http_requests[30m])
+ {path="/foo"} 4
+ {path="/bar"} 5
+ {path="/biz"} 1
+
+#eval instant at 50m changes(http_requests[50m])
+# {path="/foo"} 8
+# {path="/bar"} 9
+# {path="/biz"} 1
+
+#eval instant at 50m changes((http_requests[50m]))
+# {path="/foo"} 8
+# {path="/bar"} 9
+# {path="/biz"} 1
+
+eval instant at 50m changes(nonexistent_metric[50m])
+
+clear
+
+load 5m
+ x{a="b"} NaN NaN NaN
+ x{a="c"} 0 NaN 0
+
+# FAILING
+#eval instant at 15m changes(x[15m])
+# {a="b"} 0
+# {a="c"} 2
+
+clear
+
+# Tests for increase().
+load 5m
+ http_requests{path="/foo"} 0+10x10
+ http_requests{path="/bar"} 0+10x5 0+10x5
+
+# Tests for increase().
+eval instant at 50m increase(http_requests[50m])
+ {path="/foo"} 100
+ {path="/bar"} 90
+
+eval instant at 50m increase(http_requests[100m])
+ {path="/foo"} 100
+ {path="/bar"} 90
+
+clear
+
+# Test for increase() with counter reset.
+# When the counter is reset, it always starts at 0.
+# So the sequence 3 2 (decreasing counter = reset) is interpreted the same as 3 0 1 2.
+# Prometheus assumes it missed the intermediate values 0 and 1.
+load 5m
+ http_requests{path="/foo"} 0 1 2 3 2 3 4
+
+eval instant at 30m increase(http_requests[30m])
+ {path="/foo"} 7
+
+clear
+
+# Tests for irate().
+load 5m
+ http_requests{path="/foo"} 0+10x10
+ http_requests{path="/bar"} 0+10x5 0+10x5
+
+eval instant at 50m irate(http_requests[50m])
+ {path="/foo"} .03333333333333333333
+ {path="/bar"} .03333333333333333333
+
+# Counter reset.
+eval instant at 30m irate(http_requests[50m])
+ {path="/foo"} .03333333333333333333
+ {path="/bar"} 0
+
+clear
+
+# Tests for delta().
+load 5m
+ http_requests{path="/foo"} 0 50 100 150 200
+ http_requests{path="/bar"} 200 150 100 50 0
+
+eval instant at 20m delta(http_requests[20m])
+ {path="/foo"} 200
+ {path="/bar"} -200
+
+clear
+
+# Tests for idelta().
+load 5m
+ http_requests{path="/foo"} 0 50 100 150
+ http_requests{path="/bar"} 0 50 100 50
+
+eval instant at 20m idelta(http_requests[20m])
+ {path="/foo"} 50
+ {path="/bar"} -50
+
+clear
+
+# Tests for deriv() and predict_linear().
+load 5m
+ testcounter_reset_middle 0+10x4 0+10x5
+ http_requests{job="app-server", instance="1", group="canary"} 0+80x10
+
+# deriv should return the same as rate in simple cases.
+eval instant at 50m rate(http_requests{group="canary", instance="1", job="app-server"}[50m])
+ {group="canary", instance="1", job="app-server"} 0.26666666666666666
+
+eval instant at 50m deriv(http_requests{group="canary", instance="1", job="app-server"}[50m])
+ {group="canary", instance="1", job="app-server"} 0.26666666666666666
+
+# deriv should return correct result.
+eval instant at 50m deriv(testcounter_reset_middle[100m])
+ {} 0.010606060606060607
+
+# predict_linear should return correct result.
+# X/s = [ 0, 300, 600, 900,1200,1500,1800,2100,2400,2700,3000]
+# Y = [ 0, 10, 20, 30, 40, 0, 10, 20, 30, 40, 50]
+# sumX = 16500
+# sumY = 250
+# sumXY = 480000
+# sumX2 = 34650000
+# n = 11
+# covXY = 105000
+# varX = 9900000
+# slope = 0.010606060606060607
+# intercept at t=0: 6.818181818181818
+# intercept at t=3000: 38.63636363636364
+# intercept at t=3000+3600: 76.81818181818181
+eval instant at 50m predict_linear(testcounter_reset_middle[100m], 3600)
+ {} 76.81818181818181
+
+# With http_requests, there is a sample value exactly at the end of
+# the range, and it has exactly the predicted value, so predict_linear
+# can be emulated with deriv.
+eval instant at 50m predict_linear(http_requests[50m], 3600) - (http_requests + deriv(http_requests[50m]) * 3600)
+ {group="canary", instance="1", job="app-server"} 0
+
+clear
+
+# Tests for label_replace.
+load 5m
+ testmetric{src="source-value-10",dst="original-destination-value"} 0
+ testmetric{src="source-value-20",dst="original-destination-value"} 1
+
+# label_replace does a full-string match and replace.
+eval instant at 0m label_replace(testmetric, "dst", "destination-value-$1", "src", "source-value-(.*)")
+ testmetric{src="source-value-10",dst="destination-value-10"} 0
+ testmetric{src="source-value-20",dst="destination-value-20"} 1
+
+# FAILING. label_replace does not do a sub-string match.
+#eval instant at 0m label_replace(testmetric, "dst", "destination-value-$1", "src", "value-(.*)")
+# testmetric{src="source-value-10",dst="original-destination-value"} 0
+# testmetric{src="source-value-20",dst="original-destination-value"} 1
+
+# label_replace works with multiple capture groups.
+eval instant at 0m label_replace(testmetric, "dst", "$1-value-$2", "src", "(.*)-value-(.*)")
+ testmetric{src="source-value-10",dst="source-value-10"} 0
+ testmetric{src="source-value-20",dst="source-value-20"} 1
+
+# label_replace does not overwrite the destination label if the source label
+# does not exist.
+eval instant at 0m label_replace(testmetric, "dst", "value-$1", "nonexistent-src", "source-value-(.*)")
+ testmetric{src="source-value-10",dst="original-destination-value"} 0
+ testmetric{src="source-value-20",dst="original-destination-value"} 1
+
+# FAILING. label_replace overwrites the destination label if the source label is empty,
+# but matched.
+#eval instant at 0m label_replace(testmetric, "dst", "value-$1", "nonexistent-src", "(.*)")
+# testmetric{src="source-value-10",dst="value-"} 0
+# testmetric{src="source-value-20",dst="value-"} 1
+
+# label_replace does not overwrite the destination label if the source label
+# is not matched.
+eval instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "non-matching-regex")
+ testmetric{src="source-value-10",dst="original-destination-value"} 0
+ testmetric{src="source-value-20",dst="original-destination-value"} 1
+
+# FAILING. eval instant at 0m label_replace((((testmetric))), (("dst")), (("value-$1")), (("src")), (("non-matching-regex")))
+# testmetric{src="source-value-10",dst="original-destination-value"} 0
+# testmetric{src="source-value-20",dst="original-destination-value"} 1
+
+# FAILING. label_replace drops labels that are set to empty values.
+#eval instant at 0m label_replace(testmetric, "dst", "", "dst", ".*")
+# testmetric{src="source-value-10"} 0
+# testmetric{src="source-value-20"} 1
+
+# label_replace fails when the regex is invalid.
+eval_fail instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "(.*")
+
+# FAILING. label_replace fails when the destination label name is not a valid Prometheus label name.
+#eval_fail instant at 0m label_replace(testmetric, "invalid-label-name", "", "src", "(.*)")
+
+# FAILING. label_replace fails when there would be duplicated identical output label sets.
+#eval_fail instant at 0m label_replace(testmetric, "src", "", "", "")
+
+clear
+
+# Tests for vector, time and timestamp.
+load 10s
+ metric 1 1
+
+# FAILING issue #23. eval instant at 0s timestamp(metric)
+# {} 0
+
+# FAILING issue #23. eval instant at 5s timestamp(metric)
+# {} 0
+
+# FAILING issue #23. eval instant at 10s timestamp(metric)
+# {} 10
+
+# FAILING issue #23. eval instant at 10s timestamp(((metric)))
+# {} 10
+
+# Tests for label_join.
+load 5m
+ testmetric{src="a",src1="b",src2="c",dst="original-destination-value"} 0
+ testmetric{src="d",src1="e",src2="f",dst="original-destination-value"} 1
+
+# label_join joins all src values in order.
+eval instant at 0m label_join(testmetric, "dst", "-", "src", "src1", "src2")
+ testmetric{src="a",src1="b",src2="c",dst="a-b-c"} 0
+ testmetric{src="d",src1="e",src2="f",dst="d-e-f"} 1
+
+# FAILING. label_join treats non existent src labels as empty strings.
+#eval instant at 0m label_join(testmetric, "dst", "-", "src", "src3", "src1")
+# testmetric{src="a",src1="b",src2="c",dst="a--b"} 0
+# testmetric{src="d",src1="e",src2="f",dst="d--e"} 1
+
+# FAILING. label_join overwrites the destination label even if the resulting dst label is empty string
+#eval instant at 0m label_join(testmetric, "dst", "", "emptysrc", "emptysrc1", "emptysrc2")
+# testmetric{src="a",src1="b",src2="c"} 0
+# testmetric{src="d",src1="e",src2="f"} 1
+
+# test without src label for label_join
+# FAILING. eval instant at 0m label_join(testmetric, "dst", ", ")
+# testmetric{src="a",src1="b",src2="c"} 0
+# testmetric{src="d",src1="e",src2="f"} 1
+
+# test without dst label for label_join
+load 5m
+ testmetric1{src="foo",src1="bar",src2="foobar"} 0
+ testmetric1{src="fizz",src1="buzz",src2="fizzbuzz"} 1
+
+# label_join creates dst label if not present.
+eval instant at 0m label_join(testmetric1, "dst", ", ", "src", "src1", "src2")
+ testmetric1{src="foo",src1="bar",src2="foobar",dst="foo, bar, foobar"} 0
+ testmetric1{src="fizz",src1="buzz",src2="fizzbuzz",dst="fizz, buzz, fizzbuzz"} 1
+
+clear
+
+# Tests for vector.
+# FAILING issue #51. eval instant at 0m vector(1)
+# {} 1
+
+# FAILING issue #27. eval instant at 0s vector(time())
+# {} 0
+
+# FAILING issue #27. eval instant at 5s vector(time())
+# {} 5
+
+# FAILING issue #27. eval instant at 60m vector(time())
+# {} 3600
+
+
+# Tests for clamp_max and clamp_min().
+load 5m
+ test_clamp{src="clamp-a"} -50
+ test_clamp{src="clamp-b"} 0
+ test_clamp{src="clamp-c"} 100
+
+eval instant at 0m clamp_max(test_clamp, 75)
+ {src="clamp-a"} -50
+ {src="clamp-b"} 0
+ {src="clamp-c"} 75
+
+eval instant at 0m clamp_min(test_clamp, -25)
+ {src="clamp-a"} -25
+ {src="clamp-b"} 0
+ {src="clamp-c"} 100
+
+eval instant at 0m clamp_max(clamp_min(test_clamp, -20), 70)
+ {src="clamp-a"} -20
+ {src="clamp-b"} 0
+ {src="clamp-c"} 70
+
+eval instant at 0m clamp_max((clamp_min(test_clamp, (-20))), (70))
+ {src="clamp-a"} -20
+ {src="clamp-b"} 0
+ {src="clamp-c"} 70
+
+
+# Tests for sort/sort_desc.
+clear
+load 5m
+ http_requests{job="api-server", instance="0", group="production"} 0+10x10
+ http_requests{job="api-server", instance="1", group="production"} 0+20x10
+ http_requests{job="api-server", instance="0", group="canary"} 0+30x10
+ http_requests{job="api-server", instance="1", group="canary"} 0+40x10
+ http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
+ http_requests{job="app-server", instance="0", group="production"} 0+50x10
+ http_requests{job="app-server", instance="1", group="production"} 0+60x10
+ http_requests{job="app-server", instance="0", group="canary"} 0+70x10
+ http_requests{job="app-server", instance="1", group="canary"} 0+80x10
+
+# FAILING issue #28:
+#eval_ordered instant at 50m sort(http_requests)
+# http_requests{group="production", instance="0", job="api-server"} 100
+# http_requests{group="production", instance="1", job="api-server"} 200
+# http_requests{group="canary", instance="0", job="api-server"} 300
+# http_requests{group="canary", instance="1", job="api-server"} 400
+# http_requests{group="production", instance="0", job="app-server"} 500
+# http_requests{group="production", instance="1", job="app-server"} 600
+# http_requests{group="canary", instance="0", job="app-server"} 700
+# http_requests{group="canary", instance="1", job="app-server"} 800
+# http_requests{group="canary", instance="2", job="api-server"} NaN
+
+#eval_ordered instant at 50m sort_desc(http_requests)
+# http_requests{group="canary", instance="1", job="app-server"} 800
+# http_requests{group="canary", instance="0", job="app-server"} 700
+# http_requests{group="production", instance="1", job="app-server"} 600
+# http_requests{group="production", instance="0", job="app-server"} 500
+# http_requests{group="canary", instance="1", job="api-server"} 400
+# http_requests{group="canary", instance="0", job="api-server"} 300
+# http_requests{group="production", instance="1", job="api-server"} 200
+# http_requests{group="production", instance="0", job="api-server"} 100
+# http_requests{group="canary", instance="2", job="api-server"} NaN
+
+# Tests for holt_winters
+clear
+
+# positive trends
+load 10s
+ http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000
+ http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000
+ http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000
+ http_requests{job="api-server", instance="1", group="canary"} 0+40x2000
+
+eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1)
+ {job="api-server", instance="0", group="production"} 8000
+ {job="api-server", instance="1", group="production"} 16000
+ {job="api-server", instance="0", group="canary"} 24000
+ {job="api-server", instance="1", group="canary"} 32000
+
+# negative trends
+clear
+load 10s
+ http_requests{job="api-server", instance="0", group="production"} 8000-10x1000
+ http_requests{job="api-server", instance="1", group="production"} 0-20x1000
+ http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300-80x1000
+ http_requests{job="api-server", instance="1", group="canary"} 0-40x1000 0+40x1000
+
+eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1)
+ {job="api-server", instance="0", group="production"} 0
+ {job="api-server", instance="1", group="production"} -16000
+ {job="api-server", instance="0", group="canary"} 24000
+ {job="api-server", instance="1", group="canary"} -32000
+
+# Tests for avg_over_time
+clear
+load 10s
+ metric 1 2 3 4 5
+
+eval instant at 1m avg_over_time(metric[1m])
+ {} 3
+
+# Tests for stddev_over_time and stdvar_over_time.
+clear
+load 10s
+ metric 0 8 8 2 3
+
+eval instant at 1m stdvar_over_time(metric[1m])
+ {} 10.56
+
+eval instant at 1m stddev_over_time(metric[1m])
+ {} 3.249615
+
+# FAILING issue #19. eval instant at 1m stddev_over_time((metric[1m]))
+# {} 3.249615
+
+# Tests for stddev_over_time and stdvar_over_time #4927.
+clear
+load 10s
+ metric 1.5990505637277868 1.5990505637277868 1.5990505637277868
+
+eval instant at 1m stdvar_over_time(metric[1m])
+ {} 0
+
+eval instant at 1m stddev_over_time(metric[1m])
+ {} 0
+
+# Tests for quantile_over_time
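+# quantile_over_time interpolates linearly between sorted samples: for {0, 1, 4}
+# the 0.8-quantile sits at rank 0.8*(3-1) = 1.6, i.e. 1 + 0.6*(4-1) = 2.8.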
+clear
+
+load 10s
+ data{test="two samples"} 0 1
+ data{test="three samples"} 0 1 2
+ data{test="uneven samples"} 0 1 4
+
+eval instant at 1m quantile_over_time(0, data[1m])
+ {test="two samples"} 0
+ {test="three samples"} 0
+ {test="uneven samples"} 0
+
+eval instant at 1m quantile_over_time(0.5, data[1m])
+ {test="two samples"} 0.5
+ {test="three samples"} 1
+ {test="uneven samples"} 1
+
+eval instant at 1m quantile_over_time(0.75, data[1m])
+ {test="two samples"} 0.75
+ {test="three samples"} 1.5
+ {test="uneven samples"} 2.5
+
+eval instant at 1m quantile_over_time(0.8, data[1m])
+ {test="two samples"} 0.8
+ {test="three samples"} 1.6
+ {test="uneven samples"} 2.8
+
+eval instant at 1m quantile_over_time(1, data[1m])
+ {test="two samples"} 1
+ {test="three samples"} 2
+ {test="uneven samples"} 4
+
+eval instant at 1m quantile_over_time(-1, data[1m])
+ {test="two samples"} -Inf
+ {test="three samples"} -Inf
+ {test="uneven samples"} -Inf
+
+eval instant at 1m quantile_over_time(2, data[1m])
+ {test="two samples"} +Inf
+ {test="three samples"} +Inf
+ {test="uneven samples"} +Inf
+
+# FAILING issue #19. eval instant at 1m (quantile_over_time(2, (data[1m])))
+# {test="two samples"} +Inf
+# {test="three samples"} +Inf
+# {test="uneven samples"} +Inf
+
+clear
+
+# FAILING issue #30. Test time-related functions.
+#eval instant at 0m year()
+# {} 1970
+
+#eval instant at 1ms time()
+# 0.001
+
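+# 1136239445 is 2006-01-02 22:04:05 UTC, a Monday (the Go reference time),
+# which the assertions below decode field by field.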
+eval instant at 0m year(vector(1136239445))
+ {} 2006
+
+#eval instant at 0m month()
+# {} 1
+
+eval instant at 0m month(vector(1136239445))
+ {} 1
+
+#eval instant at 0m day_of_month()
+# {} 1
+
+eval instant at 0m day_of_month(vector(1136239445))
+ {} 2
+
+# 1970-01-01 (epoch 0) was a Thursday, i.e. day_of_week 4.
+#eval instant at 0m day_of_week()
+# {} 4
+
+eval instant at 0m day_of_week(vector(1136239445))
+ {} 1
+
+#eval instant at 0m hour()
+# {} 0
+
+eval instant at 0m hour(vector(1136239445))
+ {} 22
+
+#eval instant at 0m minute()
+# {} 0
+
+eval instant at 0m minute(vector(1136239445))
+ {} 4
+
+# 2008-12-31 23:59:59 just before leap second.
+eval instant at 0m year(vector(1230767999))
+ {} 2008
+
+# 2009-01-01 00:00:00 just after leap second.
+eval instant at 0m year(vector(1230768000))
+ {} 2009
+
+# 2016-02-29 23:59:59 February 29th in leap year.
+eval instant at 0m month(vector(1456790399)) + day_of_month(vector(1456790399)) / 100
+ {} 2.29
+
+# 2016-03-01 00:00:00 March 1st in leap year.
+eval instant at 0m month(vector(1456790400)) + day_of_month(vector(1456790400)) / 100
+ {} 3.01
+
+# February 1st 2016 in leap year.
+eval instant at 0m days_in_month(vector(1454284800))
+ {} 29
+
+# February 1st 2017 not in leap year.
+eval instant at 0m days_in_month(vector(1485907200))
+ {} 28
+
+clear
+
+# Test duplicate labelset in promql output.
+load 5m
+ testmetric1{src="a",dst="b"} 0
+ testmetric2{src="a",dst="b"} 1
+
+# FAILING. eval_fail instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m])
+
+# Tests for *_over_time
+clear
+
+load 10s
+ data{type="numbers"} 2 0 3
+ data{type="some_nan"} 2 0 NaN
+ data{type="some_nan2"} 2 NaN 1
+ data{type="some_nan3"} NaN 0 1
+ data{type="only_nan"} NaN NaN NaN
+
+# Failing with keepNaN feature. eval instant at 1m min_over_time(data[1m])
+# {type="numbers"} 0
+# {type="some_nan"} 0
+# {type="some_nan2"} 1
+# {type="some_nan3"} 0
+# {type="only_nan"} NaN
+
+# Failing with keepNaN feature. eval instant at 1m max_over_time(data[1m])
+# {type="numbers"} 3
+# {type="some_nan"} 2
+# {type="some_nan2"} 2
+# {type="some_nan3"} 1
+# {type="only_nan"} NaN
+
+clear
+
+# FAILING issue #6. Testdata for absent_over_time()
+#eval instant at 1m absent_over_time(http_requests[5m])
+# {} 1
+
+# FAILING issue #6. eval instant at 1m absent_over_time(http_requests{handler="/foo"}[5m])
+# {handler="/foo"} 1
+
+# FAILING issue #6. eval instant at 1m absent_over_time(http_requests{handler!="/foo"}[5m])
+# {} 1
+
+# FAILING issue #6. eval instant at 1m absent_over_time(http_requests{handler="/foo", handler="/bar", handler="/foobar"}[5m])
+# {} 1
+
+# FAILING issue #6. eval instant at 1m absent_over_time(rate(nonexistant[5m])[5m:])
+# {} 1
+
+# FAILING issue #6. eval instant at 1m absent_over_time(http_requests{handler="/foo", handler="/bar", instance="127.0.0.1"}[5m])
+# {instance="127.0.0.1"} 1
+
+load 1m
+ http_requests{path="/foo",instance="127.0.0.1",job="httpd"} 1+1x10
+ http_requests{path="/bar",instance="127.0.0.1",job="httpd"} 1+1x10
+ httpd_handshake_failures_total{instance="127.0.0.1",job="node"} 1+1x15
+ httpd_log_lines_total{instance="127.0.0.1",job="node"} 1
+ ssl_certificate_expiry_seconds{job="ingress"} NaN NaN NaN NaN NaN
+
+# FAILING issue #6. eval instant at 5m absent_over_time(http_requests[5m])
+
+# FAILING issue #6. eval instant at 5m absent_over_time(rate(http_requests[5m])[5m:1m])
+
+# FAILING issue #6. eval instant at 0m absent_over_time(httpd_log_lines_total[30s])
+
+# FAILING issue #6. eval instant at 1m absent_over_time(httpd_log_lines_total[30s])
+# {} 1
+
+# FAILING issue #6. eval instant at 15m absent_over_time(http_requests[5m])
+
+# FAILING issue #6. eval instant at 16m absent_over_time(http_requests[5m])
+# {} 1
+
+# FAILING issue #6. eval instant at 16m absent_over_time(http_requests[6m])
+
+# FAILING issue #6. eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m])
+
+# FAILING issue #6. eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m])
+
+# FAILING issue #6. eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m])
+
+# FAILING issue #6. eval instant at 21m absent_over_time({instance="127.0.0.1"}[5m])
+# FAILING issue #6. {instance="127.0.0.1"} 1
+
+# FAILING issue #6. eval instant at 21m absent_over_time({instance="127.0.0.1"}[20m])
+
+# FAILING issue #6. eval instant at 21m absent_over_time({job="grok"}[20m])
+# FAILING issue #6. {job="grok"} 1
+
+# FAILING issue #6. eval instant at 30m absent_over_time({instance="127.0.0.1"}[5m:5s])
+# FAILING issue #6. {} 1
+
+# FAILING issue #6. eval instant at 5m absent_over_time({job="ingress"}[4m])
+
+# FAILING issue #6. eval instant at 10m absent_over_time({job="ingress"}[4m])
+# FAILING issue #6. {job="ingress"} 1
diff --git a/src/query/test/compatibility/testdata/histograms.test b/src/query/test/compatibility/testdata/histograms.test
new file mode 100644
index 0000000000..5e693ce9d6
--- /dev/null
+++ b/src/query/test/compatibility/testdata/histograms.test
@@ -0,0 +1,185 @@
+# Two histograms with 4 buckets each (x_sum and x_count not included,
+# only buckets). The lowest bucket is < 0 for one histogram and > 0 for
+# the other. They share the same name and are distinguished only by a
+# label. Not useful in practice, but it can happen (if clients change
+# bucketing), and the server has to cope with it.
+
+# Test histogram.
+load 5m
+ testhistogram_bucket{le="0.1", start="positive"} 0+5x10
+ testhistogram_bucket{le=".2", start="positive"} 0+7x10
+ testhistogram_bucket{le="1e0", start="positive"} 0+11x10
+ testhistogram_bucket{le="+Inf", start="positive"} 0+12x10
+ testhistogram_bucket{le="-.2", start="negative"} 0+1x10
+ testhistogram_bucket{le="-0.1", start="negative"} 0+2x10
+ testhistogram_bucket{le="0.3", start="negative"} 0+2x10
+ testhistogram_bucket{le="+Inf", start="negative"} 0+3x10
+
+
+# Now a more realistic histogram per job and instance to test aggregation.
+load 5m
+ request_duration_seconds_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
+ request_duration_seconds_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
+ request_duration_seconds_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
+ request_duration_seconds_bucket{job="job1", instance="ins2", le="0.1"} 0+2x10
+ request_duration_seconds_bucket{job="job1", instance="ins2", le="0.2"} 0+5x10
+ request_duration_seconds_bucket{job="job1", instance="ins2", le="+Inf"} 0+6x10
+ request_duration_seconds_bucket{job="job2", instance="ins1", le="0.1"} 0+3x10
+ request_duration_seconds_bucket{job="job2", instance="ins1", le="0.2"} 0+4x10
+ request_duration_seconds_bucket{job="job2", instance="ins1", le="+Inf"} 0+6x10
+ request_duration_seconds_bucket{job="job2", instance="ins2", le="0.1"} 0+4x10
+ request_duration_seconds_bucket{job="job2", instance="ins2", le="0.2"} 0+7x10
+ request_duration_seconds_bucket{job="job2", instance="ins2", le="+Inf"} 0+9x10
+
+# Different le representations in one histogram.
+load 5m
+ mixed_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
+ mixed_bucket{job="job1", instance="ins1", le="0.2"} 0+1x10
+ mixed_bucket{job="job1", instance="ins1", le="2e-1"} 0+1x10
+ mixed_bucket{job="job1", instance="ins1", le="2.0e-1"} 0+1x10
+ mixed_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
+ mixed_bucket{job="job1", instance="ins2", le="+inf"} 0+0x10
+ mixed_bucket{job="job1", instance="ins2", le="+Inf"} 0+0x10
+
+# Quantile too low.
+eval instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
+ {start="positive"} -Inf
+ {start="negative"} -Inf
+
+# Quantile too high.
+eval instant at 50m histogram_quantile(1.01, testhistogram_bucket)
+ {start="positive"} +Inf
+ {start="negative"} +Inf
+
+# Quantile value in lowest bucket, which is positive.
+eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="positive"})
+ {start="positive"} 0
+
+# Quantile value in lowest bucket, which is negative.
+eval instant at 50m histogram_quantile(0, testhistogram_bucket{start="negative"})
+ {start="negative"} -0.2
+
+# Quantile value in highest bucket.
+eval instant at 50m histogram_quantile(1, testhistogram_bucket)
+ {start="positive"} 1
+ {start="negative"} 0.3
+
+# Finally some useful quantiles.
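+# At 50m the cumulative counts for start="positive" are 50, 70, 110, 120, so the
+# 0.2-quantile rank is 0.2*120 = 24, interpolated within [0, 0.1] as 0.1*24/50 = 0.048.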
+eval instant at 50m histogram_quantile(0.2, testhistogram_bucket)
+ {start="positive"} 0.048
+ {start="negative"} -0.2
+
+
+eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
+ {start="positive"} 0.15
+ {start="negative"} -0.15
+
+eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
+ {start="positive"} 0.72
+ {start="negative"} 0.3
+
+# More realistic with rates.
+eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m]))
+ {start="positive"} 0.048
+ {start="negative"} -0.2
+
+eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m]))
+ {start="positive"} 0.15
+ {start="negative"} -0.15
+
+eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m]))
+ {start="positive"} 0.72
+ {start="negative"} 0.3
+
+# Aggregated histogram: Everything in one.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le))
+ {} 0.075
+
+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le))
+ {} 0.1277777777777778
+
+# Aggregated histogram: Everything in one. Now with avg, which does not change anything.
+eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le))
+ {} 0.075
+
+eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le))
+ {} 0.12777777777777778
+
+# Aggregated histogram: aggregated over job, grouped by instance.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
+ {instance="ins1"} 0.075
+ {instance="ins2"} 0.075
+
+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
+ {instance="ins1"} 0.1333333333
+ {instance="ins2"} 0.125
+
+# Aggregated histogram: aggregated over instance, grouped by job.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
+ {job="job1"} 0.1
+ {job="job2"} 0.0642857142857143
+
+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
+ {job="job1"} 0.14
+ {job="job2"} 0.1125
+
+# Aggregated histogram: By job and instance.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
+ {instance="ins1", job="job1"} 0.11
+ {instance="ins2", job="job1"} 0.09
+ {instance="ins1", job="job2"} 0.06
+ {instance="ins2", job="job2"} 0.0675
+
+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
+ {instance="ins1", job="job1"} 0.15
+ {instance="ins2", job="job1"} 0.1333333333333333
+ {instance="ins1", job="job2"} 0.1
+ {instance="ins2", job="job2"} 0.1166666666666667
+
+# The unaggregated histogram for comparison. Same result as the previous one.
+eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m]))
+ {instance="ins1", job="job1"} 0.11
+ {instance="ins2", job="job1"} 0.09
+ {instance="ins1", job="job2"} 0.06
+ {instance="ins2", job="job2"} 0.0675
+
+eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
+ {instance="ins1", job="job1"} 0.15
+ {instance="ins2", job="job1"} 0.13333333333333333
+ {instance="ins1", job="job2"} 0.1
+ {instance="ins2", job="job2"} 0.11666666666666667
+
+# A histogram with nonmonotonic bucket counts. This may happen when a recording
+# rule evaluation or federation races scrape ingestion, causing some bucket
+# counts to be derived from fewer samples than others.
+
+load 5m
+ nonmonotonic_bucket{le="0.1"} 0+2x10
+ nonmonotonic_bucket{le="1"} 0+1x10
+ nonmonotonic_bucket{le="10"} 0+5x10
+ nonmonotonic_bucket{le="100"} 0+4x10
+ nonmonotonic_bucket{le="1000"} 0+9x10
+ nonmonotonic_bucket{le="+Inf"} 0+8x10
+
+# Nonmonotonic buckets
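+# Counts are forced monotonic before interpolation: at 50m the cumulative counts
+# become 20, 20, 50, 50, 90, 90 (total 90), so the 0.5-quantile (rank 45) falls in
+# (1, 10] and interpolates to 1 + 9*(45-20)/(50-20) = 8.5.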
+eval instant at 50m histogram_quantile(0.01, nonmonotonic_bucket)
+ {} 0.0045
+
+eval instant at 50m histogram_quantile(0.5, nonmonotonic_bucket)
+ {} 8.5
+
+eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket)
+ {} 979.75
+
+# FAILING issue #48. Buckets with different representations of the same upper bound.
+# eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m]))
+# {instance="ins1", job="job1"} 0.15
+# {instance="ins2", job="job1"} NaN
+
+# Failing with keepNaN feature. eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m]))
+# {instance="ins1", job="job1"} 0.2
+# {instance="ins2", job="job1"} NaN
+
+# Failing with keepNaN feature. eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m]))
+# {instance="ins1", job="job1"} 0.2
+# {instance="ins2", job="job1"} NaN
diff --git a/src/query/test/compatibility/testdata/legacy.test b/src/query/test/compatibility/testdata/legacy.test
new file mode 100644
index 0000000000..10a7143ae5
--- /dev/null
+++ b/src/query/test/compatibility/testdata/legacy.test
@@ -0,0 +1,392 @@
+load 5m
+ http_requests{job="api-server", instance="0", group="production"} 0+10x10
+ http_requests{job="api-server", instance="1", group="production"} 0+20x10
+ http_requests{job="api-server", instance="0", group="canary"} 0+30x10
+ http_requests{job="api-server", instance="1", group="canary"} 0+40x10
+ http_requests{job="app-server", instance="0", group="production"} 0+50x10
+ http_requests{job="app-server", instance="1", group="production"} 0+60x10
+ http_requests{job="app-server", instance="0", group="canary"} 0+70x10
+ http_requests{job="app-server", instance="1", group="canary"} 0+80x10
+
+load 5m
+ x{y="testvalue"} 0+10x10
+
+load 5m
+ testcounter_reset_middle 0+10x4 0+10x5
+ testcounter_reset_end 0+10x9 0 10
+
+load 4m
+ testcounter_zero_cutoff{start="0m"} 0+240x10
+ testcounter_zero_cutoff{start="1m"} 60+240x10
+ testcounter_zero_cutoff{start="2m"} 120+240x10
+ testcounter_zero_cutoff{start="3m"} 180+240x10
+ testcounter_zero_cutoff{start="4m"} 240+240x10
+ testcounter_zero_cutoff{start="5m"} 300+240x10
+
+load 5m
+ label_grouping_test{a="aa", b="bb"} 0+10x10
+ label_grouping_test{a="a", b="abb"} 0+20x10
+
+load 5m
+ vector_matching_a{l="x"} 0+1x100
+ vector_matching_a{l="y"} 0+2x50
+ vector_matching_b{l="x"} 0+4x25
+
+load 5m
+ cpu_count{instance="0", type="numa"} 0+30x10
+ cpu_count{instance="0", type="smp"} 0+10x20
+ cpu_count{instance="1", type="smp"} 0+20x10
+
+
+eval instant at 50m SUM(http_requests)
+ {} 3600
+
+eval instant at 50m SUM(http_requests{instance="0"}) BY(job)
+ {job="api-server"} 400
+ {job="app-server"} 1200
+
+eval instant at 50m SUM(http_requests) BY (job)
+ {job="api-server"} 1000
+ {job="app-server"} 2600
+
+# Non-existent labels mentioned in BY-clauses shouldn't propagate to output.
+eval instant at 50m SUM(http_requests) BY (job, nonexistent)
+ {job="api-server"} 1000
+ {job="app-server"} 2600
+
+
+eval instant at 50m COUNT(http_requests) BY (job)
+ {job="api-server"} 4
+ {job="app-server"} 4
+
+
+eval instant at 50m SUM(http_requests) BY (job, group)
+ {group="canary", job="api-server"} 700
+ {group="canary", job="app-server"} 1500
+ {group="production", job="api-server"} 300
+ {group="production", job="app-server"} 1100
+
+
+eval instant at 50m AVG(http_requests) BY (job)
+ {job="api-server"} 250
+ {job="app-server"} 650
+
+
+eval instant at 50m MIN(http_requests) BY (job)
+ {job="api-server"} 100
+ {job="app-server"} 500
+
+
+eval instant at 50m MAX(http_requests) BY (job)
+ {job="api-server"} 400
+ {job="app-server"} 800
+
+
+# Single-letter metric and label names.
+eval instant at 50m x{y="testvalue"}
+ x{y="testvalue"} 100
+
+
+# Rates should calculate per-second rates.
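+# (the selected series grows by 800 over the 3000s window: 800 / 3000 = 0.2666...)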
+eval instant at 50m rate(http_requests{group="canary", instance="1", job="app-server"}[50m])
+ {group="canary", instance="1", job="app-server"} 0.26666666666666666
+
+
+# Counter resets in the middle of the range are handled correctly by rate().
+eval instant at 50m rate(testcounter_reset_middle[50m])
+ {} 0.03
+
+
+# Counter resets at the end of the range are ignored by rate().
+eval instant at 50m rate(testcounter_reset_end[5m])
+ {} 0
+
+
+# Zero cutoff for left-side extrapolation.
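+# rate() extrapolates to the window edges, but no further left than the point
+# where the counter would have been zero, which caps the younger series' rates.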
+eval instant at 10m rate(testcounter_zero_cutoff[20m])
+ {start="0m"} 0.5
+ {start="1m"} 0.55
+ {start="2m"} 0.6
+ {start="3m"} 0.65
+ {start="4m"} 0.7
+ {start="5m"} 0.6
+
+# Normal half-interval cutoff for left-side extrapolation.
+eval instant at 50m rate(testcounter_zero_cutoff[20m])
+ {start="0m"} 0.6
+ {start="1m"} 0.6
+ {start="2m"} 0.6
+ {start="3m"} 0.6
+ {start="4m"} 0.6
+ {start="5m"} 0.6
+
+
+eval instant at 50m http_requests{group!="canary"}
+ http_requests{group="production", instance="1", job="app-server"} 600
+ http_requests{group="production", instance="0", job="app-server"} 500
+ http_requests{group="production", instance="1", job="api-server"} 200
+ http_requests{group="production", instance="0", job="api-server"} 100
+
+eval instant at 50m http_requests{job=~".+-server",group!="canary"}
+ http_requests{group="production", instance="1", job="app-server"} 600
+ http_requests{group="production", instance="0", job="app-server"} 500
+ http_requests{group="production", instance="1", job="api-server"} 200
+ http_requests{group="production", instance="0", job="api-server"} 100
+
+eval instant at 50m http_requests{job!~"api-.+",group!="canary"}
+ http_requests{group="production", instance="1", job="app-server"} 600
+ http_requests{group="production", instance="0", job="app-server"} 500
+
+eval instant at 50m http_requests{group="production",job=~"api-.+"}
+ http_requests{group="production", instance="0", job="api-server"} 100
+ http_requests{group="production", instance="1", job="api-server"} 200
+
+eval instant at 50m abs(-1 * http_requests{group="production",job="api-server"})
+ {group="production", instance="0", job="api-server"} 100
+ {group="production", instance="1", job="api-server"} 200
+
+eval instant at 50m floor(0.004 * http_requests{group="production",job="api-server"})
+ {group="production", instance="0", job="api-server"} 0
+ {group="production", instance="1", job="api-server"} 0
+
+eval instant at 50m ceil(0.004 * http_requests{group="production",job="api-server"})
+ {group="production", instance="0", job="api-server"} 1
+ {group="production", instance="1", job="api-server"} 1
+
+eval instant at 50m round(0.004 * http_requests{group="production",job="api-server"})
+ {group="production", instance="0", job="api-server"} 0
+ {group="production", instance="1", job="api-server"} 1
+
+# Round should correctly handle negative numbers.
+eval instant at 50m round(-1 * (0.004 * http_requests{group="production",job="api-server"}))
+ {group="production", instance="0", job="api-server"} 0
+ {group="production", instance="1", job="api-server"} -1
+
+# Round should round half up.
+eval instant at 50m round(0.005 * http_requests{group="production",job="api-server"})
+ {group="production", instance="0", job="api-server"} 1
+ {group="production", instance="1", job="api-server"} 1
+
+eval instant at 50m round(-1 * (0.005 * http_requests{group="production",job="api-server"}))
+ {group="production", instance="0", job="api-server"} 0
+ {group="production", instance="1", job="api-server"} -1
+
+eval instant at 50m round(1 + 0.005 * http_requests{group="production",job="api-server"})
+ {group="production", instance="0", job="api-server"} 2
+ {group="production", instance="1", job="api-server"} 2
+
+eval instant at 50m round(-1 * (1 + 0.005 * http_requests{group="production",job="api-server"}))
+ {group="production", instance="0", job="api-server"} -1
+ {group="production", instance="1", job="api-server"} -2
+
+# Round should accept the number to round nearest to.
+eval instant at 50m round(0.0005 * http_requests{group="production",job="api-server"}, 0.1)
+ {group="production", instance="0", job="api-server"} 0.1
+ {group="production", instance="1", job="api-server"} 0.1
+
+eval instant at 50m round(2.1 + 0.0005 * http_requests{group="production",job="api-server"}, 0.1)
+ {group="production", instance="0", job="api-server"} 2.2
+ {group="production", instance="1", job="api-server"} 2.2
+
+eval instant at 50m round(5.2 + 0.0005 * http_requests{group="production",job="api-server"}, 0.1)
+ {group="production", instance="0", job="api-server"} 5.3
+ {group="production", instance="1", job="api-server"} 5.3
+
+# Round should work correctly with negative numbers and multiple decimal places.
+eval instant at 50m round(-1 * (5.2 + 0.0005 * http_requests{group="production",job="api-server"}), 0.1)
+ {group="production", instance="0", job="api-server"} -5.2
+ {group="production", instance="1", job="api-server"} -5.3
+
+# Round should work correctly with big toNearests.
+eval instant at 50m round(0.025 * http_requests{group="production",job="api-server"}, 5)
+ {group="production", instance="0", job="api-server"} 5
+ {group="production", instance="1", job="api-server"} 5
+
+eval instant at 50m round(0.045 * http_requests{group="production",job="api-server"}, 5)
+ {group="production", instance="0", job="api-server"} 5
+ {group="production", instance="1", job="api-server"} 10
+
+eval instant at 50m avg_over_time(http_requests{group="production",job="api-server"}[1h])
+ {group="production", instance="0", job="api-server"} 50
+ {group="production", instance="1", job="api-server"} 100
+
+eval instant at 50m count_over_time(http_requests{group="production",job="api-server"}[1h])
+ {group="production", instance="0", job="api-server"} 11
+ {group="production", instance="1", job="api-server"} 11
+
+eval instant at 50m max_over_time(http_requests{group="production",job="api-server"}[1h])
+ {group="production", instance="0", job="api-server"} 100
+ {group="production", instance="1", job="api-server"} 200
+
+eval instant at 50m min_over_time(http_requests{group="production",job="api-server"}[1h])
+ {group="production", instance="0", job="api-server"} 0
+ {group="production", instance="1", job="api-server"} 0
+
+eval instant at 50m sum_over_time(http_requests{group="production",job="api-server"}[1h])
+ {group="production", instance="0", job="api-server"} 550
+ {group="production", instance="1", job="api-server"} 1100
+
+# FAILING. eval instant at 50m time()
+# 3000
+
+eval instant at 50m {__name__=~".+"}
+ http_requests{group="canary", instance="0", job="api-server"} 300
+ http_requests{group="canary", instance="0", job="app-server"} 700
+ http_requests{group="canary", instance="1", job="api-server"} 400
+ http_requests{group="canary", instance="1", job="app-server"} 800
+ http_requests{group="production", instance="0", job="api-server"} 100
+ http_requests{group="production", instance="0", job="app-server"} 500
+ http_requests{group="production", instance="1", job="api-server"} 200
+ http_requests{group="production", instance="1", job="app-server"} 600
+ testcounter_reset_end 0
+ testcounter_reset_middle 50
+ x{y="testvalue"} 100
+ label_grouping_test{a="a", b="abb"} 200
+ label_grouping_test{a="aa", b="bb"} 100
+ vector_matching_a{l="x"} 10
+ vector_matching_a{l="y"} 20
+ vector_matching_b{l="x"} 40
+ cpu_count{instance="1", type="smp"} 200
+ cpu_count{instance="0", type="smp"} 100
+ cpu_count{instance="0", type="numa"} 300
+
+
+eval instant at 50m {job=~".+-server", job!~"api-.+"}
+ http_requests{group="canary", instance="0", job="app-server"} 700
+ http_requests{group="canary", instance="1", job="app-server"} 800
+ http_requests{group="production", instance="0", job="app-server"} 500
+ http_requests{group="production", instance="1", job="app-server"} 600
+
+# eval instant at 50m absent(nonexistent)
+# {} 1
+
+# FAILING issue #52. eval instant at 50m absent(nonexistent{job="testjob", instance="testinstance", method=~".x"})
+# {instance="testinstance", job="testjob"} 1
+
+# FAILING issue #52. eval instant at 50m absent(nonexistent{job="testjob",job="testjob2",foo="bar"})
+# {foo="bar"} 1
+
+# FAILING issue #52. eval instant at 50m absent(nonexistent{job="testjob",job="testjob2",job="three",foo="bar"})
+# {foo="bar"} 1
+
+# FAILING issue #52. eval instant at 50m absent(nonexistent{job="testjob",job=~"testjob2",foo="bar"})
+# {foo="bar"} 1
+
+eval instant at 50m absent(http_requests)
+
+eval instant at 50m absent(sum(http_requests))
+
+# FAILING issue #52. eval instant at 50m absent(sum(nonexistent{job="testjob", instance="testinstance"}))
+# {} 1
+
+# FAILING issue #52. eval instant at 50m absent(max(nonexistant))
+# {} 1
+
+# FAILING issue #52. eval instant at 50m absent(nonexistant > 1)
+# {} 1
+
+# FAILING issue #52. eval instant at 50m absent(a + b)
+# {} 1
+
+# FAILING issue #52. eval instant at 50m absent(a and b)
+# {} 1
+
+# FAILING issue #52. eval instant at 50m absent(rate(nonexistant[5m]))
+# {} 1
+
+eval instant at 50m http_requests{group="production",job="api-server"} offset 5m
+ http_requests{group="production", instance="0", job="api-server"} 90
+ http_requests{group="production", instance="1", job="api-server"} 180
+
+eval instant at 50m rate(http_requests{group="production",job="api-server"}[10m] offset 5m)
+ {group="production", instance="0", job="api-server"} 0.03333333333333333
+ {group="production", instance="1", job="api-server"} 0.06666666666666667
+
+eval instant at 50m http_requests{group="canary", instance="0", job="api-server"} / 0
+ {group="canary", instance="0", job="api-server"} +Inf
+
+eval instant at 50m -1 * http_requests{group="canary", instance="0", job="api-server"} / 0
+ {group="canary", instance="0", job="api-server"} -Inf
+
+# Failing with keepNaN feature. eval instant at 50m 0 * http_requests{group="canary", instance="0", job="api-server"} / 0
+# {group="canary", instance="0", job="api-server"} NaN
+
+# Failing with keepNaN feature. eval instant at 50m 0 * http_requests{group="canary", instance="0", job="api-server"} % 0
+# {group="canary", instance="0", job="api-server"} NaN
+
+# FAILING issue #53. eval instant at 50m exp(vector_matching_a)
+# {l="x"} 22026.465794806718
+# {l="y"} 485165195.4097903
+
+eval instant at 50m exp(vector_matching_a - 10)
+ {l="y"} 22026.465794806718
+ {l="x"} 1
+
+eval instant at 50m exp(vector_matching_a - 20)
+ {l="x"} 4.5399929762484854e-05
+ {l="y"} 1
+
+# FAILING issue #53. eval instant at 50m ln(vector_matching_a)
+# {l="x"} 2.302585092994046
+# {l="y"} 2.995732273553991
+
+eval instant at 50m ln(vector_matching_a - 10)
+ {l="y"} 2.302585092994046
+ {l="x"} -Inf
+
+# FAILING with keepNaN feature. eval instant at 50m ln(vector_matching_a - 20)
+# {l="y"} -Inf
+# {l="x"} NaN
+
+# FAILING issue #53. eval instant at 50m exp(ln(vector_matching_a))
+# {l="y"} 20
+# {l="x"} 10
+
+# FAILING issue #53. eval instant at 50m sqrt(vector_matching_a)
+# {l="x"} 3.1622776601683795
+# {l="y"} 4.47213595499958
+
+# FAILING issue #53. eval instant at 50m log2(vector_matching_a)
+# {l="x"} 3.3219280948873626
+# {l="y"} 4.321928094887363
+
+eval instant at 50m log2(vector_matching_a - 10)
+ {l="y"} 3.3219280948873626
+ {l="x"} -Inf
+
+# FAILING with keepNaN feature. eval instant at 50m log2(vector_matching_a - 20)
+# {l="x"} NaN
+# {l="y"} -Inf
+
+# FAILING issue #53. eval instant at 50m log10(vector_matching_a)
+# {l="x"} 1
+# {l="y"} 1.301029995663981
+
+eval instant at 50m log10(vector_matching_a - 10)
+ {l="y"} 1
+ {l="x"} -Inf
+
+# FAILING with keepNaN feature. eval instant at 50m log10(vector_matching_a - 20)
+# {l="x"} NaN
+# {l="y"} -Inf
+
+
+# Matrix tests.
+clear
+
+load 1h
+ testmetric{aa="bb"} 1
+ testmetric{a="abb"} 2
+
+eval instant at 0h testmetric
+ testmetric{aa="bb"} 1
+ testmetric{a="abb"} 2
+
+clear
+
+# Test duplicate labelset in promql output.
+load 5m
+ testmetric1{src="a",dst="b"} 0
+ testmetric2{src="a",dst="b"} 1
+
+# FAILING. eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
diff --git a/src/query/test/compatibility/testdata/literals.test b/src/query/test/compatibility/testdata/literals.test
new file mode 100644
index 0000000000..346d792610
--- /dev/null
+++ b/src/query/test/compatibility/testdata/literals.test
@@ -0,0 +1,59 @@
+eval instant at 50m 12.34e6
+ 12340000
+
+eval instant at 50m 12.34e+6
+ 12340000
+
+eval instant at 50m 12.34e-6
+ 0.00001234
+
+eval instant at 50m 1+1
+ 2
+
+eval instant at 50m 1-1
+ 0
+
+eval instant at 50m 1 - -1
+ 2
+
+eval instant at 50m .2
+ 0.2
+
+eval instant at 50m +0.2
+ 0.2
+
+eval instant at 50m -0.2e-6
+ -0.0000002
+
+eval instant at 50m +Inf
+ +Inf
+
+eval instant at 50m inF
+ +Inf
+
+eval instant at 50m -inf
+ -Inf
+
+eval instant at 50m NaN
+ NaN
+
+eval instant at 50m nan
+ NaN
+
+eval instant at 50m 2.
+ 2
+
+eval instant at 50m 1 / 0
+ +Inf
+
+eval instant at 50m ((1) / (0))
+ +Inf
+
+eval instant at 50m -1 / 0
+ -Inf
+
+eval instant at 50m 0 / 0
+ NaN
+
+eval instant at 50m 1 % 0
+ NaN
diff --git a/src/query/test/compatibility/testdata/operators.test b/src/query/test/compatibility/testdata/operators.test
new file mode 100644
index 0000000000..33137786c9
--- /dev/null
+++ b/src/query/test/compatibility/testdata/operators.test
@@ -0,0 +1,440 @@
+load 5m
+ http_requests{job="api-server", instance="0", group="production"} 0+10x10
+ http_requests{job="api-server", instance="1", group="production"} 0+20x10
+ http_requests{job="api-server", instance="0", group="canary"} 0+30x10
+ http_requests{job="api-server", instance="1", group="canary"} 0+40x10
+ http_requests{job="app-server", instance="0", group="production"} 0+50x10
+ http_requests{job="app-server", instance="1", group="production"} 0+60x10
+ http_requests{job="app-server", instance="0", group="canary"} 0+70x10
+ http_requests{job="app-server", instance="1", group="canary"} 0+80x10
+
+load 5m
+ vector_matching_a{l="x"} 0+1x100
+ vector_matching_a{l="y"} 0+2x50
+ vector_matching_b{l="x"} 0+4x25
+
+
+eval instant at 50m SUM(http_requests) BY (job) - COUNT(http_requests) BY (job)
+ {job="api-server"} 996
+ {job="app-server"} 2596
+
+eval instant at 50m 2 - SUM(http_requests) BY (job)
+ {job="api-server"} -998
+ {job="app-server"} -2598
+
+# FAILING issue #53. eval instant at 50m -http_requests{job="api-server",instance="0",group="production"}
+# {job="api-server",instance="0",group="production"} -100
+
+eval instant at 50m +http_requests{job="api-server",instance="0",group="production"}
+ http_requests{job="api-server",instance="0",group="production"} 100
+
+eval instant at 50m - - - SUM(http_requests) BY (job)
+ {job="api-server"} -1000
+ {job="app-server"} -2600
+
+eval instant at 50m - - - 1
+ -1
+
+# FAILING. eval instant at 50m -2^---1*3
+# -1.5
+
+# FAILING. eval instant at 50m 2/-2^---1*3+2
+# -10
+
+# FAILING. eval instant at 50m -10^3 * - SUM(http_requests) BY (job) ^ -1
+# {job="api-server"} 1
+# {job="app-server"} 0.38461538461538464
+
+eval instant at 50m 1000 / SUM(http_requests) BY (job)
+ {job="api-server"} 1
+ {job="app-server"} 0.38461538461538464
+
+eval instant at 50m SUM(http_requests) BY (job) - 2
+ {job="api-server"} 998
+ {job="app-server"} 2598
+
+eval instant at 50m SUM(http_requests) BY (job) % 3
+ {job="api-server"} 1
+ {job="app-server"} 2
+
+eval instant at 50m SUM(http_requests) BY (job) % 0.3
+ {job="api-server"} 0.1
+ {job="app-server"} 0.2
+
+eval instant at 50m SUM(http_requests) BY (job) ^ 2
+ {job="api-server"} 1000000
+ {job="app-server"} 6760000
+
+eval instant at 50m SUM(http_requests) BY (job) % 3 ^ 2
+ {job="api-server"} 1
+ {job="app-server"} 8
+
+eval instant at 50m SUM(http_requests) BY (job) % 2 ^ (3 ^ 2)
+ {job="api-server"} 488
+ {job="app-server"} 40
+
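+# ^ is right-associative, so 2 ^ 3 ^ 2 parses as 2 ^ (3 ^ 2) = 512, matching the
+# parenthesized form above.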
+eval instant at 50m SUM(http_requests) BY (job) % 2 ^ 3 ^ 2
+ {job="api-server"} 488
+ {job="app-server"} 40
+
+eval instant at 50m SUM(http_requests) BY (job) % 2 ^ 3 ^ 2 ^ 2
+ {job="api-server"} 1000
+ {job="app-server"} 2600
+
+eval instant at 50m COUNT(http_requests) BY (job) ^ COUNT(http_requests) BY (job)
+ {job="api-server"} 256
+ {job="app-server"} 256
+
+eval instant at 50m SUM(http_requests) BY (job) / 0
+ {job="api-server"} +Inf
+ {job="app-server"} +Inf
+
+eval instant at 50m SUM(http_requests) BY (job) + SUM(http_requests) BY (job)
+ {job="api-server"} 2000
+ {job="app-server"} 5200
+
+eval instant at 50m (SUM((http_requests)) BY (job)) + SUM(http_requests) BY (job)
+ {job="api-server"} 2000
+ {job="app-server"} 5200
+
+eval instant at 50m http_requests{job="api-server", group="canary"}
+ http_requests{group="canary", instance="0", job="api-server"} 300
+ http_requests{group="canary", instance="1", job="api-server"} 400
+
+eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60
+ {group="canary", instance="0", job="api-server"} 330
+ {group="canary", instance="1", job="api-server"} 440
+
+eval instant at 50m rate(http_requests[25m]) * 25 * 60
+ {group="canary", instance="0", job="api-server"} 150
+ {group="canary", instance="0", job="app-server"} 350
+ {group="canary", instance="1", job="api-server"} 200
+ {group="canary", instance="1", job="app-server"} 400
+ {group="production", instance="0", job="api-server"} 50
+ {group="production", instance="0", job="app-server"} 249.99999999999997
+ {group="production", instance="1", job="api-server"} 100
+ {group="production", instance="1", job="app-server"} 300
+
+# FAILING issue #19. eval instant at 50m (rate((http_requests[25m])) * 25) * 60
+# {group="canary", instance="0", job="api-server"} 150
+# {group="canary", instance="0", job="app-server"} 350
+# {group="canary", instance="1", job="api-server"} 200
+# {group="canary", instance="1", job="app-server"} 400
+# {group="production", instance="0", job="api-server"} 50
+# {group="production", instance="0", job="app-server"} 249.99999999999997
+# {group="production", instance="1", job="api-server"} 100
+# {group="production", instance="1", job="app-server"} 300
+
+
+# FAILING order of rows differs. eval instant at 50m http_requests{group="canary"} and http_requests{instance="0"}
+# http_requests{group="canary", instance="0", job="api-server"} 300
+# http_requests{group="canary", instance="0", job="app-server"} 700
+
+eval instant at 50m (http_requests{group="canary"} + 1) and http_requests{instance="0"}
+ {group="canary", instance="0", job="api-server"} 301
+ {group="canary", instance="0", job="app-server"} 701
+
+eval instant at 50m (http_requests{group="canary"} + 1) and on(instance, job) http_requests{instance="0", group="production"}
+ {group="canary", instance="0", job="api-server"} 301
+ {group="canary", instance="0", job="app-server"} 701
+
+eval instant at 50m (http_requests{group="canary"} + 1) and on(instance) http_requests{instance="0", group="production"}
+ {group="canary", instance="0", job="api-server"} 301
+ {group="canary", instance="0", job="app-server"} 701
+
+eval instant at 50m (http_requests{group="canary"} + 1) and ignoring(group) http_requests{instance="0", group="production"}
+ {group="canary", instance="0", job="api-server"} 301
+ {group="canary", instance="0", job="app-server"} 701
+
+eval instant at 50m (http_requests{group="canary"} + 1) and ignoring(group, job) http_requests{instance="0", group="production"}
+ {group="canary", instance="0", job="api-server"} 301
+ {group="canary", instance="0", job="app-server"} 701
+
+# FAILING order of rows differs. eval instant at 50m http_requests{group="canary"} or http_requests{group="production"}
+# http_requests{group="canary", instance="0", job="api-server"} 300
+# http_requests{group="canary", instance="0", job="app-server"} 700
+# http_requests{group="canary", instance="1", job="api-server"} 400
+# http_requests{group="canary", instance="1", job="app-server"} 800
+# http_requests{group="production", instance="0", job="api-server"} 100
+# http_requests{group="production", instance="0", job="app-server"} 500
+# http_requests{group="production", instance="1", job="api-server"} 200
+# http_requests{group="production", instance="1", job="app-server"} 600
+
+# On overlap the rhs samples must be dropped.
+# FAILING issue #34. eval instant at 50m (http_requests{group="canary"} + 1) or http_requests{instance="1"}
+# {group="canary", instance="0", job="api-server"} 301
+# {group="canary", instance="0", job="app-server"} 701
+# {group="canary", instance="1", job="api-server"} 401
+# {group="canary", instance="1", job="app-server"} 801
+# http_requests{group="production", instance="1", job="api-server"} 200
+# http_requests{group="production", instance="1", job="app-server"} 600
+
+
+# Matching only on instance excludes everything that has instance=0/1 but includes
+# entries without the instance label.
+# FAILING issue #34. eval instant at 50m (http_requests{group="canary"} + 1) or on(instance) (http_requests or cpu_count or vector_matching_a)
+# {group="canary", instance="0", job="api-server"} 301
+# {group="canary", instance="0", job="app-server"} 701
+# {group="canary", instance="1", job="api-server"} 401
+# {group="canary", instance="1", job="app-server"} 801
+# vector_matching_a{l="x"} 10
+# vector_matching_a{l="y"} 20
+
+# FAILING issue #34. eval instant at 50m (http_requests{group="canary"} + 1) or ignoring(l, group, job) (http_requests or cpu_count or vector_matching_a)
+# {group="canary", instance="0", job="api-server"} 301
+# {group="canary", instance="0", job="app-server"} 701
+# {group="canary", instance="1", job="api-server"} 401
+# {group="canary", instance="1", job="app-server"} 801
+# vector_matching_a{l="x"} 10
+# vector_matching_a{l="y"} 20
+
+# FAILING issue #34. eval instant at 50m http_requests{group="canary"} unless http_requests{instance="0"}
+# http_requests{group="canary", instance="1", job="api-server"} 400
+# http_requests{group="canary", instance="1", job="app-server"} 800
+
+# FAILING issue #35. eval instant at 50m http_requests{group="canary"} unless on(job) http_requests{instance="0"}
+
+# FAILING issue #34. eval instant at 50m http_requests{group="canary"} unless on(job, instance) http_requests{instance="0"}
+# http_requests{group="canary", instance="1", job="api-server"} 400
+# http_requests{group="canary", instance="1", job="app-server"} 800
+
+eval instant at 50m http_requests{group="canary"} / on(instance,job) http_requests{group="production"}
+ {instance="0", job="api-server"} 3
+ {instance="0", job="app-server"} 1.4
+ {instance="1", job="api-server"} 2
+ {instance="1", job="app-server"} 1.3333333333333333
+
+# FAILING issue #35. eval instant at 50m http_requests{group="canary"} unless ignoring(group, instance) http_requests{instance="0"}
+
+# FAILING. eval instant at 50m http_requests{group="canary"} unless ignoring(group) http_requests{instance="0"}
+# http_requests{group="canary", instance="1", job="api-server"} 400
+# http_requests{group="canary", instance="1", job="app-server"} 800
+
+# FAILING. eval instant at 50m http_requests{group="canary"} / ignoring(group) http_requests{group="production"}
+# {instance="0", job="api-server"} 3
+# {instance="0", job="app-server"} 1.4
+# {instance="1", job="api-server"} 2
+# {instance="1", job="app-server"} 1.3333333333333333
+
+# https://github.com/prometheus/prometheus/issues/1489
+# FAILING. eval instant at 50m http_requests AND ON (dummy) vector(1)
+# http_requests{group="canary", instance="0", job="api-server"} 300
+# http_requests{group="canary", instance="0", job="app-server"} 700
+# http_requests{group="canary", instance="1", job="api-server"} 400
+# http_requests{group="canary", instance="1", job="app-server"} 800
+# http_requests{group="production", instance="0", job="api-server"} 100
+# http_requests{group="production", instance="0", job="app-server"} 500
+# http_requests{group="production", instance="1", job="api-server"} 200
+# http_requests{group="production", instance="1", job="app-server"} 600
+
+# FAILING. eval instant at 50m http_requests AND IGNORING (group, instance, job) vector(1)
+# http_requests{group="canary", instance="0", job="api-server"} 300
+# http_requests{group="canary", instance="0", job="app-server"} 700
+# http_requests{group="canary", instance="1", job="api-server"} 400
+# http_requests{group="canary", instance="1", job="app-server"} 800
+# http_requests{group="production", instance="0", job="api-server"} 100
+# http_requests{group="production", instance="0", job="app-server"} 500
+# http_requests{group="production", instance="1", job="api-server"} 200
+# http_requests{group="production", instance="1", job="app-server"} 600
+
+
+# Comparisons.
+eval instant at 50m SUM(http_requests) BY (job) > 1000
+ {job="app-server"} 2600
+
+# FAILING (returns lhs instead of rhs). eval instant at 50m 1000 < SUM(http_requests) BY (job)
+# {job="app-server"} 2600
+
+eval instant at 50m SUM(http_requests) BY (job) <= 1000
+ {job="api-server"} 1000
+
+eval instant at 50m SUM(http_requests) BY (job) != 1000
+ {job="app-server"} 2600
+
+eval instant at 50m SUM(http_requests) BY (job) == 1000
+ {job="api-server"} 1000
+
+eval instant at 50m SUM(http_requests) BY (job) == bool 1000
+ {job="api-server"} 1
+ {job="app-server"} 0
+
+eval instant at 50m SUM(http_requests) BY (job) == bool SUM(http_requests) BY (job)
+ {job="api-server"} 1
+ {job="app-server"} 1
+
+eval instant at 50m SUM(http_requests) BY (job) != bool SUM(http_requests) BY (job)
+ {job="api-server"} 0
+ {job="app-server"} 0
+
+eval instant at 50m 0 == bool 1
+ 0
+
+eval instant at 50m 1 == bool 1
+ 1
+
+eval instant at 50m http_requests{job="api-server", instance="0", group="production"} == bool 100
+ {job="api-server", instance="0", group="production"} 1
+
+# group_left/group_right.
+
+clear
+
+load 5m
+ node_var{instance="abc",job="node"} 2
+ node_role{instance="abc",job="node",role="prometheus"} 1
+
+load 5m
+ node_cpu{instance="abc",job="node",mode="idle"} 3
+ node_cpu{instance="abc",job="node",mode="user"} 1
+ node_cpu{instance="def",job="node",mode="idle"} 8
+ node_cpu{instance="def",job="node",mode="user"} 2
+
+load 5m
+ random{foo="bar"} 1
+
+load 5m
+ threshold{instance="abc",job="node",target="a@b.com"} 0
+
+# Copy machine role to node variable.
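+# group_right marks the right-hand operand as the higher-cardinality ("many")
+# side; the listed label (role) is copied over from the left ("one") side.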
+eval instant at 5m node_role * on (instance) group_right (role) node_var
+ {instance="abc",job="node",role="prometheus"} 2
+
+# FAILING. eval instant at 5m node_var * on (instance) group_left (role) node_role
+# {instance="abc",job="node",role="prometheus"} 2
+
+# FAILING. eval instant at 5m node_var * ignoring (role) group_left (role) node_role
+# {instance="abc",job="node",role="prometheus"} 2
+
+eval instant at 5m node_role * ignoring (role) group_right (role) node_var
+ {instance="abc",job="node",role="prometheus"} 2
+
+# Copy machine role to node variable with instrumentation labels.
+# FAILING. eval instant at 5m node_cpu * ignoring (role, mode) group_left (role) node_role
+# {instance="abc",job="node",mode="idle",role="prometheus"} 3
+# {instance="abc",job="node",mode="user",role="prometheus"} 1
+
+# FAILING. eval instant at 5m node_cpu * on (instance) group_left (role) node_role
+# {instance="abc",job="node",mode="idle",role="prometheus"} 3
+# {instance="abc",job="node",mode="user",role="prometheus"} 1
+
+
+# Ratio of total.
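+# e.g. for instance "abc": idle 3 / (3 + 1) = 0.75 and user 1 / (3 + 1) = 0.25.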
+eval instant at 5m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu)
+ {instance="abc",job="node",mode="idle"} .75
+ {instance="abc",job="node",mode="user"} .25
+ {instance="def",job="node",mode="idle"} .80
+ {instance="def",job="node",mode="user"} .20
+
+eval instant at 5m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)
+ {job="node",mode="idle"} 0.7857142857142857
+ {job="node",mode="user"} 0.21428571428571427
+
+eval instant at 5m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu))
+ {} 1.0
+
+
+eval instant at 5m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu)
+ {instance="abc",job="node",mode="idle"} .75
+ {instance="abc",job="node",mode="user"} .25
+ {instance="def",job="node",mode="idle"} .80
+ {instance="def",job="node",mode="user"} .20
+
+eval instant at 5m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu)
+ {instance="abc",job="node",mode="idle"} .75
+ {instance="abc",job="node",mode="user"} .25
+ {instance="def",job="node",mode="idle"} .80
+ {instance="def",job="node",mode="user"} .20
+
+eval instant at 5m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)
+ {job="node",mode="idle"} 0.7857142857142857
+ {job="node",mode="user"} 0.21428571428571427
+
+eval instant at 5m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu))
+ {} 1.0
+
+
+# Copy over a label from a metric with no matching labels, without having to list cross-job target labels ('job' here).
+# FAILING. eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0
+# {instance="abc",job="node",mode="idle",foo="bar"} 3
+# {instance="abc",job="node",mode="user",foo="bar"} 1
+# {instance="def",job="node",mode="idle",foo="bar"} 8
+# {instance="def",job="node",mode="user",foo="bar"} 2
+
+
+# Use threshold from metric, and copy over target.
+# FAILING. eval instant at 5m node_cpu > on(job, instance) group_left(target) threshold
+# node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3
+# node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1
+
+# Use threshold from metric, and a default (1) if it's not present.
+# FAILING. eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1))
+# node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3
+# node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1
+# node_cpu{instance="def",job="node",mode="idle"} 8
+# node_cpu{instance="def",job="node",mode="user"} 2
+
+
+# Check that binops drop the metric name.
+eval instant at 5m node_cpu + 2
+ {instance="abc",job="node",mode="idle"} 5
+ {instance="abc",job="node",mode="user"} 3
+ {instance="def",job="node",mode="idle"} 10
+ {instance="def",job="node",mode="user"} 4
+
+eval instant at 5m node_cpu - 2
+ {instance="abc",job="node",mode="idle"} 1
+ {instance="abc",job="node",mode="user"} -1
+ {instance="def",job="node",mode="idle"} 6
+ {instance="def",job="node",mode="user"} 0
+
+eval instant at 5m node_cpu / 2
+ {instance="abc",job="node",mode="idle"} 1.5
+ {instance="abc",job="node",mode="user"} 0.5
+ {instance="def",job="node",mode="idle"} 4
+ {instance="def",job="node",mode="user"} 1
+
+eval instant at 5m node_cpu * 2
+ {instance="abc",job="node",mode="idle"} 6
+ {instance="abc",job="node",mode="user"} 2
+ {instance="def",job="node",mode="idle"} 16
+ {instance="def",job="node",mode="user"} 4
+
+eval instant at 5m node_cpu ^ 2
+ {instance="abc",job="node",mode="idle"} 9
+ {instance="abc",job="node",mode="user"} 1
+ {instance="def",job="node",mode="idle"} 64
+ {instance="def",job="node",mode="user"} 4
+
+eval instant at 5m node_cpu % 2
+ {instance="abc",job="node",mode="idle"} 1
+ {instance="abc",job="node",mode="user"} 1
+ {instance="def",job="node",mode="idle"} 0
+ {instance="def",job="node",mode="user"} 0
+
+
+clear
+
+load 5m
+ random{foo="bar"} 2
+ metricA{baz="meh"} 3
+ metricB{baz="meh"} 4
+
+# On with no labels, for metrics with no common labels.
+# FAILING issue #36. eval instant at 5m random + on() metricA
+# {} 5
+
+# Ignoring with no labels is the same as no ignoring.
+eval instant at 5m metricA + ignoring() metricB
+ {baz="meh"} 7
+
+eval instant at 5m metricA + metricB
+ {baz="meh"} 7
+
+clear
+
+# Test duplicate labelset in promql output.
+load 5m
+ testmetric1{src="a",dst="b"} 0
+ testmetric2{src="a",dst="b"} 1
+
+# FAILING issue #32. eval_fail instant at 0m -{__name__=~'testmetric1|testmetric2'}
diff --git a/src/query/test/compatibility/testdata/regression.test b/src/query/test/compatibility/testdata/regression.test
new file mode 100644
index 0000000000..1f42a77fe0
--- /dev/null
+++ b/src/query/test/compatibility/testdata/regression.test
@@ -0,0 +1,44 @@
+# This test data exercises queries that previously regressed.
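+# Regex matchers are fully anchored, and patterns that match the empty string
+# (such as ".*") also select series that lack the label entirely.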
+load 5m
+ http_requests{job="foo", instance="bar"} 0+10x10
+ http_requests{job="foo", instance="baz"} 0+20x10
+ http_requests{job="baz", instance="bar"} 0+30x10
+ http_requests{instance="bar", group="foo"} 0+40x10
+ http_requests{instance="bar", group="baz"} 0+50x10
+ http_requests{instance="baz", group="foo"} 0+60x10
+
+eval instant at 0s http_requests{job=~".*foo"}
+ http_requests{job="foo", instance="bar"} 0
+ http_requests{job="foo", instance="baz"} 0
+
+eval instant at 0s http_requests{job!~".*foo"}
+ http_requests{job="baz", instance="bar"} 0
+ http_requests{instance="bar", group="foo"} 0
+ http_requests{instance="bar", group="baz"} 0
+ http_requests{instance="baz", group="foo"} 0
+
+eval instant at 0s http_requests{job=~".+oo"}
+ http_requests{job="foo", instance="bar"} 0
+ http_requests{job="foo", instance="baz"} 0
+
+eval instant at 0s http_requests{job!~".+oo"}
+ http_requests{job="baz", instance="bar"} 0
+ http_requests{instance="bar", group="foo"} 0
+ http_requests{instance="bar", group="baz"} 0
+ http_requests{instance="baz", group="foo"} 0
+
+eval instant at 0s http_requests{job=~".*"}
+ http_requests{job="foo", instance="bar"} 0
+ http_requests{job="foo", instance="baz"} 0
+ http_requests{job="baz", instance="bar"} 0
+ http_requests{instance="bar", group="foo"} 0
+ http_requests{instance="bar", group="baz"} 0
+ http_requests{instance="baz", group="foo"} 0
+
+eval instant at 0s http_requests{job=~".+"}
+ http_requests{job="foo", instance="bar"} 0
+ http_requests{job="foo", instance="baz"} 0
+ http_requests{job="baz", instance="bar"} 0
+
+clear
+
diff --git a/src/query/test/compatibility/testdata/selectors.test b/src/query/test/compatibility/testdata/selectors.test
new file mode 100644
index 0000000000..9b8b0caf25
--- /dev/null
+++ b/src/query/test/compatibility/testdata/selectors.test
@@ -0,0 +1,103 @@
+load 10s
+ http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000
+ http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000
+ http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000
+ http_requests{job="api-server", instance="1", group="canary"} 0+40x2000
+
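+# At 8000s each series is still in its first segment, growing by 10, 20, 30 and
+# 40 per 10s respectively, i.e. per-second rates of 1, 2, 3 and 4.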
+eval instant at 8000s rate(http_requests[1m])
+ {job="api-server", instance="0", group="production"} 1
+ {job="api-server", instance="1", group="production"} 2
+ {job="api-server", instance="0", group="canary"} 3
+ {job="api-server", instance="1", group="canary"} 4
+
+eval instant at 18000s rate(http_requests[1m])
+ {job="api-server", instance="0", group="production"} 3
+ {job="api-server", instance="1", group="production"} 3
+ {job="api-server", instance="0", group="canary"} 8
+ {job="api-server", instance="1", group="canary"} 4
+
+eval instant at 8000s rate(http_requests{group=~"pro.*"}[1m])
+ {job="api-server", instance="0", group="production"} 1
+ {job="api-server", instance="1", group="production"} 2
+
+eval instant at 18000s rate(http_requests{group=~".*ry", instance="1"}[1m])
+ {job="api-server", instance="1", group="canary"} 4
+
+eval instant at 18000s rate(http_requests{instance!="3"}[1m] offset 10000s)
+ {job="api-server", instance="0", group="production"} 1
+ {job="api-server", instance="1", group="production"} 2
+ {job="api-server", instance="0", group="canary"} 3
+ {job="api-server", instance="1", group="canary"} 4
+
+eval instant at 18000s rate(http_requests[40s]) - rate(http_requests[1m] offset 10000s)
+ {job="api-server", instance="0", group="production"} 2
+ {job="api-server", instance="1", group="production"} 1
+ {job="api-server", instance="0", group="canary"} 5
+ {job="api-server", instance="1", group="canary"} 0
+
+# https://github.com/prometheus/prometheus/issues/3575
+eval instant at 0s http_requests{foo!="bar"}
+ http_requests{job="api-server", instance="0", group="production"} 0
+ http_requests{job="api-server", instance="1", group="production"} 0
+ http_requests{job="api-server", instance="0", group="canary"} 0
+ http_requests{job="api-server", instance="1", group="canary"} 0
+
+eval instant at 0s http_requests{foo!="bar", job="api-server"}
+ http_requests{job="api-server", instance="0", group="production"} 0
+ http_requests{job="api-server", instance="1", group="production"} 0
+ http_requests{job="api-server", instance="0", group="canary"} 0
+ http_requests{job="api-server", instance="1", group="canary"} 0
+
+eval instant at 0s http_requests{foo!~"bar", job="api-server"}
+ http_requests{job="api-server", instance="0", group="production"} 0
+ http_requests{job="api-server", instance="1", group="production"} 0
+ http_requests{job="api-server", instance="0", group="canary"} 0
+ http_requests{job="api-server", instance="1", group="canary"} 0
+
+eval instant at 0s http_requests{foo!~"bar", job="api-server", instance="1", x!="y", z="", group!=""}
+ http_requests{job="api-server", instance="1", group="production"} 0
+ http_requests{job="api-server", instance="1", group="canary"} 0
+
+# Check special casing for an existing label.
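+# (an empty-value matcher such as job="" matches only series without that label)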
+eval instant at 0s http_requests{job="", instance="0", group="production"}
+
+eval instant at 0s http_requests{job!="", instance="0", group="production"}
+ http_requests{job="api-server", instance="0", group="production"} 0
+
+eval instant at 0s http_requests{job=~"", instance="0", group="production"}
+
+eval instant at 0s http_requests{job!~"", instance="0", group="production"}
+ http_requests{job="api-server", instance="0", group="production"} 0
+
+eval instant at 0s http_requests{job=~".+", instance="0", group="production"}
+ http_requests{job="api-server", instance="0", group="production"} 0
+
+eval instant at 0s http_requests{job=~".*", instance="0", group="production"}
+ http_requests{job="api-server", instance="0", group="production"} 0
+
+eval instant at 0s http_requests{job!~".+", instance="0", group="production"}
+
+eval instant at 0s http_requests{job!~".*", instance="0", group="production"}
+
+# Check special casing for a non-existent label.
+eval instant at 0s http_requests{foo="", job="api-server", instance="0", group="production"}
+ http_requests{job="api-server", instance="0", group="production"} 0
+
+eval instant at 0s http_requests{foo!="", job="api-server", instance="0", group="production"}
+
+eval instant at 0s http_requests{foo=~"", job="api-server", instance="0", group="production"}
+ http_requests{job="api-server", instance="0", group="production"} 0
+
+eval instant at 0s http_requests{foo!~"", job="api-server", instance="0", group="production"}
+
+eval instant at 0s http_requests{foo=~".+", job="api-server", instance="0", group="production"}
+
+eval instant at 0s http_requests{foo=~".*", job="api-server", instance="0", group="production"}
+ http_requests{job="api-server", instance="0", group="production"} 0
+
+eval instant at 0s http_requests{foo!~".+", job="api-server", instance="0", group="production"}
+ http_requests{job="api-server", instance="0", group="production"} 0
+
+eval instant at 0s http_requests{foo!~".*", job="api-server", instance="0", group="production"}
+
+clear
diff --git a/src/query/test/compatibility/testdata/staleness.test b/src/query/test/compatibility/testdata/staleness.test
new file mode 100644
index 0000000000..000a6051cb
--- /dev/null
+++ b/src/query/test/compatibility/testdata/staleness.test
@@ -0,0 +1,50 @@
+load 10s
+ metric 0 1 stale 2
+
+# Instant vector doesn't return series when stale.
+eval instant at 10s metric
+ {__name__="metric"} 1
+
+#eval instant at 20s metric
+
+eval instant at 30s metric
+ {__name__="metric"} 2
+
+eval instant at 40s metric
+ {__name__="metric"} 2
+
+# It goes stale 5 minutes after the last sample.
+eval instant at 330s metric
+ {__name__="metric"} 2
+
+#eval instant at 331s metric
+
+
+# Range vector ignores stale sample.
+eval instant at 30s count_over_time(metric[1m])
+ {} 3
+
+eval instant at 10s count_over_time(metric[1s])
+ {} 1
+
+# FAILING. eval instant at 20s count_over_time(metric[1s])
+
+eval instant at 20s count_over_time(metric[10s])
+ {} 1
+
+clear
+
+load 10s
+ metric 0
+
+# Series with single point goes stale after 5 minutes.
+eval instant at 0s metric
+ {__name__="metric"} 0
+
+eval instant at 150s metric
+ {__name__="metric"} 0
+
+eval instant at 300s metric
+ {__name__="metric"} 0
+
+# FAILING. eval instant at 301s metric
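
The expectations above follow from two rules: an instant selector looks back at most five minutes for the most recent sample, and an explicit staleness marker hides the series immediately. A rough sketch of that selection logic, with simplified types rather than the actual engine code:

```go
type sample struct {
	ts    int64 // unix seconds
	val   float64
	stale bool // explicit staleness marker
}

const lookbackSeconds = 300 // instant selectors look back at most 5m

// instantValue returns the value an instant selector would see at time t,
// given samples sorted by ascending timestamp.
func instantValue(samples []sample, t int64) (float64, bool) {
	for i := len(samples) - 1; i >= 0; i-- {
		s := samples[i]
		if s.ts > t {
			continue // sample is in the future relative to t
		}
		if t-s.ts > lookbackSeconds || s.stale {
			return 0, false // outside the lookback window, or marked stale
		}
		return s.val, true
	}
	return 0, false
}
```

With `metric 0 1 stale 2` loaded at 10s intervals this yields 1 at 10s, nothing at 20s, and 2 from 30s through 330s, matching the cases above.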
diff --git a/src/query/test/compatibility/testdata/subquery.test b/src/query/test/compatibility/testdata/subquery.test
new file mode 100644
index 0000000000..376f066087
--- /dev/null
+++ b/src/query/test/compatibility/testdata/subquery.test
@@ -0,0 +1,117 @@
+load 10s
+ metric 1 2
+
+# Evaluation before 0s gets no sample.
+# FAILING issue #38. eval instant at 10s sum_over_time(metric[50s:10s])
+# {} 3
+
+# FAILING issue #38. eval instant at 10s sum_over_time(metric[50s:5s])
+# {} 4
+
+# Every evaluation yields the last value, i.e. 2
+# FAILING issue #38. eval instant at 5m sum_over_time(metric[50s:10s])
+# {} 12
+
+# Series becomes stale at 5m10s (5m after last sample)
+# Hence subquery gets a single sample at 6m-50s=5m10s.
+# FAILING issue #38. eval instant at 6m sum_over_time(metric[50s:10s])
+# {} 2
+
+# FAILING issue #38. eval instant at 10s rate(metric[20s:10s])
+# {} 0.1
+
+# FAILING issue #38. eval instant at 20s rate(metric[20s:5s])
+# {} 0.05
+
+clear
+
+load 10s
+ http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000
+ http_requests{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000
+ http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000
+ http_requests{job="api-server", instance="1", group="canary"} 0+40x2000
+
+# FAILING issue #38. eval instant at 8000s rate(http_requests{group=~"pro.*"}[1m:10s])
+# {job="api-server", instance="0", group="production"} 1
+# {job="api-server", instance="1", group="production"} 2
+
+# FAILING issue #38. eval instant at 20000s avg_over_time(rate(http_requests[1m])[1m:1s])
+# {job="api-server", instance="0", group="canary"} 8
+# {job="api-server", instance="1", group="canary"} 4
+# {job="api-server", instance="1", group="production"} 3
+# {job="api-server", instance="0", group="production"} 3
+
+clear
+
+load 10s
+ metric1 0+1x1000
+ metric2 0+2x1000
+ metric3 0+3x1000
+
+# FAILING issue #38. eval instant at 1000s sum_over_time(metric1[30s:10s])
+# {} 394
+
+# This is (394*2 - 100), because other than the last 100 at 1000s,
+# everything else is repeated with the 5s step.
+# FAILING issue #38. eval instant at 1000s sum_over_time(metric1[30s:5s])
+# {} 688
+
+# Offset is aligned with the step.
+# FAILING issue #38. eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s)
+# {} 394
+
+# Same result for different offsets due to step alignment.
+# FAILING issue #38. eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s)
+# {} 297
+
+# FAILING issue #38. eval instant at 1010s sum_over_time(metric1[30s:10s] offset 7s)
+# {} 297
+
+# FAILING issue #38. eval instant at 1010s sum_over_time(metric1[30s:10s] offset 5s)
+# {} 297
+
+# FAILING issue #38. eval instant at 1010s sum_over_time(metric1[30s:10s] offset 3s)
+# {} 297
+
+# FAILING issue #38. eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s)
+# {} 297
+
+# Nested subqueries
+# FAILING issue #38. eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s])
+# {} 0.4
+
+# FAILING issue #38. eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s])
+# {} 0.8
+
+# FAILING issue #38. eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s])
+# {} 1.2
+
+# FAILING issue #38. eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s])
+# {} 2.4
+
+clear
+
+# Fibonacci sequence, to ensure the rate is not constant.
+# Additional note: using subqueries unnecessarily is unwise.
+load 7s
+ metric 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181 6765 10946 17711 28657 46368 75025 121393 196418 317811 514229 832040 1346269 2178309 3524578 5702887 9227465 14930352 24157817 39088169 63245986 102334155 165580141 267914296 433494437 701408733 1134903170 1836311903 2971215073 4807526976 7778742049 12586269025 20365011074 32951280099 53316291173 86267571272 139583862445 225851433717 365435296162 591286729879 956722026041 1548008755920 2504730781961 4052739537881 6557470319842 10610209857723 17167680177565 27777890035288 44945570212853 72723460248141 117669030460994 190392490709135 308061521170129 498454011879264 806515533049393 1304969544928657 2111485077978050 3416454622906707 5527939700884757 8944394323791464 14472334024676221 23416728348467685 37889062373143906 61305790721611591 99194853094755497 160500643816367088 259695496911122585 420196140727489673 679891637638612258 1100087778366101931 1779979416004714189 2880067194370816120 4660046610375530309 7540113804746346429 12200160415121876738 19740274219868223167 31940434634990099905 51680708854858323072 83621143489848422977 135301852344706746049 218922995834555169026 354224848179261915075 573147844013817084101 927372692193078999176 1500520536206896083277 2427893228399975082453 3928413764606871165730 6356306993006846248183 10284720757613717413913 16641027750620563662096 26925748508234281076009 43566776258854844738105 70492524767089125814114 114059301025943970552219 184551825793033096366333 298611126818977066918552 483162952612010163284885 781774079430987230203437 1264937032042997393488322 2046711111473984623691759 3311648143516982017180081 5358359254990966640871840 8670007398507948658051921 14028366653498915298923761 22698374052006863956975682 36726740705505779255899443 59425114757512643212875125 96151855463018422468774568 155576970220531065681649693 251728825683549488150424261 407305795904080553832073954 659034621587630041982498215 1066340417491710595814572169 1725375039079340637797070384 2791715456571051233611642553 4517090495650391871408712937 7308805952221443105020355490 11825896447871834976429068427 19134702400093278081449423917 30960598847965113057878492344 50095301248058391139327916261 81055900096023504197206408605 131151201344081895336534324866 212207101440105399533740733471 343358302784187294870275058337 555565404224292694404015791808 898923707008479989274290850145 1454489111232772683678306641953 2353412818241252672952597492098 3807901929474025356630904134051 6161314747715278029583501626149 9969216677189303386214405760200 16130531424904581415797907386349 26099748102093884802012313146549 42230279526998466217810220532898 68330027629092351019822533679447 110560307156090817237632754212345 178890334785183168257455287891792 289450641941273985495088042104137 468340976726457153752543329995929 757791618667731139247631372100066 1226132595394188293000174702095995 1983924214061919432247806074196061 3210056809456107725247980776292056 5193981023518027157495786850488117 8404037832974134882743767626780173 13598018856492162040239554477268290 22002056689466296922983322104048463 35600075545958458963222876581316753 57602132235424755886206198685365216 93202207781383214849429075266681969 150804340016807970735635273952047185 244006547798191185585064349218729154 394810887814999156320699623170776339 638817435613190341905763972389505493 1033628323428189498226463595560281832 1672445759041379840132227567949787325 2706074082469569338358691163510069157 4378519841510949178490918731459856482 7084593923980518516849609894969925639 11463113765491467695340528626429782121 18547707689471986212190138521399707760
+
+# Extrapolated from [3@21, 144@77]: (144 - 3) / (77 - 21)
+eval instant at 80s rate(metric[1m])
+ {} 2.517857143
+
+# No extrapolation, [2@20, 144@80]: (144 - 2) / 60
+# FAILING issue #38. eval instant at 80s rate(metric[1m:10s])
+# {} 2.366666667
+
+# Only one value between 10s and 20s, 2@14
+eval instant at 20s min_over_time(metric[10s])
+ {} 2
+
+# min(1@10, 2@20)
+# FAILING issue #38. eval instant at 20s min_over_time(metric[10s:10s])
+# {} 1
+
+# FAILING issue #38. eval instant at 20m min_over_time(rate(metric[5m])[20m:1m])
+# {} 0.12119047619047618
+
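The identical 297 results for offsets of 3s through 9s above come from subquery evaluation steps being aligned to absolute multiples of the step, not to the evaluation time. A sketch of how the evaluation timestamps are chosen (illustrative, not the engine code):

```go
// subqueryEvalTimes returns the timestamps (unix seconds) at which the
// inner query of expr[rng:step] offset off is evaluated at time t.
func subqueryEvalTimes(t, rng, step, off int64) []int64 {
	start := t - off - rng
	// Align the first evaluation up to the next absolute multiple of step.
	if rem := start % step; rem > 0 {
		start += step - rem
	} else if rem < 0 {
		start -= rem
	}
	var times []int64
	for ts := start; ts <= t-off; ts += step {
		times = append(times, ts)
	}
	return times
}
```

For t=1010s, rng=30s, step=10s, any offset from 3s to 9s produces {980, 990, 1000} and hence the same 297 sum, while offset 10s produces {970, 980, 990, 1000} and the 394 sum.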
diff --git a/src/query/test/mock_pools.go b/src/query/test/mock_pools.go
index 727b607f79..b543d321a1 100644
--- a/src/query/test/mock_pools.go
+++ b/src/query/test/mock_pools.go
@@ -107,7 +107,9 @@ func (ip *MockIteratorPool) ID() ident.Pool {
// TagDecoder exposes the session's tag decoder pool
func (ip *MockIteratorPool) TagDecoder() serialize.TagDecoderPool {
ip.DecodePoolUsed = true
- decoderPool := serialize.NewTagDecoderPool(serialize.NewTagDecoderOptions(), poolOpts)
+ decoderPool := serialize.NewTagDecoderPool(
+ serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{}),
+ poolOpts)
decoderPool.Init()
return decoderPool
}
diff --git a/src/query/test/seriesiter/mock_iter.go b/src/query/test/seriesiter/mock_iter.go
index b8cc7c5d47..525ed844c6 100644
--- a/src/query/test/seriesiter/mock_iter.go
+++ b/src/query/test/seriesiter/mock_iter.go
@@ -39,7 +39,8 @@ func GenerateSingleSampleTagIterator(ctrl *gomock.Controller, tag ident.Tag) ide
mockTagIterator.EXPECT().Next().Return(true).MaxTimes(1)
mockTagIterator.EXPECT().Current().Return(tag).MaxTimes(1)
mockTagIterator.EXPECT().Next().Return(false).MaxTimes(1)
- mockTagIterator.EXPECT().Err().Return(nil).MaxTimes(1)
+ mockTagIterator.EXPECT().Err().Return(nil).AnyTimes()
+ mockTagIterator.EXPECT().Rewind().Return().MaxTimes(1)
mockTagIterator.EXPECT().Close().AnyTimes()
return mockTagIterator
@@ -87,7 +88,7 @@ func NewMockSeriesIteratorFromBase(mockIter *encoding.MockSeriesIterator, tagGen
tags := tagGenerator()
mockIter.EXPECT().Namespace().Return(ident.StringID("foo")).AnyTimes()
mockIter.EXPECT().ID().Return(ident.StringID("bar")).AnyTimes()
- mockIter.EXPECT().Tags().Return(tags).MaxTimes(1)
+ mockIter.EXPECT().Tags().Return(tags).AnyTimes()
mockIter.EXPECT().Close().Do(func() {
// Make sure to close the tags generated when closing the iter
tags.Close()
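
The switch from MaxTimes(1) to AnyTimes() above loosens the mock contracts: MaxTimes(n) allows at most n calls, while AnyTimes() allows zero or more, which suits methods such as Err() that callers may invoke repeatedly. A short sketch of the difference, assuming a *testing.T t and a tags value as in this file:

```go
ctrl := gomock.NewController(t)
defer ctrl.Finish()

iter := encoding.NewMockSeriesIterator(ctrl)
// At most one call; a second call to Tags() fails the test.
iter.EXPECT().Tags().Return(tags).MaxTimes(1)
// Zero or more calls are all acceptable.
iter.EXPECT().Err().Return(nil).AnyTimes()
```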
diff --git a/src/query/test/storage.go b/src/query/test/storage.go
index a78eec0285..ad5204b129 100644
--- a/src/query/test/storage.go
+++ b/src/query/test/storage.go
@@ -26,6 +26,7 @@ import (
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/storage"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
)
// slowStorage slows down a request by delay
@@ -73,7 +74,7 @@ func (s *slowStorage) CompleteTags(
ctx context.Context,
query *storage.CompleteTagsQuery,
options *storage.FetchOptions,
-) (*storage.CompleteTagsResult, error) {
+) (*consolidators.CompleteTagsResult, error) {
time.Sleep(s.delay)
return s.storage.CompleteTags(ctx, query, options)
}
diff --git a/src/query/test/test_series_iterator.go b/src/query/test/test_series_iterator.go
index f28aa638f2..a2adb81b66 100644
--- a/src/query/test/test_series_iterator.go
+++ b/src/query/test/test_series_iterator.go
@@ -137,7 +137,6 @@ func buildReplica() (encoding.MultiReaderIterator, error) {
{mergedReader},
unmergedReaders,
})
-
multiReader.ResetSliceOfSlices(sliceOfSlicesIter, nil)
return multiReader, nil
}
@@ -185,8 +184,8 @@ func BuildTestSeriesIterator(id string) (encoding.SeriesIterator, error) {
ID: ident.StringID(id),
Namespace: ident.StringID(SeriesNamespace),
Tags: ident.NewTagsIterator(tags),
- StartInclusive: SeriesStart,
- EndExclusive: End,
+ StartInclusive: xtime.ToUnixNano(SeriesStart),
+ EndExclusive: xtime.ToUnixNano(End),
Replicas: []encoding.MultiReaderIterator{
replicaOne,
replicaTwo,
@@ -263,8 +262,8 @@ func BuildCustomIterator(
ID: ident.StringID(seriesID),
Namespace: ident.StringID(seriesNamespace),
Tags: ident.NewTagsIterator(tags),
- StartInclusive: start,
- EndExclusive: currentStart.Add(blockSize),
+ StartInclusive: xtime.ToUnixNano(start),
+ EndExclusive: xtime.ToUnixNano(currentStart.Add(blockSize)),
Replicas: []encoding.MultiReaderIterator{
multiReader,
},
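
The StartInclusive/EndExclusive edits here (and in the conversion code below) reflect SeriesIteratorOptions moving from time.Time to xtime.UnixNano. Assuming the usual m3 x/time semantics, the conversion is a cheap, lossless wrap around an int64 nanosecond count:

```go
start := time.Now()
nano := xtime.ToUnixNano(start) // UnixNano wraps int64 nanoseconds
back := nano.ToTime()           // round-trips to the same instant
fmt.Println(start.Equal(back))  // true
```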
diff --git a/src/query/tracepoint/tracepoint.go b/src/query/tracepoint/tracepoint.go
index 99c0499b55..cdcd02b316 100644
--- a/src/query/tracepoint/tracepoint.go
+++ b/src/query/tracepoint/tracepoint.go
@@ -31,6 +31,9 @@ const (
// FetchCompressedFetchTagged is for the call to FetchTagged in fetchCompressed.
FetchCompressedFetchTagged = "m3.m3storage.fetchCompressed.FetchTagged"
+ // FetchCompressedInspectSeries is for the call to InspectSeries in FetchCompressed.
+ FetchCompressedInspectSeries = "m3.m3storage.FetchCompressed.InspectSeries"
+
// SearchCompressedFetchTaggedIDs is for the call to FetchTaggedIDs in SearchCompressed.
SearchCompressedFetchTaggedIDs = "m3.m3storage.SearchCompressed.FetchTaggedIDs"
diff --git a/src/query/ts/m3db/consolidators/series_consolidator.go b/src/query/ts/m3db/consolidators/series_consolidator.go
index 9afb710db2..145ab1db6c 100644
--- a/src/query/ts/m3db/consolidators/series_consolidator.go
+++ b/src/query/ts/m3db/consolidators/series_consolidator.go
@@ -47,12 +47,13 @@ func NewSeriesLookbackConsolidator(
startTime time.Time,
fn ConsolidationFunc,
) *SeriesLookbackConsolidator {
+ datapoints := make([]ts.Datapoint, 0, initLength)
return &SeriesLookbackConsolidator{
lookbackDuration: lookbackDuration,
stepSize: stepSize,
earliestLookback: startTime.Add(-1 * lookbackDuration),
consolidated: math.NaN(),
- datapoints: make([]ts.Datapoint, 0, initLength),
+ datapoints: datapoints,
fn: fn,
}
}
@@ -76,6 +77,18 @@ func (c *SeriesLookbackConsolidator) ConsolidateAndMoveToNext() float64 {
c.earliestLookback = c.earliestLookback.Add(c.stepSize)
c.consolidated = c.fn(c.datapoints)
- c.datapoints = removeStale(c.earliestLookback, c.datapoints)
+
+ // Remove any datapoints not relevant to the next step now.
+ datapointsRelevant := removeStale(c.earliestLookback, c.datapoints)
+ if len(datapointsRelevant) > 0 {
+ // Move them back to the start of the slice to reuse the slice
+ // as best as possible.
+ c.datapoints = c.datapoints[:len(datapointsRelevant)]
+ copy(c.datapoints, datapointsRelevant)
+ } else {
+ // No relevant datapoints, repoint to the start of the buffer.
+ c.datapoints = c.datapoints[:0]
+ }
+
return c.consolidated
}
diff --git a/src/query/ts/m3db/consolidators/step_accumulator.go b/src/query/ts/m3db/consolidators/step_accumulator.go
index a61da25924..eb6e052ce8 100644
--- a/src/query/ts/m3db/consolidators/step_accumulator.go
+++ b/src/query/ts/m3db/consolidators/step_accumulator.go
@@ -104,6 +104,16 @@ func (c *StepLookbackAccumulator) AccumulateAndMoveToNext() []xts.Datapoint {
}
val := c.unconsumed[0]
- c.unconsumed = c.unconsumed[1:]
+ remaining := c.unconsumed[1:]
+
+ if len(remaining) > 0 {
+ // Move any unconsumed values to the front of unconsumed.
+ c.unconsumed = c.buffer[:len(remaining)]
+ copy(c.unconsumed, remaining)
+ } else {
+ // Otherwise just repoint to the start of the buffer.
+ c.unconsumed = c.buffer[:0]
+ }
+
return val
}
diff --git a/src/query/ts/m3db/consolidators/step_consolidator.go b/src/query/ts/m3db/consolidators/step_consolidator.go
index 37eea4601c..6d53c3dabb 100644
--- a/src/query/ts/m3db/consolidators/step_consolidator.go
+++ b/src/query/ts/m3db/consolidators/step_consolidator.go
@@ -91,7 +91,20 @@ func (c *StepLookbackConsolidator) AddPoint(dp ts.Datapoint) {
func (c *StepLookbackConsolidator) BufferStep() {
c.earliestLookback = c.earliestLookback.Add(c.stepSize)
val := c.fn(c.datapoints)
- c.datapoints = removeStale(c.earliestLookback, c.datapoints)
+
+ // Remove any datapoints not relevant to the next step now.
+ datapointsRelevant := removeStale(c.earliestLookback, c.datapoints)
+ if len(datapointsRelevant) > 0 {
+ // Move them back to the start of the slice to reuse the slice
+ // as best as possible.
+ c.datapoints = c.datapoints[:len(datapointsRelevant)]
+ copy(c.datapoints, datapointsRelevant)
+ } else {
+ // No relevant datapoints, repoint to the start of the buffer.
+ c.datapoints = c.datapoints[:0]
+ }
+
+ // Blindly append to unconsumed.
c.unconsumed = append(c.unconsumed, val)
}
@@ -107,7 +120,18 @@ func (c *StepLookbackConsolidator) ConsolidateAndMoveToNext() float64 {
return c.fn(nil)
}
+ // Consume value.
val := c.unconsumed[0]
- c.unconsumed = c.unconsumed[1:]
+ remaining := c.unconsumed[1:]
+
+ if len(remaining) > 0 {
+ // Move any unconsumed values to the front of unconsumed.
+ c.unconsumed = c.buffer[:len(remaining)]
+ copy(c.unconsumed, remaining)
+ } else {
+ // Otherwise just repoint to the start of the buffer.
+ c.unconsumed = c.buffer[:0]
+ }
+
return val
}
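
The series_consolidator, step_accumulator, and step_consolidator edits all apply the same idiom: instead of re-slicing forward (buf = buf[1:]), which strands the front of the backing array, surviving values are copied back to index zero so the original allocation keeps being reused. A generic sketch of the idiom, with illustrative names:

```go
// compactFront keeps buf[firstKept:] but moves it to the start of the
// backing array, so later appends reuse existing capacity instead of
// growing a slice whose usable window keeps shrinking.
func compactFront(buf []float64, firstKept int) []float64 {
	remaining := buf[firstKept:]
	if len(remaining) == 0 {
		return buf[:0] // repoint at the start of the backing array
	}
	out := buf[:len(remaining)]
	copy(out, remaining) // forward copy is safe for overlapping slices
	return out
}
```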
diff --git a/src/query/ts/m3db/consolidators/types.go b/src/query/ts/m3db/consolidators/types.go
index 54979668e3..597fecad04 100644
--- a/src/query/ts/m3db/consolidators/types.go
+++ b/src/query/ts/m3db/consolidators/types.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -58,7 +58,7 @@ func TakeLast(values []ts.Datapoint) float64 {
return math.NaN()
}
-const initLength = 10
+const initLength = BufferSteps
// Set NaN to a variable makes tests easier.
var nan = math.NaN()
diff --git a/src/query/ts/m3db/convert.go b/src/query/ts/m3db/convert.go
index 09fdc1218f..49a9220736 100644
--- a/src/query/ts/m3db/convert.go
+++ b/src/query/ts/m3db/convert.go
@@ -30,8 +30,10 @@ import (
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
+ xtime "github.com/m3db/m3/src/x/time"
)
const (
@@ -71,12 +73,11 @@ func (b seriesBlocks) Less(i, j int) bool {
}
func seriesIteratorsToEncodedBlockIterators(
- iterators encoding.SeriesIterators,
+ result consolidators.SeriesFetchResult,
bounds models.Bounds,
- resultMeta block.ResultMetadata,
opts Options,
) ([]block.Block, error) {
- bl, err := NewEncodedBlock(iterators.Iters(), bounds, true, resultMeta, opts)
+ bl, err := NewEncodedBlock(result, bounds, true, opts)
if err != nil {
return nil, err
}
@@ -88,9 +89,8 @@ func seriesIteratorsToEncodedBlockIterators(
// lookback is greater than 0, converts the entire series into a single block,
// otherwise, splits the series into blocks.
func ConvertM3DBSeriesIterators(
- iterators encoding.SeriesIterators,
+ result consolidators.SeriesFetchResult,
bounds models.Bounds,
- resultMeta block.ResultMetadata,
opts Options,
) ([]block.Block, error) {
if err := opts.Validate(); err != nil {
@@ -98,32 +98,35 @@ func ConvertM3DBSeriesIterators(
}
if opts.SplittingSeriesByBlock() {
- return convertM3DBSegmentedBlockIterators(iterators, bounds,
- resultMeta, opts)
+ return convertM3DBSegmentedBlockIterators(result, bounds, opts)
}
- return seriesIteratorsToEncodedBlockIterators(iterators, bounds,
- resultMeta, opts)
+ return seriesIteratorsToEncodedBlockIterators(result, bounds, opts)
}
// convertM3DBSegmentedBlockIterators converts series iterators to a list of blocks
func convertM3DBSegmentedBlockIterators(
- iterators encoding.SeriesIterators,
+ result consolidators.SeriesFetchResult,
bounds models.Bounds,
- resultMeta block.ResultMetadata,
opts Options,
) ([]block.Block, error) {
- defer iterators.Close()
- blockBuilder := newEncodedBlockBuilder(resultMeta, opts)
+ defer result.Close()
+ blockBuilder := newEncodedBlockBuilder(result, opts)
var (
iterAlloc = opts.IterAlloc()
pools = opts.IteratorPools()
checkedPools = opts.CheckedBytesPool()
)
- for _, seriesIterator := range iterators.Iters() {
+ count := result.Count()
+ for i := 0; i < count; i++ {
+ iter, tags, err := result.IterTagsAtIndex(i, opts.TagOptions())
+ if err != nil {
+ return nil, err
+ }
+
blockReplicas, err := blockReplicasFromSeriesIterator(
- seriesIterator,
+ iter,
iterAlloc,
bounds,
pools,
@@ -137,16 +140,19 @@ func convertM3DBSegmentedBlockIterators(
blockReplicas = updateSeriesBlockStarts(
blockReplicas,
bounds.StepSize,
- seriesIterator.Start(),
+ iter.Start(),
)
err = seriesBlocksFromBlockReplicas(
blockBuilder,
+ tags,
+ result.Metadata,
blockReplicas,
bounds.StepSize,
- seriesIterator,
+ iter,
pools,
)
+
if err != nil {
return nil, err
}
@@ -168,7 +174,12 @@ func blockReplicasFromSeriesIterator(
pool = pools.MultiReaderIterator()
}
- for _, replica := range seriesIterator.Replicas() {
+ replicas, err := seriesIterator.Replicas()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, replica := range replicas {
perBlockSliceReaders := replica.Readers()
for next := true; next; next = perBlockSliceReaders.Next() {
l, start, bs := perBlockSliceReaders.CurrentReaders()
@@ -256,23 +267,20 @@ func updateSeriesBlockStarts(
func seriesBlocksFromBlockReplicas(
blockBuilder *encodedBlockBuilder,
+ tags models.Tags,
+ resultMetadata block.ResultMetadata,
blockReplicas seriesBlocks,
stepSize time.Duration,
seriesIterator encoding.SeriesIterator,
pools encoding.IteratorPools,
) error {
- // NB(braskin): we need to clone the ID, namespace, and tags since we close the series iterator
+ // NB: clone ID and Namespace since they must be owned by the series blocks.
var (
// todo(braskin): use ident pool
clonedID = ident.StringID(seriesIterator.ID().String())
clonedNamespace = ident.StringID(seriesIterator.Namespace().String())
)
- clonedTags, err := cloneTagIterator(seriesIterator.Tags())
- if err != nil {
- return err
- }
-
replicaLength := len(blockReplicas) - 1
// TODO: use pooling
for i, block := range blockReplicas {
@@ -290,9 +298,8 @@ func seriesBlocksFromBlockReplicas(
iter := encoding.NewSeriesIterator(encoding.SeriesIteratorOptions{
ID: clonedID,
Namespace: clonedNamespace,
- Tags: clonedTags.Duplicate(),
- StartInclusive: filterValuesStart,
- EndExclusive: filterValuesEnd,
+ StartInclusive: xtime.ToUnixNano(filterValuesStart),
+ EndExclusive: xtime.ToUnixNano(filterValuesEnd),
Replicas: block.replicas,
}, nil)
@@ -307,34 +314,17 @@ func seriesBlocksFromBlockReplicas(
// Instead, we should access them through the SeriesBlock.
isLastBlock := i == replicaLength
blockBuilder.add(
+ iter,
+ tags,
+ resultMetadata,
models.Bounds{
Start: block.readStart,
Duration: duration,
StepSize: stepSize,
},
- iter,
isLastBlock,
)
}
return nil
}
-
-func cloneTagIterator(tagIter ident.TagIterator) (ident.TagIterator, error) {
- tags := ident.NewTags()
- dupeIter := tagIter.Duplicate()
- for dupeIter.Next() {
- tag := dupeIter.Current()
- tags.Append(ident.Tag{
- Name: ident.BytesID(tag.Name.Bytes()),
- Value: ident.BytesID(tag.Value.Bytes()),
- })
- }
-
- err := dupeIter.Err()
- if err != nil {
- return nil, err
- }
-
- return ident.NewTagsIterator(tags), nil
-}
diff --git a/src/query/ts/m3db/convert_test.go b/src/query/ts/m3db/convert_test.go
index 47487f3a1d..d5d565ebe8 100644
--- a/src/query/ts/m3db/convert_test.go
+++ b/src/query/ts/m3db/convert_test.go
@@ -21,8 +21,10 @@
package m3db
import (
+ "bytes"
"fmt"
"math"
+ "sort"
"testing"
"time"
@@ -144,24 +146,33 @@ func verifyBoundsAndGetBlockIndex(t *testing.T, bounds, sub models.Bounds) int {
return int(diff / blockSize)
}
+type ascByName []block.SeriesMeta
+
+func (m ascByName) Len() int { return len(m) }
+func (m ascByName) Less(i, j int) bool {
+ return bytes.Compare(m[i].Name, m[j].Name) == -1
+}
+func (m ascByName) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
func verifyMetas(
t *testing.T,
- i int,
+ _ int,
meta block.Metadata,
metas []block.SeriesMeta,
) {
require.Equal(t, 0, meta.Tags.Len())
+ sort.Sort(ascByName(metas))
for i, m := range metas {
- assert.Equal(t, []byte(fmt.Sprintf("abc%d", i)), m.Name)
+ assert.Equal(t, fmt.Sprintf("abc%d", i), string(m.Name))
require.Equal(t, 2, m.Tags.Len())
val, found := m.Tags.Get([]byte("a"))
assert.True(t, found)
- assert.Equal(t, []byte("b"), val)
+ assert.Equal(t, "b", string(val))
val, found = m.Tags.Get([]byte("c"))
assert.True(t, found)
- assert.Equal(t, []byte(fmt.Sprint(i)), val)
+ require.Equal(t, fmt.Sprint(i), string(val))
}
}
@@ -175,10 +186,11 @@ func generateBlocks(
opts Options,
) ([]block.Block, models.Bounds) {
iterators, bounds := generateIterators(t, stepSize)
+ res, err := iterToFetchResult(iterators)
+ require.NoError(t, err)
blocks, err := ConvertM3DBSeriesIterators(
- encoding.NewSeriesIterators(iterators, nil),
+ res,
bounds,
- block.NewResultMetadata(),
opts,
)
require.NoError(t, err)
diff --git a/src/query/ts/m3db/encoded_block.go b/src/query/ts/m3db/encoded_block.go
index e36c160ba1..969231c9aa 100644
--- a/src/query/ts/m3db/encoded_block.go
+++ b/src/query/ts/m3db/encoded_block.go
@@ -26,12 +26,12 @@ import (
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
- "github.com/m3db/m3/src/query/storage"
- "github.com/m3db/m3/src/query/ts/m3db/consolidators"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
+ tsconsolidators "github.com/m3db/m3/src/query/ts/m3db/consolidators"
)
type consolidationSettings struct {
- consolidationFn consolidators.ConsolidationFunc
+ consolidationFn tsconsolidators.ConsolidationFunc
currentTime time.Time
bounds models.Bounds
}
@@ -49,47 +49,68 @@ type encodedBlock struct {
// NewEncodedBlock builds an encoded block.
func NewEncodedBlock(
- seriesBlockIterators []encoding.SeriesIterator,
+ result consolidators.SeriesFetchResult,
bounds models.Bounds,
lastBlock bool,
- resultMeta block.ResultMetadata,
opts Options,
) (block.Block, error) {
+ if err := result.Verify(); err != nil {
+ return nil, err
+ }
+
consolidation := consolidationSettings{
- consolidationFn: consolidators.TakeLast,
+ consolidationFn: tsconsolidators.TakeLast,
currentTime: bounds.Start,
bounds: bounds,
}
- bl := newEncodedBlock(
- seriesBlockIterators,
+ bl, err := newEncodedBlock(
+ result,
consolidation,
lastBlock,
- resultMeta,
opts,
)
- if err := bl.generateMetas(); err != nil {
+ if err != nil {
return nil, err
}
- return &bl, nil
+ return bl, nil
}
func newEncodedBlock(
- seriesBlockIterators []encoding.SeriesIterator,
+ result consolidators.SeriesFetchResult,
consolidation consolidationSettings,
lastBlock bool,
- resultMeta block.ResultMetadata,
options Options,
-) encodedBlock {
- return encodedBlock{
- seriesBlockIterators: seriesBlockIterators,
+) (*encodedBlock, error) {
+ count := result.Count()
+ seriesMetas := make([]block.SeriesMeta, 0, count)
+ for i := 0; i < count; i++ {
+ iter, tags, err := result.IterTagsAtIndex(i, options.TagOptions())
+ if err != nil {
+ return nil, err
+ }
+
+ seriesMetas = append(seriesMetas, block.SeriesMeta{
+ Name: iter.ID().Bytes(),
+ Tags: tags,
+ })
+ }
+
+ return &encodedBlock{
+ seriesBlockIterators: result.SeriesIterators(),
consolidation: consolidation,
lastBlock: lastBlock,
- resultMeta: resultMeta,
+ resultMeta: result.Metadata,
options: options,
- }
+ seriesMetas: seriesMetas,
+ meta: block.Metadata{
+ Tags: models.NewTags(0, options.TagOptions()),
+ Bounds: consolidation.bounds,
+ ResultMetadata: result.Metadata,
+ },
+ }, nil
}
func (b *encodedBlock) Close() error {
@@ -104,42 +125,6 @@ func (b *encodedBlock) Meta() block.Metadata {
return b.meta
}
-func (b *encodedBlock) buildSeriesMeta() error {
- b.seriesMetas = make([]block.SeriesMeta, len(b.seriesBlockIterators))
- tagOptions := b.options.TagOptions()
- for i, iter := range b.seriesBlockIterators {
- tags, err := storage.FromIdentTagIteratorToTags(iter.Tags(), tagOptions)
- if err != nil {
- return err
- }
-
- b.seriesMetas[i] = block.SeriesMeta{
- Name: iter.ID().Bytes(),
- Tags: tags,
- }
- }
-
- return nil
-}
-
-func (b *encodedBlock) buildMeta() {
- b.meta = block.Metadata{
- Tags: models.NewTags(0, b.options.TagOptions()),
- Bounds: b.consolidation.bounds,
- ResultMetadata: b.resultMeta,
- }
-}
-
-func (b *encodedBlock) generateMetas() error {
- err := b.buildSeriesMeta()
- if err != nil {
- return err
- }
-
- b.buildMeta()
- return nil
-}
-
func (b *encodedBlock) Info() block.BlockInfo {
return block.NewBlockInfo(block.BlockM3TSZCompressed)
}
diff --git a/src/query/ts/m3db/encoded_block_builder.go b/src/query/ts/m3db/encoded_block_builder.go
index 93021f9866..100a21af47 100644
--- a/src/query/ts/m3db/encoded_block_builder.go
+++ b/src/query/ts/m3db/encoded_block_builder.go
@@ -28,7 +28,9 @@ import (
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/x/ident"
+ xtime "github.com/m3db/m3/src/x/time"
)
const initBlockLength = 10
@@ -47,21 +49,23 @@ type encodedBlockBuilder struct {
}
func newEncodedBlockBuilder(
- resultMeta block.ResultMetadata,
+ result consolidators.SeriesFetchResult,
options Options,
) *encodedBlockBuilder {
return &encodedBlockBuilder{
- resultMeta: resultMeta,
+ resultMeta: result.Metadata,
blocksAtTime: make(blocksAtTime, 0, initBlockLength),
options: options,
}
}
func (b *encodedBlockBuilder) add(
- bounds models.Bounds,
iter encoding.SeriesIterator,
+ tags models.Tags,
+ meta block.ResultMetadata,
+ bounds models.Bounds,
lastBlock bool,
-) {
+) error {
start := bounds.Start
consolidation := consolidationSettings{
consolidationFn: b.options.ConsolidationFunc(),
@@ -69,28 +73,40 @@ func (b *encodedBlockBuilder) add(
bounds: bounds,
}
+ seriesMeta := block.SeriesMeta{
+ Name: iter.ID().Bytes(),
+ Tags: tags,
+ }
+
for idx, bl := range b.blocksAtTime {
if bl.time.Equal(start) {
block := bl.block
block.seriesBlockIterators = append(block.seriesBlockIterators, iter)
+ block.seriesMetas = append(block.seriesMetas, seriesMeta)
b.blocksAtTime[idx].block = block
- return
+ return nil
}
}
- block := newEncodedBlock(
- []encoding.SeriesIterator{},
+ bl, err := newEncodedBlock(
+ consolidators.NewEmptyFetchResult(meta),
consolidation,
lastBlock,
- b.resultMeta,
b.options,
)
- block.seriesBlockIterators = append(block.seriesBlockIterators, iter)
+ if err != nil {
+ return err
+ }
+
+ bl.seriesBlockIterators = append(bl.seriesBlockIterators, iter)
+ bl.seriesMetas = append(bl.seriesMetas, seriesMeta)
b.blocksAtTime = append(b.blocksAtTime, blockAtTime{
time: start,
- block: block,
+ block: *bl,
})
+
+ return nil
}
func (b *encodedBlockBuilder) build() ([]block.Block, error) {
@@ -106,10 +122,6 @@ func (b *encodedBlockBuilder) build() ([]block.Block, error) {
blocks := make([]block.Block, 0, len(b.blocksAtTime))
for _, bl := range b.blocksAtTime {
block := bl.block
- if err := block.generateMetas(); err != nil {
- return nil, err
- }
-
blocks = append(blocks, &block)
}
@@ -129,7 +141,7 @@ func (b *encodedBlockBuilder) backfillMissing() error {
seenMap := make(map[string]seriesIteratorDetails, initBlockReplicaLength)
for idx, bl := range b.blocksAtTime {
block := bl.block
- for _, iter := range block.seriesBlockIterators {
+ for i, iter := range block.seriesBlockIterators {
id := iter.ID().String()
if seen, found := seenMap[id]; !found {
seenMap[id] = seriesIteratorDetails{
@@ -137,7 +149,7 @@ func (b *encodedBlockBuilder) backfillMissing() error {
end: iter.End(),
id: iter.ID(),
ns: iter.Namespace(),
- tagIter: iter.Tags(),
+ tags: block.seriesMetas[i].Tags,
present: []int{idx},
}
} else {
@@ -170,24 +182,21 @@ func (b *encodedBlockBuilder) backfillMissing() error {
continue
}
- // blockIdx does not contain the present value; need to populate it with
- // an empty series iterator.
- tags, err := cloneTagIterator(iterDetails.tagIter)
- if err != nil {
- return err
- }
-
iter := encoding.NewSeriesIterator(encoding.SeriesIteratorOptions{
ID: ident.StringID(iterDetails.id.String()),
Namespace: ident.StringID(iterDetails.ns.String()),
- Tags: tags,
- StartInclusive: iterDetails.start,
- EndExclusive: iterDetails.end,
+ StartInclusive: xtime.ToUnixNano(iterDetails.start),
+ EndExclusive: xtime.ToUnixNano(iterDetails.end),
}, nil)
- block := bl.block
- block.seriesBlockIterators = append(block.seriesBlockIterators, iter)
- b.blocksAtTime[blockIdx].block = block
+ newBl := bl.block
+ newBl.seriesBlockIterators = append(newBl.seriesBlockIterators, iter)
+ newBl.seriesMetas = append(newBl.seriesMetas, block.SeriesMeta{
+ Name: iter.ID().Bytes(),
+ Tags: iterDetails.tags,
+ })
+
+ b.blocksAtTime[blockIdx].block = newBl
}
}
@@ -197,7 +206,7 @@ func (b *encodedBlockBuilder) backfillMissing() error {
type seriesIteratorDetails struct {
start, end time.Time
id, ns ident.ID
- tagIter ident.TagIterator
+ tags models.Tags
// NB: the indices that this series iterator exists in already.
present []int
}
diff --git a/src/query/ts/m3db/encoded_series_iterator.go b/src/query/ts/m3db/encoded_series_iterator.go
index 70477baec9..725e739d5b 100644
--- a/src/query/ts/m3db/encoded_series_iterator.go
+++ b/src/query/ts/m3db/encoded_series_iterator.go
@@ -62,7 +62,8 @@ type encodedSeriesIter struct {
func (b *encodedBlock) SeriesIter() (block.SeriesIter, error) {
return NewEncodedSeriesIter(
- b.meta, b.seriesMetas, b.seriesBlockIterators, b.options.LookbackDuration(), b.options.Instrumented(),
+ b.meta, b.seriesMetas, b.seriesBlockIterators,
+ b.options.LookbackDuration(), b.options.Instrumented(),
), nil
}
@@ -193,7 +194,8 @@ func iteratorBatchingFn(
}
iter := NewEncodedSeriesIter(
- meta, seriesMetas[start:end], seriesBlockIterators[start:end], opts.LookbackDuration(), opts.Instrumented(),
+ meta, seriesMetas[start:end], seriesBlockIterators[start:end],
+ opts.LookbackDuration(), opts.Instrumented(),
)
iters = append(iters, block.SeriesIterBatch{
diff --git a/src/query/ts/m3db/encoded_step_iterator_test.go b/src/query/ts/m3db/encoded_step_iterator_test.go
index caeb696375..6b9a91f690 100644
--- a/src/query/ts/m3db/encoded_step_iterator_test.go
+++ b/src/query/ts/m3db/encoded_step_iterator_test.go
@@ -24,6 +24,8 @@ import (
"fmt"
"io"
"os"
+ "runtime"
+ "sync"
"testing"
"time"
@@ -34,10 +36,12 @@ import (
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
+ "github.com/m3db/m3/src/query/pools"
+ "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/test"
- "github.com/m3db/m3/src/query/ts/m3db/consolidators"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/ident"
+ "github.com/m3db/m3/src/x/pool"
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
"github.com/pkg/profile"
@@ -142,7 +146,7 @@ var consolidatedStepIteratorTests = []struct {
func testConsolidatedStepIteratorMinuteLookback(t *testing.T, withPools bool) {
for _, tt := range consolidatedStepIteratorTests {
- opts := NewOptions().
+ opts := newTestOptions().
SetLookbackDuration(1 * time.Minute).
SetSplitSeriesByBlock(false)
require.NoError(t, opts.Validate())
@@ -291,7 +295,7 @@ var consolidatedStepIteratorTestsSplitByBlock = []struct {
func testConsolidatedStepIteratorSplitByBlock(t *testing.T, withPools bool) {
for _, tt := range consolidatedStepIteratorTestsSplitByBlock {
- opts := NewOptions().
+ opts := newTestOptions().
SetLookbackDuration(0).
SetSplitSeriesByBlock(true)
require.NoError(t, opts.Validate())
@@ -328,7 +332,7 @@ func TestConsolidatedStepIteratorSplitByBlockSequential(t *testing.T) {
}
func benchmarkSingleBlock(b *testing.B, withPools bool) {
- opts := NewOptions().
+ opts := newTestOptions().
SetLookbackDuration(1 * time.Minute).
SetSplitSeriesByBlock(false)
require.NoError(b, opts.Validate())
@@ -373,6 +377,7 @@ const (
stepSequential iterType = iota
stepParallel
seriesSequential
+ seriesBatch
)
func (t iterType) name(name string) string {
@@ -384,6 +389,8 @@ func (t iterType) name(name string) string {
n = "sequential"
case seriesSequential:
n = "series"
+ case seriesBatch:
+ n = "series_batch"
default:
panic(fmt.Sprint("bad iter type", t))
}
@@ -391,9 +398,47 @@ func (t iterType) name(name string) string {
return fmt.Sprintf("%s_%s", n, name)
}
-func benchmarkNextIteration(b *testing.B, iterations int, t iterType) {
+type reset func()
+type stop func()
+
+// newTestOptions provides options with very small/non-existent pools
+// so that memory profiles don't get cluttered with pool-allocated objects.
+func newTestOptions() Options {
+ poolOpts := pool.NewObjectPoolOptions().SetSize(1)
+ bytesPool := pool.NewCheckedBytesPool(nil, poolOpts,
+ func(s []pool.Bucket) pool.BytesPool {
+ return pool.NewBytesPool(s, poolOpts)
+ })
+ bytesPool.Init()
+
+ iteratorPools := pools.BuildIteratorPools(pools.BuildIteratorPoolsOptions{
+ Replicas: 1,
+ SeriesIteratorPoolSize: 1,
+ SeriesIteratorsPoolBuckets: []pool.Bucket{
+ {Capacity: 1, Count: 1},
+ },
+ SeriesIDBytesPoolBuckets: []pool.Bucket{
+ {Capacity: 1, Count: 1},
+ },
+ CheckedBytesWrapperPoolSize: 1,
+ })
+ return newOptions(bytesPool, iteratorPools)
+}
+
+func iterToFetchResult(
+ iters []encoding.SeriesIterator,
+) (consolidators.SeriesFetchResult, error) {
+ iterators := encoding.NewSeriesIterators(iters, nil)
+ return consolidators.NewSeriesFetchResult(
+ iterators,
+ nil,
+ block.NewResultMetadata(),
+ )
+}
+
+func setupBlock(b *testing.B, iterations int, t iterType) (block.Block, reset, stop) {
var (
- seriesCount = 100
+ seriesCount = 1000
replicasCount = 3
start = time.Now()
stepSize = time.Second * 10
@@ -401,20 +446,12 @@ func benchmarkNextIteration(b *testing.B, iterations int, t iterType) {
end = start.Add(window)
iters = make([]encoding.SeriesIterator, seriesCount)
itersReset = make([]func(), seriesCount)
- collectors = make([]consolidators.StepCollector, seriesCount)
- peeks = make([]peekValue, seriesCount)
encodingOpts = encoding.NewOptions()
namespaceID = ident.StringID("namespace")
)
for i := 0; i < seriesCount; i++ {
- collectors[i] = consolidators.NewStepLookbackConsolidator(
- stepSize,
- stepSize,
- start,
- consolidators.TakeLast)
-
encoder := m3tsz.NewEncoder(start, checked.NewBytes(nil, nil),
m3tsz.DefaultIntOptimizationEnabled, encodingOpts)
@@ -455,8 +492,7 @@ func benchmarkNextIteration(b *testing.B, iterations int, t iterType) {
replicasIters[j] = iter
}
- seriesID := ident.StringID(fmt.Sprintf("foo.%d", i))
-
+ seriesID := ident.StringID(fmt.Sprintf("foo.%d", i+1))
tags, err := ident.NewTagStringsIterator("foo", "bar", "baz", "qux")
require.NoError(b, err)
@@ -464,7 +500,6 @@ func benchmarkNextIteration(b *testing.B, iterations int, t iterType) {
encoding.SeriesIteratorOptions{}, nil)
iters[i] = iter
-
itersReset[i] = func() {
// Reset the replica iters.
for _, replica := range replicas {
@@ -479,13 +514,51 @@ func benchmarkNextIteration(b *testing.B, iterations int, t iterType) {
Namespace: namespaceID,
Tags: tags,
Replicas: replicasIters,
- StartInclusive: start,
- EndExclusive: end,
+ StartInclusive: xtime.ToUnixNano(start),
+ EndExclusive: xtime.ToUnixNano(end),
})
}
}
usePools := t == stepParallel
+
+ opts := newTestOptions()
+ if usePools {
+ poolOpts := xsync.NewPooledWorkerPoolOptions()
+ readWorkerPools, err := xsync.NewPooledWorkerPool(1024, poolOpts)
+ require.NoError(b, err)
+ readWorkerPools.Init()
+ opts = opts.SetReadWorkerPool(readWorkerPools)
+ }
+
+ for _, reset := range itersReset {
+ reset()
+ }
+
+ res, err := iterToFetchResult(iters)
+ require.NoError(b, err)
+
+ block, err := NewEncodedBlock(
+ res,
+ models.Bounds{
+ Start: start,
+ StepSize: stepSize,
+ Duration: window,
+ }, false, opts)
+
+ require.NoError(b, err)
+ return block, func() {
+ for _, reset := range itersReset {
+ reset()
+ }
+ },
+ setupProf(usePools, iterations)
+}
+
+func setupProf(usePools bool, iterations int) stop {
+ var prof interface {
+ Stop()
+ }
if os.Getenv("PROFILE_TEST_CPU") == "true" {
key := profileTakenKey{
profile: "cpu",
@@ -493,8 +566,7 @@ func benchmarkNextIteration(b *testing.B, iterations int, t iterType) {
iterations: iterations,
}
if v := profilesTaken[key]; v == 2 {
- p := profile.Start(profile.CPUProfile)
- defer p.Stop()
+ prof = profile.Start(profile.CPUProfile)
}
profilesTaken[key] = profilesTaken[key] + 1
@@ -508,90 +580,76 @@ func benchmarkNextIteration(b *testing.B, iterations int, t iterType) {
}
if v := profilesTaken[key]; v == 2 {
- p := profile.Start(profile.MemProfile)
- defer p.Stop()
+ prof = profile.Start(profile.MemProfile)
}
profilesTaken[key] = profilesTaken[key] + 1
}
-
- if t == seriesSequential {
- sm := make([]block.SeriesMeta, seriesCount)
- for i := range iters {
- sm[i] = block.SeriesMeta{}
+ return func() {
+ if prof != nil {
+ prof.Stop()
}
+ }
+}
- it := encodedSeriesIter{
- idx: -1,
- meta: block.Metadata{
- Bounds: models.Bounds{
- Start: start,
- StepSize: stepSize,
- Duration: window,
- },
- },
+func benchmarkNextIteration(b *testing.B, iterations int, t iterType) {
+ bl, reset, close := setupBlock(b, iterations, t)
+ defer close()
- seriesIters: iters,
- seriesMeta: sm,
- lookbackDuration: time.Minute * 5,
- }
+ if t == seriesSequential {
+ it, err := bl.SeriesIter()
+ require.NoError(b, err)
b.ResetTimer()
for i := 0; i < b.N; i++ {
- it.idx = -1
- // Reset all the underlying compressed series iterators.
- for _, reset := range itersReset {
- reset()
- }
-
+ reset()
for it.Next() {
}
+
require.NoError(b, it.Err())
}
return
}
- it := &encodedStepIterWithCollector{
- stepTime: start,
- blockEnd: end,
- meta: block.Metadata{
- Bounds: models.Bounds{
- Start: start,
- StepSize: stepSize,
- Duration: window,
- },
- },
+ if t == seriesBatch {
+ batches, err := bl.MultiSeriesIter(runtime.NumCPU())
+ require.NoError(b, err)
- seriesCollectors: collectors,
- seriesPeek: peeks,
- seriesIters: iters,
- }
+ var wg sync.WaitGroup
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ reset()
- if usePools {
- opts := xsync.NewPooledWorkerPoolOptions()
- readWorkerPools, err := xsync.NewPooledWorkerPool(1024, opts)
- require.NoError(b, err)
- readWorkerPools.Init()
- it.workerPool = readWorkerPools
- }
+ for _, batch := range batches {
+ it := batch.Iter
+ wg.Add(1)
+ go func() {
+ for it.Next() {
+ }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- it.stepTime = start
- it.bufferTime = time.Time{}
- it.finished = false
- for i := range it.seriesPeek {
- it.seriesPeek[i] = peekValue{}
- }
+ wg.Done()
+ }()
+ }
- // Reset all the underlying compressed series iterators.
- for _, reset := range itersReset {
- reset()
+ wg.Wait()
+ for _, batch := range batches {
+ require.NoError(b, batch.Iter.Err())
+ }
}
+ return
+ }
+
+ it, err := bl.StepIter()
+ require.NoError(b, err)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ reset()
for it.Next() {
}
+
require.NoError(b, it.Err())
}
}
@@ -606,38 +664,48 @@ var (
profilesTaken = make(map[profileTakenKey]int)
)
-// $ go test -v -run none -bench BenchmarkNextIteration
-// goos: darwin
-// goarch: amd64
-// pkg: github.com/m3db/m3/src/query/ts/m3db
-// BenchmarkNextIteration/sequential_10-12 1776 642349 ns/op
-// BenchmarkNextIteration/parallel_10-12 2538 466186 ns/op
-// BenchmarkNextIteration/series_10-12 1915 601583 ns/op
-
-// BenchmarkNextIteration/sequential_100-12 621 1945963 ns/op
-// BenchmarkNextIteration/parallel_100-12 1118 1042822 ns/op
-// BenchmarkNextIteration/series_100-12 834 1451031 ns/op
-
-// BenchmarkNextIteration/sequential_200-12 398 3002165 ns/op
-// BenchmarkNextIteration/parallel_200-12 699 1613085 ns/op
-// BenchmarkNextIteration/series_200-12 614 1969783 ns/op
-
-// BenchmarkNextIteration/sequential_500-12 214 5522765 ns/op
-// BenchmarkNextIteration/parallel_500-12 382 2904843 ns/op
-// BenchmarkNextIteration/series_500-12 400 2996965 ns/op
-
-// BenchmarkNextIteration/sequential_1000-12 129 9050684 ns/op
-// BenchmarkNextIteration/parallel_1000-12 238 4775567 ns/op
-// BenchmarkNextIteration/series_1000-12 289 4176052 ns/op
-
-// BenchmarkNextIteration/sequential_2000-12 64 16190003 ns/op
-// BenchmarkNextIteration/parallel_2000-12 136 8238382 ns/op
-// BenchmarkNextIteration/series_2000-12 207 5744589 ns/op
+/*
+ $ go test -v -run none -bench BenchmarkNextIteration
+ goos: darwin
+ goarch: amd64
+ pkg: github.com/m3db/m3/src/query/ts/m3db
+
+ BenchmarkNextIteration/sequential_10-12 4112 282491 ns/op
+ BenchmarkNextIteration/parallel_10-12 4214 249335 ns/op
+ BenchmarkNextIteration/series_10-12 4515 248946 ns/op
+ BenchmarkNextIteration/series_batch_10-12 4434 269776 ns/op
+
+ BenchmarkNextIteration/sequential_100-12 4069 267836 ns/op
+ BenchmarkNextIteration/parallel_100-12 4126 283069 ns/op
+ BenchmarkNextIteration/series_100-12 4146 266928 ns/op
+ BenchmarkNextIteration/series_batch_100-12 4399 255991 ns/op
+
+ BenchmarkNextIteration/sequential_200-12 4267 245249 ns/op
+ BenchmarkNextIteration/parallel_200-12 4233 239597 ns/op
+ BenchmarkNextIteration/series_200-12 4365 245924 ns/op
+ BenchmarkNextIteration/series_batch_200-12 4485 235055 ns/op
+
+ BenchmarkNextIteration/sequential_500-12 5108 230085 ns/op
+ BenchmarkNextIteration/parallel_500-12 4802 230694 ns/op
+ BenchmarkNextIteration/series_500-12 4831 229797 ns/op
+ BenchmarkNextIteration/series_batch_500-12 4880 246588 ns/op
+
+ BenchmarkNextIteration/sequential_1000-12 3807 265449 ns/op
+ BenchmarkNextIteration/parallel_1000-12 5062 254942 ns/op
+ BenchmarkNextIteration/series_1000-12 4423 236796 ns/op
+ BenchmarkNextIteration/series_batch_1000-12 4772 251977 ns/op
+
+ BenchmarkNextIteration/sequential_2000-12 4916 243593 ns/op
+ BenchmarkNextIteration/parallel_2000-12 4743 253677 ns/op
+ BenchmarkNextIteration/series_2000-12 4078 256375 ns/op
+ BenchmarkNextIteration/series_batch_2000-12 4465 242323 ns/op
+*/
func BenchmarkNextIteration(b *testing.B) {
iterTypes := []iterType{
stepSequential,
stepParallel,
seriesSequential,
+ seriesBatch,
}
for _, s := range []int{10, 100, 200, 500, 1000, 2000} {
diff --git a/src/query/ts/m3db/options.go b/src/query/ts/m3db/options.go
index 6755890c26..57bd13a666 100644
--- a/src/query/ts/m3db/options.go
+++ b/src/query/ts/m3db/options.go
@@ -26,11 +26,13 @@ import (
"io"
"time"
+ "github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/pools"
+ queryconsolidator "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/ts/m3db/consolidators"
"github.com/m3db/m3/src/x/pool"
xsync "github.com/m3db/m3/src/x/sync"
@@ -49,17 +51,20 @@ var (
)
type encodedBlockOptions struct {
- splitSeries bool
- lookbackDuration time.Duration
- consolidationFn consolidators.ConsolidationFunc
- tagOptions models.TagOptions
- iterAlloc encoding.ReaderIteratorAllocate
- pools encoding.IteratorPools
- checkedPools pool.CheckedBytesPool
- readWorkerPools xsync.PooledWorkerPool
- writeWorkerPools xsync.PooledWorkerPool
- batchingFn IteratorBatchingFn
- instrumented bool
+ splitSeries bool
+ lookbackDuration time.Duration
+ consolidationFn consolidators.ConsolidationFunc
+ tagOptions models.TagOptions
+ iterAlloc encoding.ReaderIteratorAllocate
+ pools encoding.IteratorPools
+ checkedPools pool.CheckedBytesPool
+ readWorkerPools xsync.PooledWorkerPool
+ writeWorkerPools xsync.PooledWorkerPool
+ queryConsolidatorMatchOptions queryconsolidator.MatchOptions
+ seriesIteratorProcessor SeriesIteratorProcessor
+ batchingFn IteratorBatchingFn
+ adminOptions []client.CustomAdminOption
+ instrumented bool
}
type nextDetails struct {
@@ -79,21 +84,26 @@ func NewOptions() Options {
})
bytesPool.Init()
- opts := pool.NewObjectPoolOptions().SetSize(1024)
- batchPool := pool.NewObjectPool(opts)
- batchPool.Init(func() interface{} {
- return nextDetails{}
- })
+ iteratorPools := pools.BuildIteratorPools(pools.BuildIteratorPoolsOptions{})
+ return newOptions(bytesPool, iteratorPools)
+}
+func newOptions(
+ bytesPool pool.CheckedBytesPool,
+ iteratorPools encoding.IteratorPools,
+) Options {
return &encodedBlockOptions{
lookbackDuration: defaultLookbackDuration,
consolidationFn: defaultConsolidationFn,
tagOptions: models.NewTagOptions(),
iterAlloc: defaultIterAlloc,
- pools: pools.BuildIteratorPools(),
+ pools: iteratorPools,
checkedPools: bytesPool,
batchingFn: defaultIteratorBatchingFn,
instrumented: defaultInstrumented,
+ queryConsolidatorMatchOptions: queryconsolidator.MatchOptions{
+ MatchType: queryconsolidator.MatchIDs,
+ },
}
}
@@ -187,6 +197,27 @@ func (o *encodedBlockOptions) WriteWorkerPool() xsync.PooledWorkerPool {
return o.writeWorkerPools
}
+func (o *encodedBlockOptions) SetSeriesConsolidationMatchOptions(
+ value queryconsolidator.MatchOptions) Options {
+ opts := *o
+ opts.queryConsolidatorMatchOptions = value
+ return &opts
+}
+
+func (o *encodedBlockOptions) SeriesConsolidationMatchOptions() queryconsolidator.MatchOptions {
+ return o.queryConsolidatorMatchOptions
+}
+
+func (o *encodedBlockOptions) SetSeriesIteratorProcessor(p SeriesIteratorProcessor) Options {
+ opts := *o
+ opts.seriesIteratorProcessor = p
+ return &opts
+}
+
+func (o *encodedBlockOptions) SeriesIteratorProcessor() SeriesIteratorProcessor {
+ return o.seriesIteratorProcessor
+}
+
func (o *encodedBlockOptions) SetIteratorBatchingFn(fn IteratorBatchingFn) Options {
opts := *o
opts.batchingFn = fn
@@ -197,6 +228,17 @@ func (o *encodedBlockOptions) IteratorBatchingFn() IteratorBatchingFn {
return o.batchingFn
}
+func (o *encodedBlockOptions) SetCustomAdminOptions(
+ val []client.CustomAdminOption) Options {
+ opts := *o
+ opts.adminOptions = val
+ return &opts
+}
+
+func (o *encodedBlockOptions) CustomAdminOptions() []client.CustomAdminOption {
+ return o.adminOptions
+}
+
func (o *encodedBlockOptions) SetInstrumented(i bool) Options {
opts := *o
opts.instrumented = i
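
The new setters above follow the file's existing copy-on-set pattern: each Set* shallow-copies the receiver and mutates the copy, so an Options value already handed to one caller is never modified by another. A minimal sketch of the pattern with an illustrative struct:

```go
type config struct {
	instrumented bool
}

// SetInstrumented returns a new config; the receiver is left untouched.
func (c *config) SetInstrumented(v bool) *config {
	cp := *c // shallow copy of all fields
	cp.instrumented = v
	return &cp
}
```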
diff --git a/src/query/ts/m3db/types.go b/src/query/ts/m3db/types.go
index 688cb4f4c4..f0c9407ec0 100644
--- a/src/query/ts/m3db/types.go
+++ b/src/query/ts/m3db/types.go
@@ -21,12 +21,15 @@
package m3db
import (
+ "context"
"time"
+ "github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
+ queryconsolidator "github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/query/ts/m3db/consolidators"
"github.com/m3db/m3/src/x/pool"
xsync "github.com/m3db/m3/src/x/sync"
@@ -74,10 +77,22 @@ type Options interface {
SetWriteWorkerPool(xsync.PooledWorkerPool) Options
// ReadWorkerPool returns the write worker pool for the converter.
WriteWorkerPool() xsync.PooledWorkerPool
+ // SetSeriesConsolidationMatchOptions sets series consolidation options.
+ SetSeriesConsolidationMatchOptions(value queryconsolidator.MatchOptions) Options
+ // SeriesConsolidationMatchOptions returns series consolidation options.
+ SeriesConsolidationMatchOptions() queryconsolidator.MatchOptions
+ // SetSeriesIteratorProcessor sets the series iterator processor.
+ SetSeriesIteratorProcessor(SeriesIteratorProcessor) Options
+ // SeriesIteratorProcessor returns the series iterator processor.
+ SeriesIteratorProcessor() SeriesIteratorProcessor
// SetIteratorBatchingFn sets the batching function for the converter.
SetIteratorBatchingFn(IteratorBatchingFn) Options
// IteratorBatchingFn returns the batching function for the converter.
IteratorBatchingFn() IteratorBatchingFn
+ // SetCustomAdminOptions sets custom admin options.
+ SetCustomAdminOptions([]client.CustomAdminOption) Options
+ // CustomAdminOptions gets custom admin options.
+ CustomAdminOptions() []client.CustomAdminOption
// SetInstrumented marks if the encoding step should have instrumentation enabled.
SetInstrumented(bool) Options
// Instrumented returns if the encoding step should have instrumentation enabled.
@@ -86,6 +101,12 @@ type Options interface {
Validate() error
}
+// SeriesIteratorProcessor optionally defines methods to process series iterators.
+type SeriesIteratorProcessor interface {
+ // InspectSeries inspects SeriesIterator slices.
+ InspectSeries(ctx context.Context, seriesIterators []encoding.SeriesIterator) error
+}
+
// IteratorBatchingFn determines how the iterator is split into batches.
type IteratorBatchingFn func(
concurrency int,
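
A hypothetical SeriesIteratorProcessor, purely to show the shape of the new hook (the type and its wiring here are illustrative, not part of this change):

```go
// countingProcessor records how many series iterators it is shown.
type countingProcessor struct {
	count int
}

func (p *countingProcessor) InspectSeries(
	ctx context.Context,
	seriesIterators []encoding.SeriesIterator,
) error {
	p.count += len(seriesIterators)
	return nil
}
```

Such a processor would be installed via SetSeriesIteratorProcessor and, judging by the FetchCompressedInspectSeries tracepoint added above, invoked around FetchCompressed.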
diff --git a/src/query/ts/metadata.go b/src/query/ts/metadata.go
new file mode 100644
index 0000000000..ab451c3195
--- /dev/null
+++ b/src/query/ts/metadata.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ts
+
+// MetricType is the enum for metric types.
+type MetricType int
+
+const (
+ // MetricTypeGauge is the gauge metric type.
+ MetricTypeGauge MetricType = iota
+
+ // MetricTypeCounter is the counter metric type.
+ MetricTypeCounter
+
+ // MetricTypeTimer is the timer metric type.
+ MetricTypeTimer
+)
+
+// SourceType is the enum for metric source types.
+type SourceType int
+
+const (
+ // SourceTypePrometheus is the prometheus source type.
+ SourceTypePrometheus SourceType = iota
+
+ // SourceTypeGraphite is the graphite source type.
+ SourceTypeGraphite
+)
+
+// SeriesAttributes has attributes about the time series.
+type SeriesAttributes struct {
+ Type MetricType
+ Source SourceType
+}
+
+// DefaultSeriesAttributes returns default series attributes.
+func DefaultSeriesAttributes() SeriesAttributes {
+ return SeriesAttributes{
+ Type: MetricTypeGauge,
+ Source: SourceTypePrometheus,
+ }
+}
+
+// Metadata is metadata associated with a time series.
+type Metadata struct {
+ DropUnaggregated bool
+}
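
A short usage sketch of the new ts types (assuming the package above):

```go
attrs := ts.DefaultSeriesAttributes()
fmt.Println(attrs.Type == ts.MetricTypeGauge)        // true
fmt.Println(attrs.Source == ts.SourceTypePrometheus) // true

// Presumably signals that the unaggregated copy of a series should be dropped.
meta := ts.Metadata{DropUnaggregated: true}
_ = meta
```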
diff --git a/src/query/ts/ts_mock.go b/src/query/ts/ts_mock.go
index d32b9c4efa..183834e1a6 100644
--- a/src/query/ts/ts_mock.go
+++ b/src/query/ts/ts_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/query/ts (interfaces: Values)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/query/ts/values.go b/src/query/ts/values.go
index d98b27a9e2..1fd83a7c31 100644
--- a/src/query/ts/values.go
+++ b/src/query/ts/values.go
@@ -304,7 +304,6 @@ func newFixedStepValues(
startTime time.Time,
) *fixedResolutionValues {
values := make([]float64, numSteps)
- // Faster way to initialize an array instead of a loop
util.Memset(values, initialValue)
return &fixedResolutionValues{
resolution: resolution,
diff --git a/src/query/util/timing.go b/src/query/util/timing.go
index 1c7ab0837f..6d15467dce 100644
--- a/src/query/util/timing.go
+++ b/src/query/util/timing.go
@@ -25,12 +25,23 @@ import (
"math"
"strconv"
"time"
+
+ "github.com/prometheus/common/model"
+)
+
+var (
+ minTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC()
+ maxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC()
+
+ minTimeFormatted = minTime.Format(time.RFC3339Nano)
+ maxTimeFormatted = maxTime.Format(time.RFC3339Nano)
)
-// ParseTimeString parses a time string into time.Time
+// ParseTimeString parses a time string into time.Time.
func ParseTimeString(s string) (time.Time, error) {
if t, err := strconv.ParseFloat(s, 64); err == nil {
s, ns := math.Modf(t)
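+ // Round the fractional seconds to millisecond precision to avoid
+ // floating point precision errors for large values.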
+ ns = math.Round(ns*1000) / 1000
return time.Unix(int64(s), int64(ns*float64(time.Second))), nil
}
@@ -38,9 +49,47 @@ func ParseTimeString(s string) (time.Time, error) {
return t, nil
}
+ // Stdlib's time parser can only handle 4-digit years. As a workaround until
+ // that is fixed, we at least support our own boundary times.
+ // Context: https://github.com/prometheus/client_golang/issues/614
+ // Upstream issue: https://github.com/golang/go/issues/20555
+ switch s {
+ case minTimeFormatted:
+ return time.Unix(0, 0), nil
+ case maxTimeFormatted:
+ return time.Now(), nil
+ }
+
return time.Time{}, fmt.Errorf("invalid timestamp for %s", s)
}
+// ParseTimeStringWithDefault parses a time string into time.Time, returning
+// the provided default when the string is empty.
+func ParseTimeStringWithDefault(
+ s string,
+ defaultTime time.Time,
+) (time.Time, error) {
+ if s != "" {
+ return ParseTimeString(s)
+ }
+ return defaultTime, nil
+}
+
+// ParseDurationString parses a duration string, accepting both float seconds
+// and extended duration strings such as "7d5h".
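+//
+// Illustrative examples (values are arbitrary):
+//   d, _ := ParseDurationString("1.5")  // 1.5 seconds, parsed as a float
+//   d, _ = ParseDurationString("7d5h")  // extended duration string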
+func ParseDurationString(s string) (time.Duration, error) {
+ if d, err := strconv.ParseFloat(s, 64); err == nil {
+ ts := d * float64(time.Second)
+ if ts > float64(math.MaxInt64) || ts < float64(math.MinInt64) {
+ return 0, fmt.Errorf("cannot parse %q to a valid duration. It overflows int64", s)
+ }
+ return time.Duration(ts), nil
+ }
+ if d, err := model.ParseDuration(s); err == nil {
+ return time.Duration(d), nil
+ }
+ return 0, fmt.Errorf("cannot parse %q to a valid duration", s)
+}
+
// DurationToMS converts a duration into milliseconds
func DurationToMS(duration time.Duration) int64 {
return duration.Nanoseconds() / int64(time.Millisecond)
diff --git a/src/query/util/timing_test.go b/src/query/util/timing_test.go
index 12485bc140..4f58011cfe 100644
--- a/src/query/util/timing_test.go
+++ b/src/query/util/timing_test.go
@@ -35,3 +35,38 @@ func TestParseTimeString(t *testing.T) {
equalTimes := parsedTime.Equal(time.Unix(703354793, 0))
assert.True(t, equalTimes)
}
+
+func TestParseTimeStringWithMinMaxGoTime(t *testing.T) {
+ parsedTime, err := ParseTimeString(minTimeFormatted)
+ require.NoError(t, err)
+ require.True(t, parsedTime.Equal(time.Unix(0, 0)))
+
+ parsedTime, err = ParseTimeString(maxTimeFormatted)
+ require.NoError(t, err)
+ require.True(t, time.Now().Sub(parsedTime) < time.Minute)
+}
+
+func TestParseTimeStringLargeFloat(t *testing.T) {
+ _, err := ParseTimeString("9999999999999.99999")
+ require.NoError(t, err)
+}
+
+func TestParseTimeStringWithDefault(t *testing.T) {
+ defaultTime := time.Now().Add(-1 * time.Minute)
+ parsedTime, err := ParseTimeStringWithDefault("", defaultTime)
+ require.NoError(t, err)
+ require.True(t, defaultTime.Equal(parsedTime))
+}
+
+func TestParseDurationStringFloat(t *testing.T) {
+ d, err := ParseDurationString("1595545968.4985256")
+ require.NoError(t, err)
+ v := time.Duration(1595545968.4985256 * float64(time.Second))
+ require.Equal(t, v, d)
+}
+
+func TestParseDurationStringExtendedDurationString(t *testing.T) {
+ d, err := ParseDurationString("2d")
+ require.NoError(t, err)
+ require.Equal(t, 2*24*time.Hour, d)
+}
diff --git a/src/x/close/close.go b/src/x/close/close.go
index 5e71e3dbf7..1b652005fa 100644
--- a/src/x/close/close.go
+++ b/src/x/close/close.go
@@ -37,11 +37,27 @@ type Closer interface {
io.Closer
}
+// CloserFn is a function type that implements the Closer interface.
+type CloserFn func() error
+
+// Close implements the Closer interface.
+func (fn CloserFn) Close() error {
+ return fn()
+}
+
// SimpleCloser is a resource that can be closed without returning a result.
type SimpleCloser interface {
Close()
}
+// SimpleCloserFn is a function type that implements the SimpleCloser
+// interface.
+type SimpleCloserFn func()
+
+// Close implements the SimpleCloser interface.
+func (fn SimpleCloserFn) Close() {
+ fn()
+}
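+
+// Illustrative usage sketch (f and stop are assumed values, e.g. an *os.File
+// and a cleanup function; not part of this change):
+//
+//   var c Closer = CloserFn(f.Close)
+//   var sc SimpleCloser = SimpleCloserFn(stop)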
+
// TryClose attempts to close a resource, the resource is expected to
// implement either Closeable or CloseableResult.
func TryClose(r interface{}) error {
diff --git a/src/x/context/finalizeable_list_gen.go b/src/x/context/finalizeable_list_gen.go
index 5240da8e84..f8d7eadd34 100644
--- a/src/x/context/finalizeable_list_gen.go
+++ b/src/x/context/finalizeable_list_gen.go
@@ -25,6 +25,8 @@
package context
import (
+ "sync"
+
"github.com/m3db/m3/src/x/pool"
)
@@ -129,11 +131,26 @@ func (l *finalizeableList) Init() *finalizeableList {
l.root.prev = &l.root
l.len = 0
if l.Pool == nil {
- l.Pool = newFinalizeableElementPool(nil)
+ // Use a shared default pool; otherwise every list created without a
+ // pool would allocate a wholly new pool of finalizeable elements
+ // (4096 of them).
+ defaultElementPoolOnce.Do(initElementPool)
+ l.Pool = defaultElementPool
}
return l
}
+var (
+ defaultElementPoolOnce sync.Once
+ defaultElementPool *finalizeableElementPool
+)
+
+// Defined as a package-level function so that no closure allocation is
+// required when passing it to sync.Once.Do.
+func initElementPool() {
+ defaultElementPool = newFinalizeableElementPool(nil)
+}
+
// newFinalizeableList returns an initialized list.
func newFinalizeableList(p *finalizeableElementPool) *finalizeableList {
l := &finalizeableList{Pool: p}
diff --git a/src/x/debug/debug.go b/src/x/debug/debug.go
index 18590dc8dd..4dc2d20d1a 100644
--- a/src/x/debug/debug.go
+++ b/src/x/debug/debug.go
@@ -93,8 +93,12 @@ func NewPlacementAndNamespaceZipWriterWithDefaultSources(
}
if clusterClient != nil {
- err = zw.RegisterSource("namespace.json",
- NewNamespaceInfoSource(clusterClient, instrumentOpts))
+ nsSource, err := NewNamespaceInfoSource(clusterClient, services, instrumentOpts)
+ if err != nil {
+ return nil, fmt.Errorf("could not create namespace info source: %w", err)
+ }
+
+ err = zw.RegisterSource("namespace.json", nsSource)
if err != nil {
return nil, fmt.Errorf("unable to register namespaceSource: %s", err)
}
diff --git a/src/x/debug/debug_test.go b/src/x/debug/debug_test.go
index 35e2a71086..63eb4f5280 100644
--- a/src/x/debug/debug_test.go
+++ b/src/x/debug/debug_test.go
@@ -215,7 +215,7 @@ func TestHTTPEndpoint(t *testing.T) {
})
}
-func newHandlerOptsAndClient(t *testing.T) (placement.HandlerOptions, *clusterclient.MockClient) {
+func newHandlerOptsAndClient(t *testing.T) (placement.HandlerOptions, *kv.MockStore, *clusterclient.MockClient) {
placementProto := &placementpb.Placement{
Instances: map[string]*placementpb.Instance{
"host1": &placementpb.Instance{
@@ -269,7 +269,7 @@ func newHandlerOptsAndClient(t *testing.T) (placement.HandlerOptions, *clustercl
mockClient, config.Configuration{}, nil, instrument.NewOptions())
require.NoError(t, err)
- return handlerOpts, mockClient
+ return handlerOpts, mockKV, mockClient
}
func TestDefaultSources(t *testing.T) {
@@ -282,9 +282,10 @@ func TestDefaultSources(t *testing.T) {
"placement-m3db.json",
}
- handlerOpts, mockClient := newHandlerOptsAndClient(t)
+ handlerOpts, mockKV, mockClient := newHandlerOptsAndClient(t)
+ mockClient.EXPECT().Store(gomock.Any()).Return(mockKV, nil)
svcDefaults := []handleroptions.ServiceNameAndDefaults{{
- ServiceName: "m3db",
+ ServiceName: handleroptions.M3DBServiceName,
}}
zw, err := NewPlacementAndNamespaceZipWriterWithDefaultSources(1*time.Second, mockClient, handlerOpts, svcDefaults, instrument.NewOptions())
require.NoError(t, err)
diff --git a/src/x/debug/namespace.go b/src/x/debug/namespace.go
index df27dc9acc..e3599250e3 100644
--- a/src/x/debug/namespace.go
+++ b/src/x/debug/namespace.go
@@ -23,11 +23,13 @@ package debug
import (
"bytes"
"encoding/json"
+ "fmt"
"io"
"net/http"
clusterclient "github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/query/api/v1/handler/namespace"
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/query/generated/proto/admin"
"github.com/m3db/m3/src/x/instrument"
xhttp "github.com/m3db/m3/src/x/net/http"
@@ -36,25 +38,46 @@ import (
)
type namespaceInfoSource struct {
- handler *namespace.GetHandler
+ handler *namespace.GetHandler
+ defaults handleroptions.ServiceNameAndDefaults
}
// NewNamespaceInfoSource returns a Source for namespace information.
func NewNamespaceInfoSource(
clusterClient clusterclient.Client,
+ allDefaults []handleroptions.ServiceNameAndDefaults,
instrumentOpts instrument.Options,
-) Source {
+) (Source, error) {
+ var (
+ m3dbDefault handleroptions.ServiceNameAndDefaults
+ found bool
+ )
+ for _, def := range allDefaults {
+ if def.ServiceName == handleroptions.M3DBServiceName {
+ m3dbDefault = def
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return nil, fmt.Errorf("could not find M3DB service in defaults: %v", allDefaults)
+ }
+
handler := namespace.NewGetHandler(clusterClient,
instrumentOpts)
+
return &namespaceInfoSource{
- handler: handler,
- }
+ handler: handler,
+ defaults: m3dbDefault,
+ }, nil
}
// Write fetches data about the namespace and writes it in the given writer.
// The data is formatted in json.
-func (n *namespaceInfoSource) Write(w io.Writer, _ *http.Request) error {
- nsRegistry, err := n.handler.Get()
+func (n *namespaceInfoSource) Write(w io.Writer, r *http.Request) error {
+ opts := handleroptions.NewServiceOptions(n.defaults, r.Header, nil)
+ nsRegistry, err := n.handler.Get(opts)
if err != nil {
return err
}
diff --git a/src/x/debug/namespace_test.go b/src/x/debug/namespace_test.go
index 0af409aae3..c1b8eb8d44 100644
--- a/src/x/debug/namespace_test.go
+++ b/src/x/debug/namespace_test.go
@@ -25,15 +25,23 @@ import (
"net/http"
"testing"
+ "github.com/golang/mock/gomock"
+ "github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/x/instrument"
"github.com/stretchr/testify/require"
)
func TestNamespaceSource(t *testing.T) {
- _, mockClient := newHandlerOptsAndClient(t)
+ _, mockKV, mockClient := newHandlerOptsAndClient(t)
+ mockClient.EXPECT().Store(gomock.Any()).Return(mockKV, nil)
iOpts := instrument.NewOptions()
- n := NewNamespaceInfoSource(mockClient, iOpts)
+ n, err := NewNamespaceInfoSource(mockClient, []handleroptions.ServiceNameAndDefaults{
+ {
+ ServiceName: handleroptions.M3DBServiceName,
+ },
+ }, iOpts)
+ require.NoError(t, err)
buff := bytes.NewBuffer([]byte{})
n.Write(buff, &http.Request{})
diff --git a/src/x/debug/placement_test.go b/src/x/debug/placement_test.go
index 7aa24282b8..1ed8649bba 100644
--- a/src/x/debug/placement_test.go
+++ b/src/x/debug/placement_test.go
@@ -32,7 +32,7 @@ import (
)
func TestPlacementSource(t *testing.T) {
- handlerOpts, _ := newHandlerOptsAndClient(t)
+ handlerOpts, _, _ := newHandlerOptsAndClient(t)
iOpts := instrument.NewOptions()
svcDefaults := handleroptions.ServiceNameAndDefaults{
ServiceName: "m3db",
diff --git a/src/x/debug/triggering_profile.go b/src/x/debug/triggering_profile.go
new file mode 100644
index 0000000000..473e8b6756
--- /dev/null
+++ b/src/x/debug/triggering_profile.go
@@ -0,0 +1,212 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package debug
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime/pprof"
+ "text/template"
+ "time"
+
+ "github.com/m3db/m3/src/x/instrument"
+
+ "go.uber.org/zap"
+)
+
+const (
+ // ContinuousCPUProfileName is the name of the continuous CPU profile.
+ ContinuousCPUProfileName = "cpu"
+)
+
+var (
+ defaultConditional = func() bool { return true }
+ defaultInterval = time.Second
+ errNoFilePathTemplate = errors.New("no file path template")
+ errNoProfileName = errors.New("no profile name")
+ errNoInstrumentOptions = errors.New("no instrument options")
+ errAlreadyOpen = errors.New("already open")
+ errNotOpen = errors.New("not open")
+)
+
+// ContinuousFileProfile is a profile that runs continuously, writing each
+// captured profile to a file named by a file path template.
+type ContinuousFileProfile struct {
+ filePathTemplate *template.Template
+ profileName string
+ profileDuration time.Duration
+ profileDebug int
+ conditional func() bool
+ interval time.Duration
+
+ logger *zap.Logger
+
+ closeCh chan struct{}
+}
+
+// ContinuousFileProfileOptions is a set of continuous file profile options.
+type ContinuousFileProfileOptions struct {
+ FilePathTemplate string
+ ProfileName string
+ ProfileDuration time.Duration
+ ProfileDebug int
+ Conditional func() bool
+ Interval time.Duration
+ InstrumentOptions instrument.Options
+}
+
+// ContinuousFileProfilePathParams holds the params used to construct
+// a file path.
+type ContinuousFileProfilePathParams struct {
+ ProfileName string
+ UnixTime int64
+}
+
+// NewContinuousFileProfile returns a new continuous file profile.
+func NewContinuousFileProfile(
+ opts ContinuousFileProfileOptions,
+) (*ContinuousFileProfile, error) {
+ if opts.FilePathTemplate == "" {
+ return nil, errNoFilePathTemplate
+ }
+ if opts.ProfileName == "" {
+ return nil, errNoProfileName
+ }
+ if opts.Conditional == nil {
+ opts.Conditional = defaultConditional
+ }
+ if opts.Interval == 0 {
+ opts.Interval = defaultInterval
+ }
+ if opts.InstrumentOptions == nil {
+ return nil, errNoInstrumentOptions
+ }
+
+ tmpl, err := template.New("fileName").Parse(opts.FilePathTemplate)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ContinuousFileProfile{
+ filePathTemplate: tmpl,
+ profileName: opts.ProfileName,
+ profileDuration: opts.ProfileDuration,
+ profileDebug: opts.ProfileDebug,
+ conditional: opts.Conditional,
+ interval: opts.Interval,
+ logger: opts.InstrumentOptions.Logger(),
+ }, nil
+}
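+
+// Illustrative usage sketch (iOpts is an assumed instrument.Options value;
+// not part of this change):
+//
+//   profile, err := NewContinuousFileProfile(ContinuousFileProfileOptions{
+//       FilePathTemplate:  "/tmp/{{.ProfileName}}-{{.UnixTime}}.profile",
+//       ProfileName:       ContinuousCPUProfileName,
+//       ProfileDuration:   5 * time.Second,
+//       Interval:          time.Minute,
+//       InstrumentOptions: iOpts,
+//   })
+//   if err == nil {
+//       _ = profile.Start()
+//       defer func() { _ = profile.Stop() }()
+//   }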
+
+// Start will start the continuous file profile.
+func (c *ContinuousFileProfile) Start() error {
+ if c.closeCh != nil {
+ return errAlreadyOpen
+ }
+
+ c.closeCh = make(chan struct{})
+ go c.run()
+ return nil
+}
+
+// Stop will stop the continuous file profile.
+func (c *ContinuousFileProfile) Stop() error {
+ if c.closeCh == nil {
+ return errNotOpen
+ }
+
+ close(c.closeCh)
+ c.closeCh = nil
+
+ return nil
+}
+
+func (c *ContinuousFileProfile) run() {
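+ // Capture the close channel locally since Stop resets c.closeCh to nil.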
+ closeCh := c.closeCh
+ ticker := time.NewTicker(c.interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-closeCh:
+ return
+ case <-ticker.C:
+ if !c.conditional() {
+ continue
+ }
+
+ err := c.profile()
+ if err != nil {
+ c.logger.Error("continuous profile error",
+ zap.String("name", c.profileName),
+ zap.Int("debug", c.profileDebug),
+ zap.Duration("interval", c.interval),
+ zap.Error(err))
+ }
+ }
+ }
+}
+
+func (c *ContinuousFileProfile) profile() error {
+ filePathBuffer := bytes.NewBuffer(nil)
+ filePathParams := ContinuousFileProfilePathParams{
+ ProfileName: c.profileName,
+ UnixTime: time.Now().Unix(),
+ }
+ err := c.filePathTemplate.Execute(filePathBuffer, filePathParams)
+ if err != nil {
+ return err
+ }
+
+ w, err := os.Create(filePathBuffer.String())
+ if err != nil {
+ return err
+ }
+
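+ // Only close the file here on error paths; on success the explicit
+ // Close at the end returns any close error to the caller.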
+ success := false
+ defer func() {
+ if !success {
+ _ = w.Close()
+ }
+ }()
+
+ switch c.profileName {
+ case ContinuousCPUProfileName:
+ if err := pprof.StartCPUProfile(w); err != nil {
+ return err
+ }
+ time.Sleep(c.profileDuration)
+ pprof.StopCPUProfile()
+ default:
+ p := pprof.Lookup(c.profileName)
+ if p == nil {
+ return fmt.Errorf("unknown profile: %s", c.profileName)
+ }
+ if err := p.WriteTo(w, c.profileDebug); err != nil {
+ return err
+ }
+ }
+
+ success = true
+ return w.Close()
+}
diff --git a/src/x/debug/triggering_profile_test.go b/src/x/debug/triggering_profile_test.go
new file mode 100644
index 0000000000..b769b1cd1e
--- /dev/null
+++ b/src/x/debug/triggering_profile_test.go
@@ -0,0 +1,123 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package debug
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "regexp"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/m3db/m3/src/x/instrument"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/atomic"
+)
+
+func TestContinuousFileProfile(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ duration time.Duration
+ debug int
+ }{
+ {
+ name: ContinuousCPUProfileName,
+ duration: time.Second,
+ },
+ {
+ name: "goroutine",
+ debug: 2,
+ },
+ } {
+ test := test
+ name := fmt.Sprintf("%s_%s_%d", test.name, test.duration, test.debug)
+ t.Run(name, func(t *testing.T) {
+ dir, err := ioutil.TempDir("", "")
+ require.NoError(t, err)
+
+ defer os.RemoveAll(dir)
+
+ ext := ".profile"
+
+ cond := atomic.NewBool(false)
+ opts := ContinuousFileProfileOptions{
+ FilePathTemplate: path.Join(dir, "{{.ProfileName}}-{{.UnixTime}}"+ext),
+ ProfileName: test.name,
+ ProfileDuration: test.duration,
+ ProfileDebug: test.debug,
+ Interval: 20 * time.Millisecond,
+ Conditional: cond.Load,
+ InstrumentOptions: instrument.NewTestOptions(t),
+ }
+
+ profile, err := NewContinuousFileProfile(opts)
+ require.NoError(t, err)
+
+ require.NoError(t, profile.Start())
+
+ for i := 0; i < 10; i++ {
+ // Make sure it doesn't create files until the conditional returns true.
+ time.Sleep(opts.Interval)
+
+ files, err := ioutil.ReadDir(dir)
+ require.NoError(t, err)
+
+ for _, f := range files {
+ if strings.HasSuffix(f.Name(), ext) {
+ require.FailNow(t, "conditional false, profile generated")
+ }
+ }
+ }
+
+ // Trigger profile.
+ cond.Store(true)
+
+ for start := time.Now(); time.Since(start) < 10*time.Second; {
+ files, err := ioutil.ReadDir(dir)
+ require.NoError(t, err)
+
+ profiles := 0
+ for _, f := range files {
+ if strings.HasSuffix(f.Name(), ext) {
+ pattern := test.name + `-[0-9]+\` + ext
+ re := regexp.MustCompile(pattern)
+ require.True(t, re.MatchString(f.Name()),
+ fmt.Sprintf(
+ "file should match pattern: pattern=%s, actual=%s",
+ pattern, f.Name()))
+ profiles++
+ }
+ }
+
+ if profiles >= 2 {
+ // Successfully continuously emitting.
+ return
+ }
+ }
+
+ require.FailNow(t, "did not generate profiles")
+ })
+ }
+}
diff --git a/src/x/docs/docs.go b/src/x/docs/docs.go
index a31999d539..54f0cbce6f 100644
--- a/src/x/docs/docs.go
+++ b/src/x/docs/docs.go
@@ -41,7 +41,7 @@ const (
// Path returns the url to the given section of documentation.
func Path(section string) string {
- return fmt.Sprintf("https://m3db.github.io/m3/%s", section)
+ return fmt.Sprintf("https://docs.m3db.io/%s", section)
}
// RepoPathURL is a URL that points to a path in the repository, helpful
diff --git a/src/x/docs/docs_test.go b/src/x/docs/docs_test.go
index 0de21b6f53..d4a29832fa 100644
--- a/src/x/docs/docs_test.go
+++ b/src/x/docs/docs_test.go
@@ -28,7 +28,7 @@ import (
func TestPath(t *testing.T) {
p := Path("foo")
- assert.Equal(t, "https://m3db.github.io/m3/foo", p)
+ assert.Equal(t, "https://docs.m3db.io/foo", p)
}
func TestParseRepoPathURL(t *testing.T) {
diff --git a/src/x/generated-source-files.mk b/src/x/generated-source-files.mk
index 324c224308..6da7c3f652 100644
--- a/src/x/generated-source-files.mk
+++ b/src/x/generated-source-files.mk
@@ -2,7 +2,7 @@ gopath_prefix := $(GOPATH)/src
m3x_package := github.com/m3db/m3/src/x
m3x_package_path := $(gopath_prefix)/$(m3x_package)
temp_suffix := _temp
-gorename_package := github.com/prateek/gorename
+gorename_package := github.com/robskillington/gorename
gorename_package_version := 52c7307cddd221bb98f0a3215216789f3c821b10
# Tests that all currently generated types match their contents if they were regenerated
@@ -75,7 +75,7 @@ hashmap-gen-rename-helper:
key_type_alias ?= $(key_type)
value_type_alias ?= $(value_type)
.PHONY: hashmap-gen-rename
-hashmap-gen-rename: install-gorename
+hashmap-gen-rename:
$(eval out_dir=$(gopath_prefix)/$(target_package))
$(eval temp_outdir=$(out_dir)$(temp_suffix))
echo $(temp_outdir)
@@ -155,7 +155,7 @@ endif
elem_type_alias ?= $(elem_type)
.PHONY: arraypool-gen-rename
-arraypool-gen-rename: install-gorename
+arraypool-gen-rename:
$(eval temp_outdir=$(out_dir)$(temp_suffix))
ifneq ($(rename_gen_types),)
# Allow users to short circuit the generation of types.go if they don't need it.
@@ -197,7 +197,7 @@ endif
elem_type_alias ?= $(elem_type)
.PHONY: list-gen-rename
-list-gen-rename: install-gorename
+list-gen-rename:
$(eval temp_outdir=$(out_dir)$(temp_suffix))
ifneq ($(rename_gen_types),)
# Allow users to short circuit the generation of types.go if they don't need it.
@@ -213,14 +213,3 @@ endif
ifneq ($(rename_gen_types),)
rm $(temp_outdir)/types.go
endif
-
-install-gorename:
- $(eval gorename_dir=$(gopath_prefix)/$(gorename_package))
- @([ -d $(gorename_dir) ] && which gorename >/dev/null ) || \
- (echo "Downloading specified gorename" && \
- go get -d $(gorename_package) && \
- cd $(gopath_prefix)/$(gorename_package) && \
- git checkout $(gorename_package_version) && \
- glide install -v && go install && \
- echo "Successfully installed gorename") 2>/dev/null
- @which gorename > /dev/null || (echo "gorename install failed" && exit 1)
diff --git a/src/x/generics/hashmap/byteskey/new_map.go b/src/x/generics/hashmap/byteskey/new_map.go
index 2142b6ab9b..62ede699b1 100644
--- a/src/x/generics/hashmap/byteskey/new_map.go
+++ b/src/x/generics/hashmap/byteskey/new_map.go
@@ -25,7 +25,7 @@ import (
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
"github.com/mauricelam/genny/generic"
)
diff --git a/src/x/generics/hashmap/idkey/new_map.go b/src/x/generics/hashmap/idkey/new_map.go
index 6d1e4234f6..752635f7e3 100644
--- a/src/x/generics/hashmap/idkey/new_map.go
+++ b/src/x/generics/hashmap/idkey/new_map.go
@@ -24,7 +24,7 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
"github.com/mauricelam/genny/generic"
)
diff --git a/src/x/generics/hashmap/map_test.go b/src/x/generics/hashmap/map_test.go
index a9cb64326c..b373f62bcb 100644
--- a/src/x/generics/hashmap/map_test.go
+++ b/src/x/generics/hashmap/map_test.go
@@ -23,7 +23,7 @@ package hashmap
import (
"testing"
- "github.com/cespare/xxhash"
+ "github.com/cespare/xxhash/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/src/x/generics/list/list.go b/src/x/generics/list/list.go
index c1ce49c559..5e7a99c799 100644
--- a/src/x/generics/list/list.go
+++ b/src/x/generics/list/list.go
@@ -56,6 +56,8 @@
package list
import (
+ "sync"
+
"github.com/m3db/m3/src/x/pool"
"github.com/mauricelam/genny/generic"
@@ -110,11 +112,26 @@ func (l *List) Init() *List {
l.root.prev = &l.root
l.len = 0
if l.Pool == nil {
- l.Pool = newElementPool(nil)
+ // Use a shared default pool; otherwise every list created without a
+ // pool would allocate a wholly new pool of elements (4096 of them).
+ defaultElementPoolOnce.Do(initElementPool)
+ l.Pool = defaultElementPool
}
return l
}
+var (
+ defaultElementPoolOnce sync.Once
+ defaultElementPool *ElementPool
+)
+
+// Defined as a package-level function so that no closure allocation is
+// required when passing it to sync.Once.Do.
+func initElementPool() {
+ defaultElementPool = newElementPool(nil)
+}
+
// newList returns an initialized list.
func newList(p *ElementPool) *List {
l := &List{Pool: p}
diff --git a/src/x/headers/headers.go b/src/x/headers/headers.go
new file mode 100644
index 0000000000..74936dad32
--- /dev/null
+++ b/src/x/headers/headers.go
@@ -0,0 +1,123 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package headers
+
+const (
+ // M3HeaderPrefix is the prefix that all M3-specific headers affecting
+ // query or write behavior (not necessarily m3admin headers) are
+ // guaranteed to have.
+ M3HeaderPrefix = "M3-"
+
+ // WarningsHeader is the M3 warnings header, set when a warning should be
+ // displayed to the user.
+ WarningsHeader = M3HeaderPrefix + "Warnings"
+
+ // RetryHeader is the M3 retry header, set when it is safe to retry.
+ RetryHeader = M3HeaderPrefix + "Retry"
+
+ // ServedByHeader is the M3 header describing the query storage execution
+ // breakdown.
+ ServedByHeader = M3HeaderPrefix + "Storage-By"
+
+ // DeprecatedHeader is the M3 deprecated header.
+ DeprecatedHeader = M3HeaderPrefix + "Deprecated"
+
+ // MetricsTypeHeader sets the write or read metrics type to restrict
+ // metrics to.
+ // Valid values are "unaggregated" or "aggregated".
+ MetricsTypeHeader = M3HeaderPrefix + "Metrics-Type"
+
+ // WriteTypeHeader is a header that controls if default
+ // writes should be written to both unaggregated and aggregated
+ // namespaces, or if unaggregated values are skipped and
+ // only aggregated values are written.
+ // Valid values are "default" or "aggregate".
+ WriteTypeHeader = M3HeaderPrefix + "Write-Type"
+
+ // DefaultWriteType is the default write type.
+ DefaultWriteType = "default"
+
+ // AggregateWriteType is the aggregate write type, which writes only to
+ // aggregated namespaces.
+ AggregateWriteType = "aggregate"
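+
+ // For example, a client could request aggregate-only writes with an
+ // illustrative header set (req is an assumed *http.Request):
+ //   req.Header.Set(WriteTypeHeader, AggregateWriteType)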
+
+ // MetricsStoragePolicyHeader specifies the resolution and retention of
+ // metrics being written or read.
+ // In the form of a storage policy string, e.g. "1m:14d".
+ // Only required if the metrics type header does not specify unaggregated
+ // metrics type.
+ MetricsStoragePolicyHeader = M3HeaderPrefix + "Storage-Policy"
+
+ // RestrictByTagsJSONHeader provides tag options to enforce on queries,
+ // in JSON format. See `handler.stringTagOptions` for definitions.
+ RestrictByTagsJSONHeader = M3HeaderPrefix + "Restrict-By-Tags-JSON"
+
+ // MapTagsByJSONHeader provides the ability to mutate tags of timeseries in
+ // incoming write requests. See `MapTagsOptions` for structure.
+ MapTagsByJSONHeader = M3HeaderPrefix + "Map-Tags-JSON"
+
+ // LimitMaxSeriesHeader is the M3 limit timeseries header that limits
+ // the number of time series returned by each storage node.
+ LimitMaxSeriesHeader = M3HeaderPrefix + "Limit-Max-Series"
+
+ // LimitMaxDocsHeader is the M3 limit docs header that limits
+ // the number of docs returned by each storage node.
+ LimitMaxDocsHeader = M3HeaderPrefix + "Limit-Max-Docs"
+
+ // LimitRequireExhaustiveHeader is the M3 limit exhaustive header that will
+ // ensure M3 returns an error if the results set is not exhaustive.
+ LimitRequireExhaustiveHeader = M3HeaderPrefix + "Limit-Require-Exhaustive"
+
+ // UnaggregatedStoragePolicy specifies the unaggregated storage policy.
+ UnaggregatedStoragePolicy = "unaggregated"
+
+ // DefaultServiceEnvironment is the default service ID environment.
+ DefaultServiceEnvironment = "default_env"
+ // DefaultServiceZone is the default service ID zone.
+ DefaultServiceZone = "embedded"
+
+ // HeaderClusterEnvironmentName is the header used to specify the
+ // environment name.
+ HeaderClusterEnvironmentName = "Cluster-Environment-Name"
+ // HeaderClusterZoneName is the header used to specify the zone name.
+ HeaderClusterZoneName = "Cluster-Zone-Name"
+ // HeaderDryRun is the header used to specify whether this should be a dry
+ // run.
+ HeaderDryRun = "Dry-Run"
+ // HeaderForce is the header used to specify whether this should be a forced
+ // operation.
+ HeaderForce = "Force"
+
+ // LimitHeader is the header added when returned series are limited.
+ LimitHeader = M3HeaderPrefix + "Results-Limited"
+
+ // LimitHeaderSeriesLimitApplied is the header value set when fetch
+ // results hit the max series limit.
+ LimitHeaderSeriesLimitApplied = "max_fetch_series_limit_applied"
+
+ // RenderFormat is used to switch result format for query results rendering.
+ RenderFormat = M3HeaderPrefix + "Render-Format"
+
+ // JSONDisableDisallowUnknownFields is a header that, if set to true,
+ // allows clients to send fields unknown to an HTTP/JSON endpoint and
+ // still have the request parsed. This is helpful for sending a request
+ // with a new schema to an older instance and still having it respond
+ // successfully using the fields it knows about.
+ JSONDisableDisallowUnknownFields = M3HeaderPrefix + "JSON-Disable-Disallow-Unknown-Fields"
+)
diff --git a/src/x/ident/ident_mock.go b/src/x/ident/ident_mock.go
index 10e8ad2b9b..5d7170131d 100644
--- a/src/x/ident/ident_mock.go
+++ b/src/x/ident/ident_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/x/ident (interfaces: ID,TagIterator)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -265,3 +265,15 @@ func (mr *MockTagIteratorMockRecorder) Remaining() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remaining", reflect.TypeOf((*MockTagIterator)(nil).Remaining))
}
+
+// Rewind mocks base method
+func (m *MockTagIterator) Rewind() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Rewind")
+}
+
+// Rewind indicates an expected call of Rewind
+func (mr *MockTagIteratorMockRecorder) Rewind() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Rewind", reflect.TypeOf((*MockTagIterator)(nil).Rewind))
+}
diff --git a/src/x/ident/identifier_pool.go b/src/x/ident/identifier_pool.go
index 211c00392b..8239d818f4 100644
--- a/src/x/ident/identifier_pool.go
+++ b/src/x/ident/identifier_pool.go
@@ -81,7 +81,7 @@ func NewPool(
})
p.tagArrayPool.Init()
p.itersPool.Init(func() interface{} {
- return newTagSliceIter(Tags{}, p)
+ return newTagSliceIter(Tags{}, nil, p)
})
return p
diff --git a/src/x/ident/identifier_pool_test.go b/src/x/ident/identifier_pool_test.go
index 42183cc7f5..f1a70fe832 100644
--- a/src/x/ident/identifier_pool_test.go
+++ b/src/x/ident/identifier_pool_test.go
@@ -185,7 +185,7 @@ func (s idPoolTestSuite) TestPoolGetTagsIterator() {
ctx.BlockingClose()
- s.Require().Nil(iter.(*tagSliceIter).backingSlice)
+ s.Require().Equal(tagsSlice{}, iter.(*tagSliceIter).backingSlice)
s.Require().Equal(-1, iter.(*tagSliceIter).currentIdx)
}
@@ -206,7 +206,7 @@ func (s idPoolTestSuite) TestPoolTagsIterator() {
iter.Close()
- s.Require().Nil(iter.(*tagSliceIter).backingSlice)
+ s.Require().Equal(tagsSlice{}, iter.(*tagSliceIter).backingSlice)
s.Require().Equal(-1, iter.(*tagSliceIter).currentIdx)
}
diff --git a/src/x/ident/tag_iterator.go b/src/x/ident/tag_iterator.go
index 94e7ab1fde..f51508b0c3 100644
--- a/src/x/ident/tag_iterator.go
+++ b/src/x/ident/tag_iterator.go
@@ -22,6 +22,8 @@ package ident
import (
"errors"
+
+ "github.com/m3db/m3/src/m3ninx/doc"
)
var (
@@ -52,29 +54,69 @@ func NewTagStringsIterator(inputs ...string) (TagIterator, error) {
// NewTagsIterator returns a TagsIterator over a set of tags.
func NewTagsIterator(tags Tags) TagsIterator {
- return newTagSliceIter(tags, nil)
+ return newTagSliceIter(tags, nil, nil)
+}
+
+// NewFieldsTagsIterator returns a TagsIterator over a set of fields.
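+// For example (illustrative sketch; doc.Field holds Name/Value byte slices):
+//   iter := NewFieldsTagsIterator([]doc.Field{
+//       {Name: []byte("city"), Value: []byte("sf")},
+//   })
+//   for iter.Next() {
+//       tag := iter.Current()
+//   }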
+func NewFieldsTagsIterator(fields []doc.Field) TagsIterator {
+ return newTagSliceIter(Tags{}, fields, nil)
}
func newTagSliceIter(
tags Tags,
+ fields []doc.Field,
pool Pool,
) *tagSliceIter {
- iter := &tagSliceIter{pool: pool}
- iter.Reset(tags)
+ iter := &tagSliceIter{
+ nameBytesID: NewReuseableBytesID(),
+ valueBytesID: NewReuseableBytesID(),
+ pool: pool,
+ }
+ iter.currentReuseableTag = Tag{
+ Name: iter.nameBytesID,
+ Value: iter.valueBytesID,
+ }
+ if len(tags.Values()) > 0 {
+ iter.Reset(tags)
+ } else {
+ iter.ResetFields(fields)
+ }
return iter
}
+type tagsSliceType uint
+
+const (
+ tagSliceType tagsSliceType = iota
+ fieldSliceType
+)
+
+type tagsSlice struct {
+ tags []Tag
+ fields []doc.Field
+}
+
type tagSliceIter struct {
- backingSlice []Tag
- currentIdx int
- currentTag Tag
- pool Pool
+ backingSlice tagsSlice
+ currentIdx int
+ currentTag Tag
+ currentReuseableTag Tag
+ nameBytesID *ReuseableBytesID
+ valueBytesID *ReuseableBytesID
+ pool Pool
}
func (i *tagSliceIter) Next() bool {
i.currentIdx++
- if i.currentIdx < len(i.backingSlice) {
- i.currentTag = i.backingSlice[i.currentIdx]
+ l, t := i.lengthAndType()
+ if i.currentIdx < l {
+ if t == tagSliceType {
+ i.currentTag = i.backingSlice.tags[i.currentIdx]
+ } else {
+ i.nameBytesID.Reset(i.backingSlice.fields[i.currentIdx].Name)
+ i.valueBytesID.Reset(i.backingSlice.fields[i.currentIdx].Value)
+ i.currentTag = i.currentReuseableTag
+ }
return true
}
i.currentTag = Tag{}
@@ -97,7 +139,7 @@ func (i *tagSliceIter) Err() error {
}
func (i *tagSliceIter) Close() {
- i.backingSlice = nil
+ i.backingSlice = tagsSlice{}
i.currentIdx = 0
i.currentTag = Tag{}
@@ -109,11 +151,19 @@ func (i *tagSliceIter) Close() {
}
func (i *tagSliceIter) Len() int {
- return len(i.backingSlice)
+ l, _ := i.lengthAndType()
+ return l
+}
+
+func (i *tagSliceIter) lengthAndType() (int, tagsSliceType) {
+ if l := len(i.backingSlice.tags); l > 0 {
+ return l, tagSliceType
+ }
+ return len(i.backingSlice.fields), fieldSliceType
}
func (i *tagSliceIter) Remaining() int {
- if r := len(i.backingSlice) - 1 - i.currentIdx; r >= 0 {
+ if r := i.Len() - 1 - i.currentIdx; r >= 0 {
return r
}
return 0
@@ -122,25 +172,39 @@ func (i *tagSliceIter) Remaining() int {
func (i *tagSliceIter) Duplicate() TagIterator {
if i.pool != nil {
iter := i.pool.TagsIterator()
- iter.Reset(Tags{values: i.backingSlice})
+ if len(i.backingSlice.tags) > 0 {
+ iter.Reset(Tags{values: i.backingSlice.tags})
+ } else {
+ iter.ResetFields(i.backingSlice.fields)
+ }
for j := 0; j <= i.currentIdx; j++ {
iter.Next()
}
return iter
}
- return &tagSliceIter{
- backingSlice: i.backingSlice,
- currentIdx: i.currentIdx,
- currentTag: i.currentTag,
- }
+ return newTagSliceIter(Tags{values: i.backingSlice.tags},
+ i.backingSlice.fields, i.pool)
}
-func (i *tagSliceIter) Reset(tags Tags) {
- i.backingSlice = tags.Values()
+func (i *tagSliceIter) rewind() {
i.currentIdx = -1
i.currentTag = Tag{}
}
+func (i *tagSliceIter) Reset(tags Tags) {
+ i.backingSlice = tagsSlice{tags: tags.Values()}
+ i.rewind()
+}
+
+func (i *tagSliceIter) ResetFields(fields []doc.Field) {
+ i.backingSlice = tagsSlice{fields: fields}
+ i.rewind()
+}
+
+func (i *tagSliceIter) Rewind() {
+ i.rewind()
+}
+
// EmptyTagIterator returns an iterator over no tags.
var EmptyTagIterator TagIterator = emptyTagIterator{}
@@ -154,3 +218,4 @@ func (e emptyTagIterator) Close() {}
func (e emptyTagIterator) Len() int { return 0 }
func (e emptyTagIterator) Remaining() int { return 0 }
func (e emptyTagIterator) Duplicate() TagIterator { return e }
+func (e emptyTagIterator) Rewind() {}
diff --git a/src/x/ident/tag_iterator_test.go b/src/x/ident/tag_iterator_test.go
index a857339a4a..84df106c15 100644
--- a/src/x/ident/tag_iterator_test.go
+++ b/src/x/ident/tag_iterator_test.go
@@ -195,3 +195,22 @@ func TestTagIteratorDuplicateFromPool(t *testing.T) {
require.Empty(t, expected)
require.Equal(t, expectedLen, clone.Remaining())
}
+
+func TestTagSliceIteratorHas(t *testing.T) {
+ iter := NewTagsIterator(NewTags(
+ StringTag("foo", "bar"),
+ StringTag("qux", "qaz"),
+ ))
+ testTagIteratorValues(t, map[string]string{
+ "foo": "bar",
+ "qux": "qaz",
+ }, iter)
+ iter.Reset(NewTags(
+ StringTag("foo", "bar"),
+ StringTag("baz", "qux"),
+ ))
+ testTagIteratorValues(t, map[string]string{
+ "foo": "bar",
+ "baz": "qux",
+ }, iter)
+}
diff --git a/src/x/ident/types.go b/src/x/ident/types.go
index 511f963465..0334cc546c 100644
--- a/src/x/ident/types.go
+++ b/src/x/ident/types.go
@@ -24,6 +24,7 @@ package ident
import (
"fmt"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/context"
)
@@ -216,8 +217,11 @@ type TagIterator interface {
// Remaining returns the number of elements remaining to be iterated over.
Remaining() int
- // Dupe returns an independent duplicate of the iterator.
+ // Duplicate returns an independent duplicate of the iterator.
Duplicate() TagIterator
+
+ // Rewind resets the tag iterator to the initial position.
+ Rewind()
}
// TagsIterator represents a TagIterator that can be reset with a Tags
@@ -227,6 +231,9 @@ type TagsIterator interface {
// Reset allows the tag iterator to be reused with a new set of tags.
Reset(tags Tags)
+
+ // ResetFields allows tag iterator to be reused from a set of fields.
+ ResetFields(fields []doc.Field)
}
// Tags is a collection of Tag instances that can be pooled.
diff --git a/src/x/instrument/config.go b/src/x/instrument/config.go
index b50fb4ec55..110fd3e8f1 100644
--- a/src/x/instrument/config.go
+++ b/src/x/instrument/config.go
@@ -24,6 +24,7 @@ import (
"errors"
"fmt"
"io"
+ "net"
"time"
prom "github.com/m3db/prometheus_client_golang/prometheus"
@@ -59,7 +60,7 @@ type MetricsConfiguration struct {
M3Reporter *m3.Configuration `yaml:"m3"`
// Prometheus reporter configuration.
- PrometheusReporter *prometheus.Configuration `yaml:"prometheus"`
+ PrometheusReporter *PrometheusConfiguration `yaml:"prometheus"`
// Metrics sampling rate.
SamplingRate float64 `yaml:"samplingRate" validate:"nonzero,min=0.0,max=1.0"`
@@ -97,9 +98,11 @@ type MetricsConfigurationPrometheusReporter struct {
Registry *prom.Registry
}
-// NewRootScopeAndReportersOptions is a set of options
+// NewRootScopeAndReportersOptions is a set of options.
type NewRootScopeAndReportersOptions struct {
- OnError func(e error)
+ PrometheusHandlerListener net.Listener
+ PrometheusExternalRegistries []PrometheusExternalRegistry
+ PrometheusOnError func(e error)
}
// NewRootScopeAndReporters creates a new tally.Scope based on a tally.CachedStatsReporter
@@ -112,16 +115,6 @@ func (mc *MetricsConfiguration) NewRootScopeAndReporters(
MetricsConfigurationReporters,
error,
) {
- // Set a default on error method for sane handling when registering metrics
- // results in an error with the Prometheus reporter.
- onError := func(e error) {
- logger := NewOptions().Logger()
- logger.Error("register metrics error", zap.Error(e))
- }
- if opts.OnError != nil {
- onError = opts.OnError
- }
-
var result MetricsConfigurationReporters
if mc.M3Reporter != nil {
r, err := mc.M3Reporter.NewReporter()
@@ -134,6 +127,16 @@ func (mc *MetricsConfiguration) NewRootScopeAndReporters(
}
}
if mc.PrometheusReporter != nil {
+ // Set a default on error method for sane handling when registering metrics
+ // results in an error with the Prometheus reporter.
+ onError := func(e error) {
+ logger := NewOptions().Logger()
+ logger.Error("register metrics error", zap.Error(e))
+ }
+ if opts.PrometheusOnError != nil {
+ onError = opts.PrometheusOnError
+ }
+
// Override the default registry with an empty one that does not have the default
// registered collectors (Go and Process). The M3 reporters will emit the Go metrics
// and the Process metrics are reported by both the M3 process reporter and a
@@ -153,14 +156,40 @@ func (mc *MetricsConfiguration) NewRootScopeAndReporters(
})); err != nil {
return nil, nil, MetricsConfigurationReporters{}, fmt.Errorf("could not create process collector: %v", err)
}
- opts := prometheus.ConfigurationOptions{
- Registry: registry,
- OnError: onError,
+ opts := PrometheusConfigurationOptions{
+ Registry: registry,
+ ExternalRegistries: opts.PrometheusExternalRegistries,
+ HandlerListener: opts.PrometheusHandlerListener,
+ OnError: onError,
}
+
+ // Use the instrument package's default histogram buckets if not set.
+ if len(mc.PrometheusReporter.DefaultHistogramBuckets) == 0 {
+ for _, v := range DefaultHistogramTimerHistogramBuckets().AsValues() {
+ bucket := prometheus.HistogramObjective{
+ Upper: v,
+ }
+ mc.PrometheusReporter.DefaultHistogramBuckets =
+ append(mc.PrometheusReporter.DefaultHistogramBuckets, bucket)
+ }
+ }
+
+ if len(mc.PrometheusReporter.DefaultSummaryObjectives) == 0 {
+ for k, v := range DefaultSummaryQuantileObjectives() {
+ q := prometheus.SummaryObjective{
+ Percentile: k,
+ AllowedError: v,
+ }
+ mc.PrometheusReporter.DefaultSummaryObjectives =
+ append(mc.PrometheusReporter.DefaultSummaryObjectives, q)
+ }
+ }
+
r, err := mc.PrometheusReporter.NewReporter(opts)
if err != nil {
return nil, nil, MetricsConfigurationReporters{}, err
}
+
result.AllReporters = append(result.AllReporters, r)
result.PrometheusReporter = &MetricsConfigurationPrometheusReporter{
Reporter: r,
diff --git a/src/x/instrument/config_prometheus.go b/src/x/instrument/config_prometheus.go
new file mode 100644
index 0000000000..9c15c1ec8b
--- /dev/null
+++ b/src/x/instrument/config_prometheus.go
@@ -0,0 +1,330 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package instrument
+
+import (
+ "fmt"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "strings"
+
+ prom "github.com/m3db/prometheus_client_golang/prometheus"
+ "github.com/m3db/prometheus_client_golang/prometheus/promhttp"
+ dto "github.com/m3db/prometheus_client_model/go"
+ extprom "github.com/prometheus/client_golang/prometheus"
+ "github.com/uber-go/tally/prometheus"
+)
+
+// PrometheusConfiguration is a configuration for a Prometheus reporter.
+type PrometheusConfiguration struct {
+ // HandlerPath if specified will be used instead of using the default
+ // HTTP handler path "/metrics".
+ HandlerPath string `yaml:"handlerPath"`
+
+ // ListenAddress if specified will be used instead of just registering the
+ // handler on the default HTTP serve mux without listening.
+ ListenAddress string `yaml:"listenAddress"`
+
+ // TimerType is the default Prometheus type to use for Tally timers.
+ TimerType string `yaml:"timerType"`
+
+ // DefaultHistogramBuckets if specified will set the default histogram
+ // buckets to be used by the reporter.
+ DefaultHistogramBuckets []prometheus.HistogramObjective `yaml:"defaultHistogramBuckets"`
+
+ // DefaultSummaryObjectives if specified will set the default summary
+ // objectives to be used by the reporter.
+ DefaultSummaryObjectives []prometheus.SummaryObjective `yaml:"defaultSummaryObjectives"`
+
+ // OnError specifies what to do when an error occurs, either while
+ // listening on the specified listen address or while registering a
+ // metric with Prometheus. Valid values are "stderr", "log", and "none";
+ // by default the registerer will panic.
+ OnError string `yaml:"onError"`
+}
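+
+// An illustrative YAML sketch for this configuration (all values are
+// examples only):
+//
+//   prometheus:
+//     handlerPath: /metrics
+//     listenAddress: 0.0.0.0:7203
+//     timerType: histogram
+//     onError: log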
+
+// HistogramObjective is a Prometheus histogram bucket.
+// See: https://godoc.org/github.com/prometheus/client_golang/prometheus#HistogramOpts
+type HistogramObjective struct {
+ Upper float64 `yaml:"upper"`
+}
+
+// SummaryObjective is a Prometheus summary objective.
+// See: https://godoc.org/github.com/prometheus/client_golang/prometheus#SummaryOpts
+type SummaryObjective struct {
+ Percentile float64 `yaml:"percentile"`
+ AllowedError float64 `yaml:"allowedError"`
+}
+
+// PrometheusConfigurationOptions allows some programmatic options, such as
+// using a specific registry and which error callback to register.
+type PrometheusConfigurationOptions struct {
+ // Registry if not nil will specify the specific registry to use
+ // for registering metrics.
+ Registry *prom.Registry
+ // ExternalRegistries, if set (in combination with a specified Registry),
+ // will also be gathered and exposed by the handler.
+ ExternalRegistries []PrometheusExternalRegistry
+ // HandlerListener is the listener to register the server handler on.
+ HandlerListener net.Listener
+ // HandlerOpts is the reporter HTTP handler options; if not specified,
+ // defaults are used.
+ HandlerOpts promhttp.HandlerOpts
+ // OnError allows for customization of what to do when metric
+ // registration fails; the default is to panic.
+ OnError func(e error)
+}
+
+// PrometheusExternalRegistry is an external Prometheus registry
+// to also expose as part of the handler.
+type PrometheusExternalRegistry struct {
+ // Registry is the external prometheus registry to list.
+ Registry *extprom.Registry
+ // SubScope will add a prefix to all metric names exported by
+ // this registry.
+ SubScope string
+}
+
+// NewReporter creates a new M3 Prometheus reporter from this configuration.
+func (c PrometheusConfiguration) NewReporter(
+ configOpts PrometheusConfigurationOptions,
+) (prometheus.Reporter, error) {
+ var opts prometheus.Options
+
+ if configOpts.Registry != nil {
+ opts.Registerer = configOpts.Registry
+ }
+
+ if configOpts.OnError != nil {
+ opts.OnRegisterError = configOpts.OnError
+ } else {
+ switch c.OnError {
+ case "stderr":
+ opts.OnRegisterError = func(err error) {
+ fmt.Fprintf(os.Stderr, "tally prometheus reporter error: %v\n", err)
+ }
+ case "log":
+ opts.OnRegisterError = func(err error) {
+ log.Printf("tally prometheus reporter error: %v\n", err)
+ }
+ case "none":
+ opts.OnRegisterError = func(err error) {}
+ default:
+ opts.OnRegisterError = func(err error) {
+ panic(err)
+ }
+ }
+ }
+
+ switch c.TimerType {
+ case "summary":
+ opts.DefaultTimerType = prometheus.SummaryTimerType
+ case "histogram":
+ opts.DefaultTimerType = prometheus.HistogramTimerType
+ }
+
+ if len(c.DefaultHistogramBuckets) > 0 {
+ values := make([]float64, 0, len(c.DefaultHistogramBuckets))
+ for _, value := range c.DefaultHistogramBuckets {
+ values = append(values, value.Upper)
+ }
+ opts.DefaultHistogramBuckets = values
+ }
+
+ if len(c.DefaultSummaryObjectives) > 0 {
+ values := make(map[float64]float64, len(c.DefaultSummaryObjectives))
+ for _, value := range c.DefaultSummaryObjectives {
+ values[value.Percentile] = value.AllowedError
+ }
+ opts.DefaultSummaryObjectives = values
+ }
+
+ reporter := prometheus.NewReporter(opts)
+
+ path := "/metrics"
+ if handlerPath := strings.TrimSpace(c.HandlerPath); handlerPath != "" {
+ path = handlerPath
+ }
+
+ handler := reporter.HTTPHandler()
+ if configOpts.Registry != nil {
+ gatherer := newMultiGatherer(configOpts.Registry, configOpts.ExternalRegistries)
+ handler = promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})
+ }
+
+ addr := strings.TrimSpace(c.ListenAddress)
+ if addr == "" && configOpts.HandlerListener == nil {
+ // If address not specified and server not specified, register
+ // on default mux.
+ http.Handle(path, handler)
+ } else {
+ mux := http.NewServeMux()
+ mux.Handle(path, handler)
+
+ listener := configOpts.HandlerListener
+ if listener == nil {
+ // Address must be specified if the listener was nil.
+ var err error
+ listener, err = net.Listen("tcp", addr)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "prometheus handler listen address error: %v", err)
+ }
+ }
+
+ go func() {
+ server := &http.Server{Handler: mux}
+ if err := server.Serve(listener); err != nil {
+ opts.OnRegisterError(err)
+ }
+ }()
+ }
+
+ return reporter, nil
+}
+
+func newMultiGatherer(
+ primary *prom.Registry,
+ ext []PrometheusExternalRegistry,
+) prom.Gatherer {
+ return &multiGatherer{
+ primary: primary,
+ ext: ext,
+ }
+}
+
+var _ prom.Gatherer = (*multiGatherer)(nil)
+
+type multiGatherer struct {
+ primary *prom.Registry
+ ext []PrometheusExternalRegistry
+}
+
+func (g *multiGatherer) Gather() ([]*dto.MetricFamily, error) {
+ results, err := g.primary.Gather()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(g.ext) == 0 {
+ return results, nil
+ }
+
+ for _, secondary := range g.ext {
+ gathered, err := secondary.Registry.Gather()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, elem := range gathered {
+ entry := &dto.MetricFamily{
+ Name: elem.Name,
+ Help: elem.Help,
+ Metric: make([]*dto.Metric, 0, len(elem.Metric)),
+ }
+
+ if secondary.SubScope != "" && entry.Name != nil {
+ scopedName := fmt.Sprintf("%s_%s", secondary.SubScope, *entry.Name)
+ entry.Name = &scopedName
+ }
+
+ if v := elem.Type; v != nil {
+ metricType := dto.MetricType(*v)
+ entry.Type = &metricType
+ }
+
+ for _, metricElem := range elem.Metric {
+ metricEntry := &dto.Metric{
+ Label: make([]*dto.LabelPair, 0, len(metricElem.Label)),
+ TimestampMs: metricElem.TimestampMs,
+ }
+
+ if v := metricElem.Gauge; v != nil {
+ metricEntry.Gauge = &dto.Gauge{
+ Value: v.Value,
+ }
+ }
+
+ if v := metricElem.Counter; v != nil {
+ metricEntry.Counter = &dto.Counter{
+ Value: v.Value,
+ }
+ }
+
+ if v := metricElem.Summary; v != nil {
+ metricEntry.Summary = &dto.Summary{
+ SampleCount: v.SampleCount,
+ SampleSum: v.SampleSum,
+ Quantile: make([]*dto.Quantile, 0, len(v.Quantile)),
+ }
+
+ for _, quantileElem := range v.Quantile {
+ quantileEntry := &dto.Quantile{
+ Quantile: quantileElem.Quantile,
+ Value: quantileElem.Value,
+ }
+ metricEntry.Summary.Quantile =
+ append(metricEntry.Summary.Quantile, quantileEntry)
+ }
+ }
+
+ if v := metricElem.Untyped; v != nil {
+ metricEntry.Untyped = &dto.Untyped{
+ Value: v.Value,
+ }
+ }
+
+ if v := metricElem.Histogram; v != nil {
+ metricEntry.Histogram = &dto.Histogram{
+ SampleCount: v.SampleCount,
+ SampleSum: v.SampleSum,
+ Bucket: make([]*dto.Bucket, 0, len(v.Bucket)),
+ }
+
+ for _, bucketElem := range v.Bucket {
+ bucketEntry := &dto.Bucket{
+ CumulativeCount: bucketElem.CumulativeCount,
+ UpperBound: bucketElem.UpperBound,
+ }
+ metricEntry.Histogram.Bucket =
+ append(metricEntry.Histogram.Bucket, bucketEntry)
+ }
+ }
+
+ for _, labelElem := range metricElem.Label {
+ labelEntry := &dto.LabelPair{
+ Name: labelElem.Name,
+ Value: labelElem.Value,
+ }
+
+ metricEntry.Label = append(metricEntry.Label, labelEntry)
+ }
+
+ entry.Metric = append(entry.Metric, metricEntry)
+ }
+
+ results = append(results, entry)
+ }
+ }
+
+ return results, nil
+}
diff --git a/src/x/instrument/config_test.go b/src/x/instrument/config_test.go
new file mode 100644
index 0000000000..83b208674c
--- /dev/null
+++ b/src/x/instrument/config_test.go
@@ -0,0 +1,254 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package instrument
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/http"
+ "testing"
+
+ xjson "github.com/m3db/m3/src/x/json"
+ "github.com/sergi/go-diff/diffmatchpatch"
+
+ "github.com/golang/protobuf/jsonpb"
+ extprom "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/expfmt"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPrometheusDefaults(t *testing.T) {
+ sanitization := PrometheusMetricSanitization
+ extended := DetailedExtendedMetrics
+ cfg := MetricsConfiguration{
+ Sanitization: &sanitization,
+ SamplingRate: 1,
+ PrometheusReporter: &PrometheusConfiguration{
+ HandlerPath: "/metrics",
+ ListenAddress: "0.0.0.0:0",
+ TimerType: "histogram",
+ },
+ ExtendedMetrics: &extended,
+ }
+ _, closer, reporters, err := cfg.NewRootScopeAndReporters(
+ NewRootScopeAndReportersOptions{})
+ require.NoError(t, err)
+ require.NotNil(t, reporters.PrometheusReporter)
+
+ defer closer.Close()
+
+ // Make sure default histogram buckets were populated.
+ numDefaultBuckets := DefaultHistogramTimerHistogramBuckets().Len()
+ require.True(t, numDefaultBuckets > 0)
+ require.Equal(t, numDefaultBuckets, len(cfg.PrometheusReporter.DefaultHistogramBuckets))
+
+ // Make sure default summary objectives were populated.
+ numQuantiles := len(DefaultSummaryQuantileObjectives())
+ require.True(t, numQuantiles > 0)
+ require.Equal(t, numQuantiles, len(cfg.PrometheusReporter.DefaultSummaryObjectives))
+}
+
+func TestPrometheusExternalRegistries(t *testing.T) {
+ extReg1 := PrometheusExternalRegistry{
+ Registry: extprom.NewRegistry(),
+ SubScope: "ext1",
+ }
+ extReg2 := PrometheusExternalRegistry{
+ Registry: extprom.NewRegistry(),
+ SubScope: "ext2",
+ }
+
+ sanitization := PrometheusMetricSanitization
+ extended := DetailedExtendedMetrics
+ cfg := MetricsConfiguration{
+ Sanitization: &sanitization,
+ SamplingRate: 1,
+ PrometheusReporter: &PrometheusConfiguration{
+ HandlerPath: "/metrics",
+ ListenAddress: "0.0.0.0:0",
+ TimerType: "histogram",
+ },
+ ExtendedMetrics: &extended,
+ }
+
+ listener, err := net.Listen("tcp", ":0")
+ require.NoError(t, err)
+
+ scope, closer, reporters, err := cfg.NewRootScopeAndReporters(
+ NewRootScopeAndReportersOptions{
+ PrometheusHandlerListener: listener,
+ PrometheusExternalRegistries: []PrometheusExternalRegistry{
+ extReg1,
+ extReg2,
+ },
+ })
+ require.NoError(t, err)
+ require.NotNil(t, reporters.PrometheusReporter)
+
+ foo := scope.Tagged(map[string]string{
+ "test": t.Name(),
+ }).Counter("foo")
+ foo.Inc(3)
+
+ bar := extprom.NewCounterVec(extprom.CounterOpts{
+ Name: "bar",
+ Help: "bar help",
+ }, []string{
+ "test",
+ }).With(map[string]string{
+ "test": t.Name(),
+ })
+ extReg1.Registry.MustRegister(bar)
+ bar.Inc()
+
+ baz := extprom.NewCounterVec(extprom.CounterOpts{
+ Name: "baz",
+ Help: "baz help",
+ }, []string{
+ "test",
+ }).With(map[string]string{
+ "test": t.Name(),
+ })
+ extReg2.Registry.MustRegister(baz)
+ baz.Inc()
+ baz.Inc()
+
+ // Wait for report.
+ require.NoError(t, closer.Close())
+
+ url := fmt.Sprintf("http://%s/metrics", listener.Addr().String())
+ resp, err := http.Get(url)
+ require.NoError(t, err)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+
+ defer resp.Body.Close()
+
+ var parser expfmt.TextParser
+ metricFamilies, err := parser.TextToMetricFamilies(resp.Body)
+ require.NoError(t, err)
+
+ expected := map[string]xjson.Map{
+ "foo": xjson.Map{
+ "name": "foo",
+ "help": "foo counter",
+ "type": "COUNTER",
+ "metric": xjson.Array{
+ xjson.Map{
+ "counter": xjson.Map{"value": 3},
+ "label": xjson.Array{
+ xjson.Map{
+ "name": "test",
+ "value": t.Name(),
+ },
+ },
+ },
+ },
+ },
+ "ext1_bar": xjson.Map{
+ "name": "ext1_bar",
+ "help": "bar help",
+ "type": "COUNTER",
+ "metric": xjson.Array{
+ xjson.Map{
+ "counter": xjson.Map{"value": 1},
+ "label": xjson.Array{
+ xjson.Map{
+ "name": "test",
+ "value": t.Name(),
+ },
+ },
+ },
+ },
+ },
+ "ext2_baz": xjson.Map{
+ "name": "ext2_baz",
+ "help": "baz help",
+ "type": "COUNTER",
+ "metric": xjson.Array{
+ xjson.Map{
+ "counter": xjson.Map{"value": 2},
+ "label": xjson.Array{
+ xjson.Map{
+ "name": "test",
+ "value": t.Name(),
+ },
+ },
+ },
+ },
+ },
+ }
+
+ expectMatch := len(expected)
+ actualMatch := 0
+ for k, v := range metricFamilies {
+ data, err := (&jsonpb.Marshaler{}).MarshalToString(v)
+ require.NoError(t, err)
+ // Turn this on for debugging:
+ // fmt.Printf("metric received: key=%s, value=%s\n", k, data)
+
+ expect, ok := expected[k]
+ if !ok {
+ continue
+ }
+
+		// Mark matched and count it.
+		delete(expected, k)
+		actualMatch++
+
+ expectJSON := mustPrettyJSONMap(t, expect)
+ actualJSON := mustPrettyJSONString(t, data)
+
+ require.Equal(t, expectJSON, actualJSON,
+ diff(expectJSON, actualJSON))
+ }
+
+ var remaining []string
+ for k := range expected {
+ remaining = append(remaining, k)
+ }
+
+ t.Logf("matched expected metrics: expected=%d, actual=%d",
+ expectMatch, actualMatch)
+
+ require.Equal(t, 0, len(remaining),
+ fmt.Sprintf("did not match expected metrics: %v", remaining))
+}
+
+func mustPrettyJSONMap(t *testing.T, value xjson.Map) string {
+ pretty, err := json.MarshalIndent(value, "", " ")
+ require.NoError(t, err)
+ return string(pretty)
+}
+
+func mustPrettyJSONString(t *testing.T, str string) string {
+ var unmarshalled map[string]interface{}
+ err := json.Unmarshal([]byte(str), &unmarshalled)
+ require.NoError(t, err)
+ pretty, err := json.MarshalIndent(unmarshalled, "", " ")
+ require.NoError(t, err)
+ return string(pretty)
+}
+
+func diff(expected, actual string) string {
+ dmp := diffmatchpatch.New()
+ diffs := dmp.DiffMain(expected, actual, false)
+ return dmp.DiffPrettyText(diffs)
+}
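
For context, a minimal sketch of wiring up the Prometheus reporter this test exercises; the field names mirror the test above, while the listen address and counter name are illustrative assumptions:

```go
package main

import (
	"log"

	"github.com/m3db/m3/src/x/instrument"
)

func main() {
	sanitization := instrument.PrometheusMetricSanitization
	extended := instrument.DetailedExtendedMetrics
	cfg := instrument.MetricsConfiguration{
		Sanitization: &sanitization,
		SamplingRate: 1,
		PrometheusReporter: &instrument.PrometheusConfiguration{
			HandlerPath:   "/metrics",
			ListenAddress: "0.0.0.0:7203", // hypothetical port
			TimerType:     "histogram",
		},
		ExtendedMetrics: &extended,
	}

	// Build the root scope; reporters are also returned for external use.
	scope, closer, _, err := cfg.NewRootScopeAndReporters(
		instrument.NewRootScopeAndReportersOptions{})
	if err != nil {
		log.Fatal(err)
	}
	defer closer.Close()

	// Counters emitted on the scope are served at /metrics.
	scope.Counter("requests").Inc(1)
}
```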
diff --git a/src/x/instrument/methods.go b/src/x/instrument/methods.go
index 98991821bf..f21a111957 100644
--- a/src/x/instrument/methods.go
+++ b/src/x/instrument/methods.go
@@ -32,6 +32,171 @@ var (
nullStopWatchStart tally.Stopwatch
)
+// TimerType is the type of a timer: standard or histogram.
+type TimerType uint
+
+const (
+	// StandardTimerType is a standard timer type backed by a regular timer.
+ StandardTimerType TimerType = iota
+ // HistogramTimerType is a histogram timer backed by a histogram.
+ HistogramTimerType
+)
+
+// TimerOptions is a set of timer options when creating a timer.
+type TimerOptions struct {
+ Type TimerType
+ StandardSampleRate float64
+ HistogramBuckets tally.Buckets
+}
+
+// NewTimer creates a new timer based on the timer options.
+func (o TimerOptions) NewTimer(scope tally.Scope, name string) tally.Timer {
+ return NewTimer(scope, name, o)
+}
+
+// DefaultHistogramTimerHistogramBuckets returns the default histogram
+// timer buckets, ranging from 2ms up to 1hr.
+func DefaultHistogramTimerHistogramBuckets() tally.Buckets {
+ return tally.ValueBuckets{
+ 0.002,
+ 0.004,
+ 0.006,
+ 0.008,
+ 0.01,
+ 0.02,
+ 0.04,
+ 0.06,
+ 0.08,
+ 0.1,
+ 0.2,
+ 0.4,
+ 0.6,
+ 0.8,
+ 1,
+ 1.5,
+ 2,
+ 2.5,
+ 3,
+ 3.5,
+ 4,
+ 4.5,
+ 5,
+ 5.5,
+ 6,
+ 6.5,
+ 7,
+ 7.5,
+ 8,
+ 8.5,
+ 9,
+ 9.5,
+ 10,
+ 15,
+ 20,
+ 25,
+ 30,
+ 35,
+ 40,
+ 45,
+ 50,
+ 55,
+ 60,
+ 150,
+ 300,
+ 450,
+ 600,
+ 900,
+ 1200,
+ 1500,
+ 1800,
+ 2100,
+ 2400,
+ 2700,
+ 3000,
+ 3300,
+ 3600,
+ }
+}
+
+// DefaultSummaryQuantileObjectives is a set of default summary
+// quantile objectives and allowed error thresholds.
+func DefaultSummaryQuantileObjectives() map[float64]float64 {
+ return map[float64]float64{
+ 0.5: 0.01,
+ 0.75: 0.001,
+ 0.95: 0.001,
+ 0.99: 0.001,
+ 0.999: 0.0001,
+ }
+}
+
+// NewStandardTimerOptions returns a set of standard timer options for
+// standard timer types.
+func NewStandardTimerOptions() TimerOptions {
+ return TimerOptions{Type: StandardTimerType}
+}
+
+// HistogramTimerOptions is a set of histogram timer options.
+type HistogramTimerOptions struct {
+ HistogramBuckets tally.Buckets
+}
+
+// NewHistogramTimerOptions returns a set of histogram timer options;
+// if no histogram buckets are set, the default histogram buckets
+// are used.
+func NewHistogramTimerOptions(opts HistogramTimerOptions) TimerOptions {
+ result := TimerOptions{Type: HistogramTimerType}
+ if opts.HistogramBuckets != nil && opts.HistogramBuckets.Len() > 0 {
+ result.HistogramBuckets = opts.HistogramBuckets
+ } else {
+ result.HistogramBuckets = DefaultHistogramTimerHistogramBuckets()
+ }
+ return result
+}
+
+var _ tally.Timer = (*timer)(nil)
+
+// timer is a timer that can be backed by a timer or a histogram
+// depending on TimerOptions.
+type timer struct {
+ TimerOptions
+ timer tally.Timer
+ histogram tally.Histogram
+}
+
+// NewTimer returns a new timer that is backed by a timer or a histogram
+// based on the timer options.
+func NewTimer(scope tally.Scope, name string, opts TimerOptions) tally.Timer {
+ t := &timer{TimerOptions: opts}
+ switch t.Type {
+ case HistogramTimerType:
+ t.histogram = scope.Histogram(name, opts.HistogramBuckets)
+ default:
+ t.timer = scope.Timer(name)
+ if rate := opts.StandardSampleRate; validRate(rate) {
+ t.timer = newSampledTimer(t.timer, rate)
+ }
+ }
+ return t
+}
+
+func (t *timer) Record(v time.Duration) {
+ switch t.Type {
+ case HistogramTimerType:
+ t.histogram.RecordDuration(v)
+ default:
+ t.timer.Record(v)
+ }
+}
+
+func (t *timer) Start() tally.Stopwatch {
+ return tally.NewStopwatch(time.Now(), t)
+}
+
+func (t *timer) RecordStopwatch(stopwatchStart time.Time) {
+ t.Record(time.Since(stopwatchStart))
+}
+
// sampledTimer is a sampled timer that implements the tally timer interface.
// NB(xichen): the sampling logic should eventually be implemented in tally.
type sampledTimer struct {
@@ -43,13 +208,25 @@ type sampledTimer struct {
// NewSampledTimer creates a new sampled timer.
func NewSampledTimer(base tally.Timer, rate float64) (tally.Timer, error) {
- if rate <= 0.0 || rate > 1.0 {
+ if !validRate(rate) {
return nil, fmt.Errorf("sampling rate %f must be between 0.0 and 1.0", rate)
}
+ return newSampledTimer(base, rate), nil
+}
+
+func validRate(rate float64) bool {
+ return rate > 0.0 && rate <= 1.0
+}
+
+func newSampledTimer(base tally.Timer, rate float64) tally.Timer {
+ if rate == 1.0 {
+		// Avoid the overhead of deciding whether to sample on each call.
+ return base
+ }
return &sampledTimer{
Timer: base,
rate: uint64(1.0 / rate),
- }, nil
+ }
}
// MustCreateSampledTimer creates a new sampled timer, and panics if an error
@@ -118,12 +295,12 @@ func (m *MethodMetrics) ReportSuccessOrError(e error, d time.Duration) {
}
// NewMethodMetrics returns a new Method metrics for the given method name.
-func NewMethodMetrics(scope tally.Scope, methodName string, samplingRate float64) MethodMetrics {
+func NewMethodMetrics(scope tally.Scope, methodName string, opts TimerOptions) MethodMetrics {
return MethodMetrics{
Errors: scope.Counter(methodName + ".errors"),
Success: scope.Counter(methodName + ".success"),
- ErrorsLatency: MustCreateSampledTimer(scope.Timer(methodName+".errors-latency"), samplingRate),
- SuccessLatency: MustCreateSampledTimer(scope.Timer(methodName+".success-latency"), samplingRate),
+ ErrorsLatency: NewTimer(scope, methodName+".errors-latency", opts),
+ SuccessLatency: NewTimer(scope, methodName+".success-latency", opts),
}
}
@@ -140,14 +317,14 @@ type BatchMethodMetrics struct {
func NewBatchMethodMetrics(
scope tally.Scope,
methodName string,
- samplingRate float64,
+ opts TimerOptions,
) BatchMethodMetrics {
return BatchMethodMetrics{
RetryableErrors: scope.Counter(methodName + ".retryable-errors"),
NonRetryableErrors: scope.Counter(methodName + ".non-retryable-errors"),
Errors: scope.Counter(methodName + ".errors"),
Success: scope.Counter(methodName + ".success"),
- Latency: MustCreateSampledTimer(scope.Timer(methodName+".latency"), samplingRate),
+ Latency: NewTimer(scope, methodName+".latency", opts),
}
}
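
A minimal sketch of the TimerOptions API introduced above, assuming a tally scope is at hand: the same call site can emit either a histogram-backed timer with the default buckets or, via NewStandardTimerOptions, a classic sampled timer.

```go
package main

import (
	"time"

	"github.com/m3db/m3/src/x/instrument"
	"github.com/uber-go/tally"
)

func main() {
	scope := tally.NoopScope

	// Histogram-backed timer using the default buckets (2ms up to 1hr).
	histOpts := instrument.NewHistogramTimerOptions(
		instrument.HistogramTimerOptions{})
	timer := instrument.NewTimer(scope, "request-latency", histOpts)

	sw := timer.Start()
	time.Sleep(5 * time.Millisecond)
	sw.Stop()

	// Method metrics now take TimerOptions instead of a sampling rate.
	mm := instrument.NewMethodMetrics(scope, "fetch", histOpts)
	mm.ReportSuccessOrError(nil, 5*time.Millisecond)
}
```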
diff --git a/src/x/instrument/options.go b/src/x/instrument/options.go
index a72fb523c0..0ddff6ad1f 100644
--- a/src/x/instrument/options.go
+++ b/src/x/instrument/options.go
@@ -40,6 +40,7 @@ type options struct {
scope tally.Scope
tracer opentracing.Tracer
samplingRate float64
+ timerOptions TimerOptions
reportInterval time.Duration
}
@@ -86,14 +87,14 @@ func (o *options) SetTracer(tracer opentracing.Tracer) Options {
return &opts
}
-func (o *options) SetMetricsSamplingRate(value float64) Options {
+func (o *options) SetTimerOptions(value TimerOptions) Options {
opts := *o
- opts.samplingRate = value
+ opts.timerOptions = value
return &opts
}
-func (o *options) MetricsSamplingRate() float64 {
- return o.samplingRate
+func (o *options) TimerOptions() TimerOptions {
+ return o.timerOptions
}
func (o *options) SetReportInterval(value time.Duration) Options {
diff --git a/src/x/instrument/types.go b/src/x/instrument/types.go
index d686acd22b..98fab9b969 100644
--- a/src/x/instrument/types.go
+++ b/src/x/instrument/types.go
@@ -58,11 +58,13 @@ type Options interface {
// SetTracer sets the tracer.
SetTracer(tracer opentracing.Tracer) Options
- // SetMetricsSamplingRate sets the metrics sampling rate.
- SetMetricsSamplingRate(value float64) Options
+	// SetTimerOptions sets the metrics timer options to use
+	// when building timers.
+ SetTimerOptions(value TimerOptions) Options
- // SetMetricsSamplingRate returns the metrics sampling rate.
- MetricsSamplingRate() float64
+	// TimerOptions returns the metrics timer options to use
+	// when building timers.
+ TimerOptions() TimerOptions
// ReportInterval sets the time between reporting metrics within the system.
SetReportInterval(time.Duration) Options
diff --git a/src/x/io/read.go b/src/x/io/read.go
new file mode 100644
index 0000000000..5d38b94e28
--- /dev/null
+++ b/src/x/io/read.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package io
+
+import (
+ "bufio"
+ "io"
+)
+
+// ResettableReader is a resettable reader.
+type ResettableReader interface {
+ io.Reader
+ Reset(r io.Reader)
+}
+
+// ResettableReaderOptions are options for a resettable reader.
+type ResettableReaderOptions struct {
+ ReadBufferSize int
+}
+
+// ResettableReaderFn creates a resettable reader.
+type ResettableReaderFn func(r io.Reader, opts ResettableReaderOptions) ResettableReader
+
+// defaultResettableReaderFn creates a default resettable reader.
+func defaultResettableReaderFn() ResettableReaderFn {
+ return func(r io.Reader, opts ResettableReaderOptions) ResettableReader {
+ return bufio.NewReaderSize(r, opts.ReadBufferSize)
+ }
+}
diff --git a/src/x/io/rw.go b/src/x/io/rw.go
new file mode 100644
index 0000000000..6dae85c62f
--- /dev/null
+++ b/src/x/io/rw.go
@@ -0,0 +1,69 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package io
+
+// Options are options for resettable readers and writers.
+type Options interface {
+	// SetResettableReaderFn sets the reader function.
+	SetResettableReaderFn(value ResettableReaderFn) Options
+
+	// ResettableReaderFn returns the reader function.
+	ResettableReaderFn() ResettableReaderFn
+
+	// SetResettableWriterFn sets the writer function.
+	SetResettableWriterFn(value ResettableWriterFn) Options
+
+	// ResettableWriterFn returns the writer function.
+	ResettableWriterFn() ResettableWriterFn
+}
+
+type options struct {
+ resettableReaderFn ResettableReaderFn
+ resettableWriterFn ResettableWriterFn
+}
+
+// NewOptions creates a new set of read/write options.
+func NewOptions() Options {
+ return &options{
+ resettableReaderFn: defaultResettableReaderFn(),
+ resettableWriterFn: defaultResettableWriterFn(),
+ }
+}
+
+func (opts *options) SetResettableReaderFn(value ResettableReaderFn) Options {
+ o := *opts
+ o.resettableReaderFn = value
+ return &o
+}
+
+func (opts *options) ResettableReaderFn() ResettableReaderFn {
+ return opts.resettableReaderFn
+}
+
+func (opts *options) SetResettableWriterFn(value ResettableWriterFn) Options {
+ o := *opts
+ o.resettableWriterFn = value
+ return &o
+}
+
+func (opts *options) ResettableWriterFn() ResettableWriterFn {
+ return opts.resettableWriterFn
+}
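
A minimal sketch of overriding the reader constructor; the default wraps bufio, and any implementation satisfying ResettableReader can be substituted (the fixed 64KiB size here is an arbitrary assumption):

```go
package main

import (
	"bufio"
	gio "io"

	xio "github.com/m3db/m3/src/x/io"
)

func main() {
	opts := xio.NewOptions().
		SetResettableReaderFn(func(
			r gio.Reader,
			_ xio.ResettableReaderOptions,
		) xio.ResettableReader {
			// Assumption: fixed 64KiB buffer regardless of options.
			return bufio.NewReaderSize(r, 64*1024)
		})

	_ = opts.ResettableReaderFn()
	_ = opts.ResettableWriterFn() // still the bufio-backed default
}
```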
diff --git a/src/x/io/write.go b/src/x/io/write.go
new file mode 100644
index 0000000000..261b785263
--- /dev/null
+++ b/src/x/io/write.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package io
+
+import (
+ "bufio"
+ "io"
+)
+
+// ResettableWriter is a resettable writer.
+type ResettableWriter interface {
+ io.Writer
+ Flush() error
+ Reset(w io.Writer)
+}
+
+// ResettableWriterOptions are options for a resettable writer.
+type ResettableWriterOptions struct {
+ WriteBufferSize int
+}
+
+// ResettableWriterFn creates a resettable writer.
+type ResettableWriterFn func(r io.Writer, opts ResettableWriterOptions) ResettableWriter
+
+// defaultResettableWriterFn creates a default resettable writer.
+func defaultResettableWriterFn() ResettableWriterFn {
+ return func(r io.Writer, opts ResettableWriterOptions) ResettableWriter {
+ return bufio.NewWriterSize(r, opts.WriteBufferSize)
+ }
+}
diff --git a/src/x/json/json.go b/src/x/json/json.go
new file mode 100644
index 0000000000..621d819d3c
--- /dev/null
+++ b/src/x/json/json.go
@@ -0,0 +1,45 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package json allows for easy duck typing of JSON, i.e. for test stubs
+// to compare JSON output against (such as src/x/test.MustPrettyJSON).
+package json
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// Map is an untyped JSON map representation.
+type Map map[string]interface{}
+
+// Array is an untyped JSON array representation.
+type Array []interface{}
+
+// MustNewTestReader returns an io.Reader over the encoded JSON value.
+func MustNewTestReader(t *testing.T, value interface{}) io.Reader {
+ reader := bytes.NewBuffer(nil)
+ require.NoError(t, json.NewEncoder(reader).Encode(value))
+ return reader
+}
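
A small sketch of the duck-typed helpers in use: expected payloads are built as nested Map/Array values and serialized for comparison against real output.

```go
package main

import (
	"encoding/json"
	"fmt"

	xjson "github.com/m3db/m3/src/x/json"
)

func main() {
	expected := xjson.Map{
		"name": "foo",
		"metric": xjson.Array{
			xjson.Map{"counter": xjson.Map{"value": 3}},
		},
	}
	// Marshal the untyped value for comparison against actual output.
	data, err := json.Marshal(expected)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
```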
diff --git a/src/x/net/http/response.go b/src/x/net/http/response.go
index e31a1a10c9..29f9c1e79f 100644
--- a/src/x/net/http/response.go
+++ b/src/x/net/http/response.go
@@ -30,6 +30,26 @@ import (
"go.uber.org/zap"
)
+const (
+ // HeaderContentType is the HTTP Content Type header.
+ HeaderContentType = "Content-Type"
+
+ // ContentTypeJSON is the Content-Type value for a JSON response.
+ ContentTypeJSON = "application/json"
+
+ // ContentTypeFormURLEncoded is the Content-Type value for a URL-encoded form.
+ ContentTypeFormURLEncoded = "application/x-www-form-urlencoded"
+
+ // ContentTypeHTMLUTF8 is the Content-Type value for UTF8-encoded HTML.
+ ContentTypeHTMLUTF8 = "text/html; charset=utf-8"
+
+ // ContentTypeProtobuf is the Content-Type value for a Protobuf message.
+ ContentTypeProtobuf = "application/x-protobuf"
+
+ // ContentTypeOctetStream is the Content-Type value for binary data.
+ ContentTypeOctetStream = "application/octet-stream"
+)
+
// WriteJSONResponse writes generic data to the ResponseWriter
func WriteJSONResponse(w http.ResponseWriter, data interface{}, logger *zap.Logger) {
jsonData, err := json.Marshal(data)
@@ -39,7 +59,7 @@ func WriteJSONResponse(w http.ResponseWriter, data interface{}, logger *zap.Logg
return
}
- w.Header().Set("Content-Type", "application/json")
+ w.Header().Set(HeaderContentType, ContentTypeJSON)
w.Write(jsonData)
}
@@ -48,7 +68,7 @@ func WriteJSONResponse(w http.ResponseWriter, data interface{}, logger *zap.Logg
func WriteProtoMsgJSONResponse(w http.ResponseWriter, data proto.Message, logger *zap.Logger) {
marshaler := jsonpb.Marshaler{EmitDefaults: true}
- w.Header().Set("Content-Type", "application/json")
+ w.Header().Set(HeaderContentType, ContentTypeJSON)
err := marshaler.Marshal(w, data)
if err != nil {
logger.Error("unable to marshal json", zap.Error(err))
diff --git a/src/x/net/listen.go b/src/x/net/listen.go
new file mode 100644
index 0000000000..f6fdc7e23d
--- /dev/null
+++ b/src/x/net/listen.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package net
+
+import (
+ gonet "net"
+
+ "github.com/valyala/tcplisten"
+)
+
+var (
+ reusePortConfig = &tcplisten.Config{
+ ReusePort: true,
+ DeferAccept: true,
+ FastOpen: true,
+ }
+)
+
+// ListenerOptions is a set of listener options that
+// can create listeners.
+type ListenerOptions struct {
+ reusePort bool
+}
+
+// ListenerOption is a listener option that can
+// be applied to a set of listener options.
+type ListenerOption func(o *ListenerOptions)
+
+// ListenerReusePort is a listener option to reuse ports.
+func ListenerReusePort(value bool) ListenerOption {
+ return func(o *ListenerOptions) {
+		o.reusePort = value
+ }
+}
+
+// NewListenerOptions creates a set of listener options
+// with supplied options.
+func NewListenerOptions(opts ...ListenerOption) ListenerOptions {
+ var result ListenerOptions
+ for _, fn := range opts {
+ fn(&result)
+ }
+ return result
+}
+
+// Listen creates a new listener based on options.
+func (o ListenerOptions) Listen(
+ protocol, address string,
+) (gonet.Listener, error) {
+ if o.reusePort {
+ if protocol == "tcp" {
+ protocol = "tcp4"
+ }
+ return reusePortConfig.NewListener(protocol, address)
+ }
+ return gonet.Listen(protocol, address)
+}
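
A minimal sketch of binding with port reuse enabled; the address is an assumption. On Linux this goes through tcplisten with SO_REUSEPORT, deferred accept, and TCP fast open:

```go
package main

import (
	"log"

	xnet "github.com/m3db/m3/src/x/net"
)

func main() {
	opts := xnet.NewListenerOptions(xnet.ListenerReusePort(true))

	// "tcp" is coerced to "tcp4" when reuse-port is enabled.
	listener, err := opts.Listen("tcp", "0.0.0.0:9000")
	if err != nil {
		log.Fatal(err)
	}
	defer listener.Close()
}
```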
diff --git a/src/x/opentracing/tracing.go b/src/x/opentracing/tracing.go
index 8e0fe1d333..c55c89053a 100644
--- a/src/x/opentracing/tracing.go
+++ b/src/x/opentracing/tracing.go
@@ -140,7 +140,7 @@ func (cfg *TracingConfiguration) newLightstepTracer(serviceName string) (opentra
tracer, err := lightstep.CreateTracer(cfg.Lightstep)
if err != nil {
- return nil, nil, fmt.Errorf("failed to create jaeger tracer: %v", err)
+ return nil, nil, fmt.Errorf("failed to create lightstep tracer: %v", err)
}
closer := &lightstepCloser{tracer: tracer}
diff --git a/src/x/pool/object.go b/src/x/pool/object.go
index be186f99fe..2ef9c50b16 100644
--- a/src/x/pool/object.go
+++ b/src/x/pool/object.go
@@ -25,31 +25,31 @@ import (
"math"
"sync/atomic"
+ "github.com/m3db/m3/src/x/unsafe"
+
"github.com/uber-go/tally"
+ "golang.org/x/sys/cpu"
)
var (
- errPoolAlreadyInitialized = errors.New("object pool already initialized")
- errPoolGetBeforeInitialized = errors.New("object pool get before initialized")
- errPoolPutBeforeInitialized = errors.New("object pool put before initialized")
+ errPoolAlreadyInitialized = errors.New("object pool already initialized")
+ errPoolAccessBeforeInitialized = errors.New("object pool accessed before it was initialized")
)
-const (
- // TODO(r): Use tally sampling when available
- sampleObjectPoolLengthEvery = 100
-)
+const sampleObjectPoolLengthEvery = 2048
type objectPool struct {
- opts ObjectPoolOptions
+ _ cpu.CacheLinePad
values chan interface{}
+ _ cpu.CacheLinePad
alloc Allocator
+ metrics objectPoolMetrics
+ onPoolAccessErrorFn OnPoolAccessErrorFn
size int
refillLowWatermark int
refillHighWatermark int
filling int32
initialized int32
- dice int32
- metrics objectPoolMetrics
}
type objectPoolMetrics struct {
@@ -68,9 +68,7 @@ func NewObjectPool(opts ObjectPoolOptions) ObjectPool {
m := opts.InstrumentOptions().MetricsScope()
p := &objectPool{
- opts: opts,
- values: make(chan interface{}, opts.Size()),
- size: opts.Size(),
+ size: opts.Size(),
refillLowWatermark: int(math.Ceil(
opts.RefillLowWatermark() * float64(opts.Size()))),
refillHighWatermark: int(math.Ceil(
@@ -81,6 +79,12 @@ func NewObjectPool(opts ObjectPoolOptions) ObjectPool {
getOnEmpty: m.Counter("get-on-empty"),
putOnFull: m.Counter("put-on-full"),
},
+ onPoolAccessErrorFn: opts.OnPoolAccessErrorFn(),
+ alloc: func() interface{} {
+ fn := opts.OnPoolAccessErrorFn()
+ fn(errPoolAccessBeforeInitialized)
+ return nil
+ },
}
p.setGauges()
@@ -90,36 +94,37 @@ func NewObjectPool(opts ObjectPoolOptions) ObjectPool {
func (p *objectPool) Init(alloc Allocator) {
if !atomic.CompareAndSwapInt32(&p.initialized, 0, 1) {
- fn := p.opts.OnPoolAccessErrorFn()
- fn(errPoolAlreadyInitialized)
+ p.onPoolAccessErrorFn(errPoolAlreadyInitialized)
return
}
- p.alloc = alloc
-
+ p.values = make(chan interface{}, p.size)
for i := 0; i < cap(p.values); i++ {
- p.values <- p.alloc()
+ p.values <- alloc()
}
+ p.alloc = alloc
p.setGauges()
}
func (p *objectPool) Get() interface{} {
- if atomic.LoadInt32(&p.initialized) != 1 {
- fn := p.opts.OnPoolAccessErrorFn()
- fn(errPoolGetBeforeInitialized)
- return p.alloc()
- }
+ var (
+ metrics = p.metrics
+ v interface{}
+ )
- var v interface{}
select {
case v = <-p.values:
default:
v = p.alloc()
- p.metrics.getOnEmpty.Inc(1)
+ metrics.getOnEmpty.Inc(1)
}
- p.trySetGauges()
+ if unsafe.Fastrandn(sampleObjectPoolLengthEvery) == 0 {
+ // inlined setGauges()
+ metrics.free.Update(float64(len(p.values)))
+ metrics.total.Update(float64(p.size))
+ }
if p.refillLowWatermark > 0 && len(p.values) <= p.refillLowWatermark {
p.tryFill()
@@ -129,25 +134,15 @@ func (p *objectPool) Get() interface{} {
}
func (p *objectPool) Put(obj interface{}) {
- if atomic.LoadInt32(&p.initialized) != 1 {
- fn := p.opts.OnPoolAccessErrorFn()
- fn(errPoolPutBeforeInitialized)
+ if p.values == nil {
+ p.onPoolAccessErrorFn(errPoolAccessBeforeInitialized)
return
}
-
select {
case p.values <- obj:
default:
p.metrics.putOnFull.Inc(1)
}
-
- p.trySetGauges()
-}
-
-func (p *objectPool) trySetGauges() {
- if atomic.AddInt32(&p.dice, 1)%sampleObjectPoolLengthEvery == 0 {
- p.setGauges()
- }
}
func (p *objectPool) setGauges() {
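
A minimal sketch of the pool lifecycle this refactor preserves: Init must run before Get/Put, and misuse is surfaced through OnPoolAccessErrorFn (a pre-Init Get now reports the error via the pre-installed alloc rather than returning a sentinel error):

```go
package main

import (
	"log"

	"github.com/m3db/m3/src/x/pool"
)

func main() {
	opts := pool.NewObjectPoolOptions().
		SetSize(1024).
		SetOnPoolAccessErrorFn(func(err error) {
			log.Printf("pool misuse: %v", err)
		})

	p := pool.NewObjectPool(opts)
	p.Init(func() interface{} {
		b := make([]byte, 0, 64)
		return &b
	})

	buf := p.Get().(*[]byte)
	*buf = append((*buf)[:0], "hello"...)
	p.Put(buf)
}
```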
diff --git a/src/x/pool/object_test.go b/src/x/pool/object_test.go
index 30dee87a95..bf039943c4 100644
--- a/src/x/pool/object_test.go
+++ b/src/x/pool/object_test.go
@@ -21,6 +21,7 @@
package pool
import (
+ "strconv"
"testing"
"time"
@@ -87,6 +88,7 @@ func TestObjectPoolGetBeforeInitError(t *testing.T) {
var accessErr error
opts := NewObjectPoolOptions().SetOnPoolAccessErrorFn(func(err error) {
accessErr = err
+ panic(err)
})
pool := NewObjectPool(opts)
@@ -98,7 +100,7 @@ func TestObjectPoolGetBeforeInitError(t *testing.T) {
})
assert.Error(t, accessErr)
- assert.Equal(t, errPoolGetBeforeInitialized, accessErr)
+ assert.Equal(t, errPoolAccessBeforeInitialized, accessErr)
}
func TestObjectPoolPutBeforeInitError(t *testing.T) {
@@ -114,18 +116,118 @@ func TestObjectPoolPutBeforeInitError(t *testing.T) {
pool.Put(1)
assert.Error(t, accessErr)
- assert.Equal(t, errPoolPutBeforeInitialized, accessErr)
+ assert.Equal(t, errPoolAccessBeforeInitialized, accessErr)
}
func BenchmarkObjectPoolGetPut(b *testing.B) {
opts := NewObjectPoolOptions().SetSize(1)
pool := NewObjectPool(opts)
pool.Init(func() interface{} {
- return 1
+ b := make([]byte, 0, 16)
+ return &b
})
+ b.ResetTimer()
for n := 0; n < b.N; n++ {
- o := pool.Get()
+ o := pool.Get().(*[]byte)
+ _ = *o
pool.Put(o)
}
}
+
+// go test -benchmem -run=^$ github.com/m3db/m3/src/x/pool -bench '^BenchmarkObjectPoolParallel' -cpu 1,2,4,6,8,12
+func BenchmarkObjectPoolParallelGetPut(b *testing.B) {
+ type poolObj struct {
+ a, b int
+ c *int64
+ ts int64
+ }
+
+ p := NewObjectPool(NewObjectPoolOptions().SetSize(1024))
+ p.Init(func() interface{} {
+ return &poolObj{}
+ })
+
+ ts := time.Now().UnixNano()
+
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ op := p.Get()
+ obj, ok := op.(*poolObj)
+ if !ok {
+ b.Fail()
+ }
+			// Do some work with the object so there is activity between gets/puts.
+ obj.a = b.N
+ obj.c = &ts
+ obj.ts = ts + int64(b.N)
+ p.Put(obj)
+ }
+ })
+}
+
+func BenchmarkObjectPoolParallelGetMultiPutContended(b *testing.B) {
+ opts := NewObjectPoolOptions().
+ SetSize(256)
+
+ p := NewObjectPool(opts)
+ p.Init(func() interface{} {
+ b := make([]byte, 0, 64)
+ return &b
+ })
+
+ b.ResetTimer()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ bufs := make([]*[]byte, 16)
+ for i := 0; i < len(bufs); i++ {
+ o, ok := p.Get().(*[]byte)
+ if !ok {
+ b.Fail()
+ }
+ bufs[i] = o
+ }
+ for i := 0; i < len(bufs); i++ {
+ o := bufs[i]
+ buf := *o
+ buf = strconv.AppendInt(buf[:0], 12344321, 10)
+ p.Put(o)
+ }
+ }
+ })
+}
+
+func BenchmarkObjectPoolParallelGetMultiPutContendedWithRefill(b *testing.B) {
+ opts := NewObjectPoolOptions().
+ SetSize(32).
+ SetRefillLowWatermark(0.05).
+ SetRefillHighWatermark(0.25)
+
+ p := NewObjectPool(opts)
+ p.Init(func() interface{} {
+ b := make([]byte, 0, 32)
+ return &b
+ })
+
+	b.ResetTimer()
+	b.RunParallel(func(pb *testing.PB) {
+		// Keep the scratch slice goroutine-local to avoid a data race.
+		objs := make([]interface{}, 16)
+		for pb.Next() {
+ for i := 0; i < len(objs); i++ {
+ o := p.Get()
+ objs[i] = o
+ }
+
+ for _, obj := range objs {
+ o, ok := obj.(*[]byte)
+ if !ok {
+ b.Fail()
+ }
+ buf := *o
+ buf = strconv.AppendInt(buf[:0], 12344321, 10)
+ p.Put(o)
+ }
+ }
+ })
+}
diff --git a/src/x/resource/lifetime_test.go b/src/x/resource/lifetime_test.go
index f6f23b122d..41290d46d7 100644
--- a/src/x/resource/lifetime_test.go
+++ b/src/x/resource/lifetime_test.go
@@ -25,7 +25,7 @@ import (
"time"
"github.com/stretchr/testify/require"
- "github.com/uber-go/atomic"
+ "go.uber.org/atomic"
)
func TestCancellableLifetime(t *testing.T) {
diff --git a/src/x/retry/retry.go b/src/x/retry/retry.go
index edbdeb2c89..0155ad6026 100644
--- a/src/x/retry/retry.go
+++ b/src/x/retry/retry.go
@@ -37,6 +37,7 @@ var (
)
type retrier struct {
+ opts Options
initialBackoff time.Duration
backoffFactor float64
maxBackoff time.Duration
@@ -49,6 +50,8 @@ type retrier struct {
}
type retrierMetrics struct {
+ calls tally.Counter
+ attempts tally.Counter
success tally.Counter
successLatency tally.Histogram
errors tally.Counter
@@ -74,6 +77,7 @@ func NewRetrier(opts Options) Retrier {
}
return &retrier{
+ opts: opts,
initialBackoff: opts.InitialBackoff(),
backoffFactor: opts.BackoffFactor(),
maxBackoff: opts.MaxBackoff(),
@@ -83,6 +87,8 @@ func NewRetrier(opts Options) Retrier {
rngFn: opts.RngFn(),
sleepFn: time.Sleep,
metrics: retrierMetrics{
+ calls: scope.Counter("calls"),
+ attempts: scope.Counter("attempts"),
success: scope.Counter("success"),
successLatency: histogramWithDurationBuckets(scope, "success-latency"),
errors: scope.Tagged(errorTags.retryable).Counter("errors"),
@@ -94,6 +100,10 @@ func NewRetrier(opts Options) Retrier {
}
}
+func (r *retrier) Options() Options {
+ return r.opts
+}
+
func (r *retrier) Attempt(fn Fn) error {
return r.attempt(nil, fn)
}
@@ -103,6 +113,9 @@ func (r *retrier) AttemptWhile(continueFn ContinueFn, fn Fn) error {
}
func (r *retrier) attempt(continueFn ContinueFn, fn Fn) error {
+ // Always track a call, useful for counting number of total operations.
+ r.metrics.calls.Inc(1)
+
attempt := 0
if continueFn != nil && !continueFn(attempt) {
@@ -112,6 +125,7 @@ func (r *retrier) attempt(continueFn ContinueFn, fn Fn) error {
start := time.Now()
err := fn()
duration := time.Since(start)
+ r.metrics.attempts.Inc(1)
attempt++
if err == nil {
r.metrics.successLatency.RecordDuration(duration)
@@ -143,6 +157,7 @@ func (r *retrier) attempt(continueFn ContinueFn, fn Fn) error {
start := time.Now()
err = fn()
duration := time.Since(start)
+ r.metrics.attempts.Inc(1)
attempt++
if err == nil {
r.metrics.successLatency.RecordDuration(duration)
@@ -197,7 +212,7 @@ func histogramWithDurationBuckets(scope tally.Scope, name string) tally.Histogra
// in the same query causing errors.
"schema": "v1",
})
- buckets := append(tally.DurationBuckets{0, time.Millisecond},
+ buckets := append(tally.DurationBuckets{0, time.Millisecond},
tally.MustMakeExponentialDurationBuckets(2*time.Millisecond, 1.5, 30)...)
return sub.Histogram(name, buckets)
}
diff --git a/src/x/retry/types.go b/src/x/retry/types.go
index 79f089b1b4..105f3baa10 100644
--- a/src/x/retry/types.go
+++ b/src/x/retry/types.go
@@ -50,6 +50,10 @@ type ContinueFn func(attempt int) bool
// Retrier is a executor that can retry attempts on executing methods.
type Retrier interface {
+	// Options returns the options used to construct the retrier, useful
+	// for changing instrumentation options, etc. while preserving other options.
+ Options() Options
+
// Attempt will attempt to perform a function with retries.
Attempt(fn Fn) error
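
A minimal sketch of a retrier exercising the new instrumentation: each Attempt increments the calls counter once and the attempts counter per invocation of fn, and Options() exposes the construction-time options. The backoff and retry settings below are illustrative assumptions:

```go
package main

import (
	"errors"
	"log"
	"time"

	"github.com/m3db/m3/src/x/retry"
)

func main() {
	opts := retry.NewOptions().
		SetInitialBackoff(10 * time.Millisecond).
		SetMaxRetries(3)

	r := retry.NewRetrier(opts)

	attempts := 0
	err := r.Attempt(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient")
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// The options used to build r, e.g. to rebuild with a new scope.
	_ = r.Options()
}
```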
diff --git a/src/x/sampler/sampler.go b/src/x/sampler/sampler.go
index 748d911fe9..906ee72009 100644
--- a/src/x/sampler/sampler.go
+++ b/src/x/sampler/sampler.go
@@ -26,22 +26,74 @@ import (
"go.uber.org/atomic"
)
+// Rate is a sample rate.
+type Rate float64
+
+// Value returns the float64 sample rate value.
+func (r Rate) Value() float64 {
+ return float64(r)
+}
+
+// Validate validates a sample rate.
+func (r Rate) Validate() error {
+ if r < 0.0 || r > 1.0 {
+ return fmt.Errorf("invalid sample rate: actual=%f, valid=[0.0,1.0]", r)
+ }
+ return nil
+}
+
+// UnmarshalYAML unmarshals a sample rate.
+func (r *Rate) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var value float64
+ if err := unmarshal(&value); err != nil {
+ return err
+ }
+
+ parsed := Rate(value)
+ if err := parsed.Validate(); err != nil {
+ return err
+ }
+
+ *r = parsed
+
+ return nil
+}
+
// Sampler samples requests; out of 100 calls, approximately
// 100*sampleRate calls will be sampled.
type Sampler struct {
+ sampleRate Rate
sampleEvery int32
numTried *atomic.Int32
}
// NewSampler creates a new sampler with a sample rate.
-func NewSampler(sampleRate float64) (*Sampler, error) {
- if sampleRate <= 0.0 || sampleRate >= 1.0 {
- return nil, fmt.Errorf("invalid sample rate %f", sampleRate)
+func NewSampler(sampleRate Rate) (*Sampler, error) {
+ if err := sampleRate.Validate(); err != nil {
+ return nil, err
}
- return &Sampler{numTried: atomic.NewInt32(0), sampleEvery: int32(1.0 / sampleRate)}, nil
+ if sampleRate == 0 {
+ return &Sampler{
+ sampleRate: sampleRate,
+ sampleEvery: 0,
+ }, nil
+ }
+ return &Sampler{
+ sampleRate: sampleRate,
+ numTried: atomic.NewInt32(0),
+ sampleEvery: int32(1.0 / sampleRate),
+ }, nil
}
// Sample returns true when the call is sampled.
func (t *Sampler) Sample() bool {
+ if t.sampleEvery == 0 {
+ return false
+ }
return (t.numTried.Inc()-1)%t.sampleEvery == 0
}
+
+// SampleRate returns the effective sample rate.
+func (t *Sampler) SampleRate() Rate {
+ return t.sampleRate
+}
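
A minimal sketch of the widened Rate range: 0 now yields a valid sampler that never samples, and 1 samples every call.

```go
package main

import (
	"fmt"
	"log"

	"github.com/m3db/m3/src/x/sampler"
)

func main() {
	s, err := sampler.NewSampler(sampler.Rate(0.5))
	if err != nil {
		log.Fatal(err)
	}

	sampled := 0
	for i := 0; i < 10; i++ {
		if s.Sample() {
			sampled++
		}
	}
	// Deterministic counting sampler: every 1/rate-th call is sampled.
	fmt.Printf("sampled %d of 10 at rate %v\n", sampled, s.SampleRate().Value())
}
```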
diff --git a/src/x/sampler/sampler_test.go b/src/x/sampler/sampler_test.go
index 92ac33ec98..77d6b29a5e 100644
--- a/src/x/sampler/sampler_test.go
+++ b/src/x/sampler/sampler_test.go
@@ -23,7 +23,9 @@ package sampler
import (
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v2"
)
func TestInvalidSampleRate(t *testing.T) {
@@ -31,10 +33,10 @@ func TestInvalidSampleRate(t *testing.T) {
require.Error(t, err)
_, err = NewSampler(0.0)
- require.Error(t, err)
+ require.NoError(t, err)
_, err = NewSampler(1.0)
- require.Error(t, err)
+ require.NoError(t, err)
_, err = NewSampler(2.0)
require.Error(t, err)
@@ -61,3 +63,60 @@ func TestSampler(t *testing.T) {
require.False(t, s.Sample())
require.True(t, s.Sample())
}
+
+func TestRateUnmarshalYAML(t *testing.T) {
+ type config struct {
+ Rate Rate `yaml:"rate"`
+ }
+
+ tests := []struct {
+ input []byte
+ expectValue float64
+ expectErr bool
+ }{
+ {
+ input: []byte("rate: foo\n"),
+ expectValue: 0,
+ expectErr: true,
+ },
+ {
+ input: []byte("rate: -1\n"),
+ expectValue: 0,
+ expectErr: true,
+ },
+ {
+ input: []byte("\n"),
+ expectValue: 0,
+ expectErr: false,
+ },
+ {
+ input: []byte("rate: 0\n"),
+ expectValue: 0,
+ expectErr: false,
+ },
+ {
+ input: []byte("rate: 1\n"),
+ expectValue: 1,
+ expectErr: false,
+ },
+ {
+ input: []byte("rate: 1.01\n"),
+ expectValue: 0,
+ expectErr: true,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(string(test.input), func(t *testing.T) {
+ var c config
+ err := yaml.Unmarshal(test.input, &c)
+ if test.expectErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+
+ assert.Equal(t, test.expectValue, c.Rate.Value())
+ })
+ }
+}
diff --git a/src/x/serialize/decoder.go b/src/x/serialize/decoder.go
index 588baa1547..9d1b5d9dce 100644
--- a/src/x/serialize/decoder.go
+++ b/src/x/serialize/decoder.go
@@ -237,3 +237,12 @@ func (d *decoder) Duplicate() ident.TagIterator {
}
return iter
}
+
+func (d *decoder) Rewind() {
+ if d.checkedData == nil {
+ return
+ }
+ d.checkedData.IncRef()
+ d.Reset(d.checkedData)
+ d.checkedData.DecRef()
+}
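
A sketch of the new Rewind in use, assuming a decoder satisfying the TagDecoder interface (e.g. obtained from a TagDecoderPool): after a full iteration the same encoded tags can be walked again without re-resetting from the source bytes.

```go
package serializeexample

import (
	"github.com/m3db/m3/src/x/checked"
	"github.com/m3db/m3/src/x/serialize"
)

// decodeTwice walks the encoded tags twice using Rewind.
func decodeTwice(d serialize.TagDecoder, encoded checked.Bytes) {
	d.Reset(encoded)
	for d.Next() {
		_ = d.Current() // first pass
	}
	d.Rewind()
	for d.Next() {
		_ = d.Current() // second pass over the same bytes
	}
	d.Close()
}
```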
diff --git a/src/x/serialize/decoder_options.go b/src/x/serialize/decoder_options.go
index bfaf0e1392..d626cb2030 100644
--- a/src/x/serialize/decoder_options.go
+++ b/src/x/serialize/decoder_options.go
@@ -39,13 +39,44 @@ type decodeOpts struct {
limits TagSerializationLimits
}
+// TagDecoderOptionsConfig allows for defaults to be set at initialization.
+type TagDecoderOptionsConfig struct {
+ CheckBytesWrapperPoolSize *int `yaml:"checkBytesWrapperPoolSize"`
+ CheckBytesWrapperPoolLowWatermark *float64 `yaml:"checkBytesWrapperPoolLowWatermark"`
+ CheckBytesWrapperPoolHighWatermark *float64 `yaml:"checkBytesWrapperPoolHighWatermark"`
+}
+
+// CheckBytesWrapperPoolSizeOrDefault returns config value or default.
+func (c TagDecoderOptionsConfig) CheckBytesWrapperPoolSizeOrDefault() int {
+ if c.CheckBytesWrapperPoolSize == nil {
+ return defaultCheckBytesWrapperPoolSize
+ }
+ return *c.CheckBytesWrapperPoolSize
+}
+
+// CheckBytesWrapperPoolLowWatermarkOrDefault returns config value or default.
+func (c TagDecoderOptionsConfig) CheckBytesWrapperPoolLowWatermarkOrDefault() float64 {
+ if c.CheckBytesWrapperPoolLowWatermark == nil {
+ return defaultCheckBytesWrapperPoolLowWatermark
+ }
+ return *c.CheckBytesWrapperPoolLowWatermark
+}
+
+// CheckBytesWrapperPoolHighWatermarkOrDefault returns config value or default.
+func (c TagDecoderOptionsConfig) CheckBytesWrapperPoolHighWatermarkOrDefault() float64 {
+ if c.CheckBytesWrapperPoolHighWatermark == nil {
+ return defaultCheckBytesWrapperPoolHighWatermark
+ }
+ return *c.CheckBytesWrapperPoolHighWatermark
+}
+
// NewTagDecoderOptions returns a new TagDecoderOptions.
-func NewTagDecoderOptions() TagDecoderOptions {
+func NewTagDecoderOptions(cfg TagDecoderOptionsConfig) TagDecoderOptions {
pool := xpool.NewCheckedBytesWrapperPool(
pool.NewObjectPoolOptions().
- SetSize(defaultCheckBytesWrapperPoolSize).
- SetRefillLowWatermark(defaultCheckBytesWrapperPoolLowWatermark).
- SetRefillHighWatermark(defaultCheckBytesWrapperPoolHighWatermark))
+ SetSize(cfg.CheckBytesWrapperPoolSizeOrDefault()).
+ SetRefillLowWatermark(cfg.CheckBytesWrapperPoolLowWatermarkOrDefault()).
+ SetRefillHighWatermark(cfg.CheckBytesWrapperPoolHighWatermarkOrDefault()))
pool.Init()
return &decodeOpts{
wrapperPool: pool,
diff --git a/src/x/serialize/decoder_test.go b/src/x/serialize/decoder_test.go
index c580161e5a..81f7a0a58a 100644
--- a/src/x/serialize/decoder_test.go
+++ b/src/x/serialize/decoder_test.go
@@ -25,14 +25,16 @@ import (
"testing"
"github.com/m3db/m3/src/x/checked"
+ "github.com/m3db/m3/src/x/ident"
xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- testDecodeOpts = NewTagDecoderOptions()
+ testDecodeOpts = NewTagDecoderOptions(TagDecoderOptionsConfig{})
)
func TestEmptyDecode(t *testing.T) {
@@ -50,9 +52,9 @@ func TestEmptyDecode(t *testing.T) {
func TestEmptyTagNameDecode(t *testing.T) {
var b []byte
b = append(b, headerMagicBytes...)
- b = append(b, encodeUInt16(uint16(1))...) /* num tags */
- b = append(b, encodeUInt16(0)...) /* len empty string */
- b = append(b, encodeUInt16(4)...) /* len defg */
+ b = append(b, encodeUInt16(1, make([]byte, 2))...) /* num tags */
+ b = append(b, encodeUInt16(0, make([]byte, 2))...) /* len empty string */
+ b = append(b, encodeUInt16(4, make([]byte, 2))...) /* len defg */
b = append(b, []byte("defg")...)
d := newTagDecoder(testDecodeOpts, nil)
@@ -64,10 +66,10 @@ func TestEmptyTagNameDecode(t *testing.T) {
func TestEmptyTagValueDecode(t *testing.T) {
var b []byte
b = append(b, headerMagicBytes...)
- b = append(b, encodeUInt16(uint16(1))...) /* num tags */
- b = append(b, encodeUInt16(1)...) /* len "1" */
- b = append(b, []byte("a")...) /* tag name */
- b = append(b, encodeUInt16(0)...) /* len tag value */
+ b = append(b, encodeUInt16(1, make([]byte, 2))...) /* num tags */
+ b = append(b, encodeUInt16(1, make([]byte, 2))...) /* len "1" */
+ b = append(b, []byte("a")...) /* tag name */
+ b = append(b, encodeUInt16(0, make([]byte, 2))...) /* len tag value */
d := newTagDecoder(testDecodeOpts, nil)
d.Reset(wrapAsCheckedBytes(b))
@@ -127,6 +129,41 @@ func TestDecodeSimple(t *testing.T) {
d.Close()
}
+func TestDecodeAfterRewind(t *testing.T) {
+ b := testTagDecoderBytes()
+ d := newTestTagDecoder()
+ d.Reset(b)
+ require.NoError(t, d.Err())
+
+ count := 10
+ printedTags := []byte("abcdefgxbar")
+ acBytes := make([]byte, 0, count*len(printedTags))
+ exBytes := make([]byte, count*len(printedTags))
+ readIter := func(it ident.TagIterator) {
+		tag := it.Current()
+ acBytes = append(acBytes, tag.Name.Bytes()...)
+ acBytes = append(acBytes, tag.Value.Bytes()...)
+ }
+
+ for i := 0; i < count; i++ {
+ require.True(t, d.Next())
+ readIter(d)
+ require.True(t, d.Next())
+ readIter(d)
+ require.False(t, d.Next())
+ require.NoError(t, d.Err())
+ copy(exBytes[i*len(printedTags):], printedTags)
+ d.Rewind()
+ }
+
+ assert.Equal(t, exBytes, acBytes)
+ assert.Equal(t, string(exBytes), string(acBytes))
+
+ assert.Equal(t, 1, b.NumRef())
+ d.Close()
+ assert.Equal(t, 0, b.NumRef())
+}
+
func TestDecodeTooManyTags(t *testing.T) {
b := testTagDecoderBytes()
opts := testDecodeOpts.SetTagSerializationLimits(
@@ -152,7 +189,7 @@ func TestDecodeLiteralTooLong(t *testing.T) {
func TestDecodeMissingTags(t *testing.T) {
var b []byte
b = append(b, headerMagicBytes...)
- b = append(b, encodeUInt16(uint16(2))...) /* num tags */
+ b = append(b, encodeUInt16(2, make([]byte, 2))...) /* num tags */
d := newTestTagDecoder()
d.Reset(wrapAsCheckedBytes(b))
@@ -165,7 +202,7 @@ func TestDecodeMissingTags(t *testing.T) {
func TestDecodeOwnershipFinalize(t *testing.T) {
var b []byte
b = append(b, headerMagicBytes...)
- b = append(b, encodeUInt16(uint16(2))...) /* num tags */
+ b = append(b, encodeUInt16(2, make([]byte, 2))...) /* num tags */
wrappedBytes := wrapAsCheckedBytes(b)
require.Equal(t, 0, wrappedBytes.NumRef())
@@ -187,14 +224,14 @@ func TestDecodeOwnershipFinalize(t *testing.T) {
func TestDecodeMissingValue(t *testing.T) {
var b []byte
b = append(b, headerMagicBytes...)
- b = append(b, encodeUInt16(uint16(2))...) /* num tags */
- b = append(b, encodeUInt16(3)...) /* len abc */
+ b = append(b, encodeUInt16(2, make([]byte, 2))...) /* num tags */
+ b = append(b, encodeUInt16(3, make([]byte, 2))...) /* len abc */
b = append(b, []byte("abc")...)
- b = append(b, encodeUInt16(4)...) /* len defg */
+ b = append(b, encodeUInt16(4, make([]byte, 2))...) /* len defg */
b = append(b, []byte("defg")...)
- b = append(b, encodeUInt16(1)...) /* len x */
+ b = append(b, encodeUInt16(1, make([]byte, 2))...) /* len x */
b = append(b, []byte("x")...)
d := newTestTagDecoder()
@@ -328,18 +365,18 @@ func wrapAsCheckedBytes(b []byte) checked.Bytes {
func testTagDecoderBytesRaw() []byte {
var b []byte
b = append(b, headerMagicBytes...)
- b = append(b, encodeUInt16(uint16(2))...) /* num tags */
+ b = append(b, encodeUInt16(2, make([]byte, 2))...) /* num tags */
- b = append(b, encodeUInt16(3)...) /* len abc */
+ b = append(b, encodeUInt16(3, make([]byte, 2))...) /* len abc */
b = append(b, []byte("abc")...)
- b = append(b, encodeUInt16(4)...) /* len defg */
+ b = append(b, encodeUInt16(4, make([]byte, 2))...) /* len defg */
b = append(b, []byte("defg")...)
- b = append(b, encodeUInt16(1)...) /* len x */
+ b = append(b, encodeUInt16(1, make([]byte, 2))...) /* len x */
b = append(b, []byte("x")...)
- b = append(b, encodeUInt16(3)...) /* len bar */
+ b = append(b, encodeUInt16(3, make([]byte, 2))...) /* len bar */
b = append(b, []byte("bar")...)
return b
}
diff --git a/src/x/serialize/encoder.go b/src/x/serialize/encoder.go
index c54859ea2e..a682a639ee 100644
--- a/src/x/serialize/encoder.go
+++ b/src/x/serialize/encoder.go
@@ -52,9 +52,13 @@ import (
var (
byteOrder binary.ByteOrder = binary.LittleEndian
- headerMagicBytes = encodeUInt16(headerMagicNumber)
+ headerMagicBytes = make([]byte, 2)
)
+func init() {
+ encodeUInt16(headerMagicNumber, headerMagicBytes)
+}
+
var (
errTagEncoderInUse = errors.New("encoder already in use")
errTagLiteralTooLong = errors.New("literal is too long")
@@ -66,8 +70,10 @@ type newCheckedBytesFn func([]byte, checked.BytesOptions) checked.Bytes
var defaultNewCheckedBytesFn = checked.NewBytes
type encoder struct {
- buf *bytes.Buffer
- checkedBytes checked.Bytes
+ buf *bytes.Buffer
+ checkedBytes checked.Bytes
+ staticBuffer [2]byte
+ staticBufferSlice []byte
opts TagEncoderOptions
pool TagEncoderPool
@@ -80,21 +86,23 @@ func newTagEncoder(
) TagEncoder {
b := make([]byte, 0, opts.InitialCapacity())
cb := newFn(nil, nil)
- return &encoder{
+ e := &encoder{
buf: bytes.NewBuffer(b),
checkedBytes: cb,
opts: opts,
pool: pool,
}
+ e.staticBufferSlice = e.staticBuffer[:]
+ return e
}
-func (e *encoder) Encode(srcTags ident.TagIterator) error {
+func (e *encoder) Encode(tags ident.TagIterator) error {
if e.checkedBytes.NumRef() > 0 {
return errTagEncoderInUse
}
- tags := srcTags.Duplicate()
- defer tags.Close()
+ tags.Rewind()
+ defer tags.Rewind()
numTags := tags.Remaining()
max := int(e.opts.TagSerializationLimits().MaxNumberTags())
@@ -107,7 +115,7 @@ func (e *encoder) Encode(srcTags ident.TagIterator) error {
return err
}
- if _, err := e.buf.Write(encodeUInt16(uint16(numTags))); err != nil {
+ if _, err := e.buf.Write(e.encodeUInt16(uint16(numTags))); err != nil {
e.buf.Reset()
return err
}
@@ -177,7 +185,7 @@ func (e *encoder) encodeID(i ident.ID) error {
}
ld := uint16(len(d))
- if _, err := e.buf.Write(encodeUInt16(ld)); err != nil {
+ if _, err := e.buf.Write(e.encodeUInt16(ld)); err != nil {
return err
}
@@ -188,10 +196,16 @@ func (e *encoder) encodeID(i ident.ID) error {
return nil
}
-func encodeUInt16(v uint16) []byte {
- var bytes [2]byte
- byteOrder.PutUint16(bytes[:], v)
- return bytes[:]
+func (e *encoder) encodeUInt16(v uint16) []byte {
+	// NB(r): Use the static buffer on the struct for encoding; a buffer
+	// declared inline in the function would escape to the heap.
+ dest := e.staticBufferSlice[:2]
+ return encodeUInt16(v, dest)
+}
+
+func encodeUInt16(v uint16, dest []byte) []byte {
+ byteOrder.PutUint16(dest, v)
+ return dest
}
func decodeUInt16(b []byte) uint16 {
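
A standalone sketch of the allocation pattern above: reusing a fixed buffer hung off the struct keeps the 2-byte length-prefix encode path off the heap, whereas returning a slice of an inline [2]byte forces it to escape.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

type lengthEncoder struct {
	// Reused across calls so the returned slice does not escape per call.
	staticBuffer [2]byte
}

func (e *lengthEncoder) encodeUInt16(v uint16) []byte {
	dest := e.staticBuffer[:2]
	binary.LittleEndian.PutUint16(dest, v)
	return dest
}

func main() {
	var e lengthEncoder
	b := e.encodeUInt16(4)
	fmt.Println(b, binary.LittleEndian.Uint16(b)) // [4 0] 4
}
```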
diff --git a/src/x/serialize/encoder_test.go b/src/x/serialize/encoder_test.go
index fe4d81651d..074f71a76f 100644
--- a/src/x/serialize/encoder_test.go
+++ b/src/x/serialize/encoder_test.go
@@ -108,7 +108,7 @@ func TestSimpleEncode(t *testing.T) {
2 /* bar length */ + len("bar")
require.Len(t, b, numExpectedBytes)
require.Equal(t, headerMagicBytes, b[:2])
- require.Equal(t, encodeUInt16(2), b[2:4])
+ require.Equal(t, encodeUInt16(2, make([]byte, 2)), b[2:4])
require.Equal(t, uint16(3), decodeUInt16(b[4:6])) /* len abc */
require.Equal(t, "abc", string(b[6:9]))
require.Equal(t, uint16(4), decodeUInt16(b[9:11])) /* len defg */
@@ -147,18 +147,17 @@ func TestEmptyTagIterEncode(t *testing.T) {
return mockBytes
}
- clonedIter := ident.NewMockTagIterator(ctrl)
iter := ident.NewMockTagIterator(ctrl)
mockBytes.EXPECT().IncRef()
mockBytes.EXPECT().Reset(gomock.Any())
gomock.InOrder(
mockBytes.EXPECT().NumRef().Return(0),
- iter.EXPECT().Duplicate().Return(clonedIter),
- clonedIter.EXPECT().Remaining().Return(0),
- clonedIter.EXPECT().Next().Return(false),
- clonedIter.EXPECT().Err().Return(nil),
- clonedIter.EXPECT().Close(),
+ iter.EXPECT().Rewind(),
+ iter.EXPECT().Remaining().Return(0),
+ iter.EXPECT().Next().Return(false),
+ iter.EXPECT().Err().Return(nil),
+ iter.EXPECT().Rewind(),
)
enc := newTagEncoder(newBytesFn, newTestEncoderOpts(), nil)
@@ -169,15 +168,14 @@ func TestTooManyTags(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- clonedIter := ident.NewMockTagIterator(ctrl)
iter := ident.NewMockTagIterator(ctrl)
testOpts := newTestEncoderOpts()
maxNumTags := testOpts.TagSerializationLimits().MaxNumberTags()
gomock.InOrder(
- iter.EXPECT().Duplicate().Return(clonedIter),
- clonedIter.EXPECT().Remaining().Return(1+int(maxNumTags)),
- clonedIter.EXPECT().Close(),
+ iter.EXPECT().Rewind(),
+ iter.EXPECT().Remaining().Return(1+int(maxNumTags)),
+ iter.EXPECT().Rewind(),
)
enc := newTagEncoder(defaultNewCheckedBytesFn, testOpts, nil)
@@ -193,22 +191,21 @@ func TestSingleValueTagIterEncode(t *testing.T) {
return mockBytes
}
- clonedIter := ident.NewMockTagIterator(ctrl)
iter := ident.NewMockTagIterator(ctrl)
mockBytes.EXPECT().IncRef()
mockBytes.EXPECT().Reset(gomock.Any())
gomock.InOrder(
mockBytes.EXPECT().NumRef().Return(0),
- iter.EXPECT().Duplicate().Return(clonedIter),
- clonedIter.EXPECT().Remaining().Return(1),
- clonedIter.EXPECT().Next().Return(true),
- clonedIter.EXPECT().Current().Return(
+ iter.EXPECT().Rewind(),
+ iter.EXPECT().Remaining().Return(1),
+ iter.EXPECT().Next().Return(true),
+ iter.EXPECT().Current().Return(
ident.StringTag("some", "tag"),
),
- clonedIter.EXPECT().Next().Return(false),
- clonedIter.EXPECT().Err().Return(nil),
- clonedIter.EXPECT().Close(),
+ iter.EXPECT().Next().Return(false),
+ iter.EXPECT().Err().Return(nil),
+ iter.EXPECT().Rewind(),
)
enc := newTagEncoder(newBytesFn, newTestEncoderOpts(), nil)
diff --git a/src/x/serialize/serialize_mock.go b/src/x/serialize/serialize_mock.go
index b18d7b9f46..daf40ce2e0 100644
--- a/src/x/serialize/serialize_mock.go
+++ b/src/x/serialize/serialize_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/x/serialize (interfaces: TagEncoder,TagEncoderPool,TagDecoder,TagDecoderPool,MetricTagsIterator,MetricTagsIteratorPool)
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -315,6 +315,18 @@ func (mr *MockTagDecoderMockRecorder) Reset(arg0 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockTagDecoder)(nil).Reset), arg0)
}
+// Rewind mocks base method
+func (m *MockTagDecoder) Rewind() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Rewind")
+}
+
+// Rewind indicates an expected call of Rewind
+func (mr *MockTagDecoderMockRecorder) Rewind() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Rewind", reflect.TypeOf((*MockTagDecoder)(nil).Rewind))
+}
+
// MockTagDecoderPool is a mock of TagDecoderPool interface
type MockTagDecoderPool struct {
ctrl *gomock.Controller
diff --git a/src/x/server/options.go b/src/x/server/options.go
index a6125d258c..46e61370b9 100644
--- a/src/x/server/options.go
+++ b/src/x/server/options.go
@@ -24,6 +24,7 @@ import (
"time"
"github.com/m3db/m3/src/x/instrument"
+ xnet "github.com/m3db/m3/src/x/net"
"github.com/m3db/m3/src/x/retry"
)
@@ -31,9 +32,9 @@ const (
// By default keepAlives are enabled for TCP connections.
defaultTCPConnectionKeepAlive = true
- // By default the keep alive period is not set and the actual keep alive
- // period is determined by the OS and the platform.
- defaultTCPConnectionKeepAlivePeriod = 0
+	// By default the keep alive period is fairly short so that stale
+	// connections are broken quickly.
+ defaultTCPConnectionKeepAlivePeriod = 10 * time.Second
)
// Options provide a set of server options
@@ -64,6 +65,12 @@ type Options interface {
// TCPConnectionKeepAlivePeriod returns the keep alive period for tcp connections.
TCPConnectionKeepAlivePeriod() time.Duration
+
+ // SetListenerOptions sets the listener options for the server.
+ SetListenerOptions(value xnet.ListenerOptions) Options
+
+	// ListenerOptions returns the listener options for the server.
+ ListenerOptions() xnet.ListenerOptions
}
type options struct {
@@ -71,6 +78,7 @@ type options struct {
retryOpts retry.Options
tcpConnectionKeepAlive bool
tcpConnectionKeepAlivePeriod time.Duration
+ listenerOpts xnet.ListenerOptions
}
// NewOptions creates a new set of server options
@@ -80,6 +88,7 @@ func NewOptions() Options {
retryOpts: retry.NewOptions(),
tcpConnectionKeepAlive: defaultTCPConnectionKeepAlive,
tcpConnectionKeepAlivePeriod: defaultTCPConnectionKeepAlivePeriod,
+ listenerOpts: xnet.NewListenerOptions(),
}
}
@@ -122,3 +131,13 @@ func (o *options) SetTCPConnectionKeepAlivePeriod(value time.Duration) Options {
func (o *options) TCPConnectionKeepAlivePeriod() time.Duration {
return o.tcpConnectionKeepAlivePeriod
}
+
+func (o *options) SetListenerOptions(value xnet.ListenerOptions) Options {
+ opts := *o
+ opts.listenerOpts = value
+ return &opts
+}
+
+func (o *options) ListenerOptions() xnet.ListenerOptions {
+ return o.listenerOpts
+}
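
A minimal sketch of plumbing listener options through the server options added above; the keep alive period shown is an illustrative override, not the new default.

```go
package main

import (
	"time"

	xnet "github.com/m3db/m3/src/x/net"
	"github.com/m3db/m3/src/x/server"
)

func main() {
	opts := server.NewOptions().
		SetTCPConnectionKeepAlivePeriod(30 * time.Second).
		SetListenerOptions(xnet.NewListenerOptions(
			xnet.ListenerReusePort(true)))

	// The server's ListenAndServe will bind via these listener options.
	_ = opts.ListenerOptions()
}
```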
diff --git a/src/x/server/server.go b/src/x/server/server.go
index 7e826a13c2..09da8c08d0 100644
--- a/src/x/server/server.go
+++ b/src/x/server/server.go
@@ -83,13 +83,14 @@ type server struct {
tcpConnectionKeepAlive bool
tcpConnectionKeepAlivePeriod time.Duration
- closed bool
- closedChan chan struct{}
- numConns int32
- conns []net.Conn
- wgConns sync.WaitGroup
- metrics serverMetrics
- handler Handler
+ closed bool
+ closedChan chan struct{}
+ numConns int32
+ conns []net.Conn
+ wgConns sync.WaitGroup
+ metrics serverMetrics
+ handler Handler
+ listenerOpts xnet.ListenerOptions
addConnectionFn addConnectionFn
removeConnectionFn removeConnectionFn
@@ -110,6 +111,7 @@ func NewServer(address string, handler Handler, opts Options) Server {
closedChan: make(chan struct{}),
metrics: newServerMetrics(scope),
handler: handler,
+ listenerOpts: opts.ListenerOptions(),
}
// Set up the connection functions.
@@ -123,7 +125,7 @@ func NewServer(address string, handler Handler, opts Options) Server {
}
func (s *server) ListenAndServe() error {
- listener, err := net.Listen("tcp", s.address)
+ listener, err := s.listenerOpts.Listen("tcp", s.address)
if err != nil {
return err
}
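
The only behavioral change in ListenAndServe is that listener construction is delegated. An implementation of the Listen call used above could be as small as a net.ListenConfig wrapper; this is a sketch under the assumption that the interface only needs the Listen(protocol, address) method seen at the call site, and the real xnet package may carry more options:

type listenerOptions struct {
	lc net.ListenConfig
}

// Listen satisfies the call in ListenAndServe by delegating to
// net.ListenConfig, which centralizes socket-level setup in one place.
func (o listenerOptions) Listen(protocol, address string) (net.Listener, error) {
	return o.lc.Listen(context.Background(), protocol, address)
}
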
diff --git a/src/x/sync/cpu_linux_amd64.go b/src/x/sync/cpu_linux_amd64.go
new file mode 100644
index 0000000000..9d5374fec9
--- /dev/null
+++ b/src/x/sync/cpu_linux_amd64.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package sync
+
+// getCore returns the currently running CPU core.
+func getCore() int
diff --git a/src/x/sync/cpu_linux_amd64.s b/src/x/sync/cpu_linux_amd64.s
new file mode 100644
index 0000000000..63d130965b
--- /dev/null
+++ b/src/x/sync/cpu_linux_amd64.s
@@ -0,0 +1,14 @@
+#include "textflag.h"
+#include "go_asm.h"
+
+#define get_tls(r) MOVQ TLS, r
+
+// func getCore() int
+TEXT ·getCore(SB), NOSPLIT, $0
+ // RDTSCP
+ BYTE $0x0f; BYTE $0x01; BYTE $0xf9
+
+ // Linux puts core ID in the bottom byte.
+ ANDQ $0xff, CX
+ MOVQ CX, ret+0(FP)
+ RET
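
A cheap sanity check for the stub on a Linux/amd64 host might look like the following (a hypothetical test, not part of this change):

func TestGetCoreInRange(t *testing.T) {
	core := getCore()
	if core < 0 || core >= NumCores() {
		t.Fatalf("core %d out of range [0, %d)", core, NumCores())
	}
}
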
diff --git a/src/x/sync/cpu_supported_arch_unsupported_os.go b/src/x/sync/cpu_supported_arch_unsupported_os.go
new file mode 100644
index 0000000000..a0ceef2d28
--- /dev/null
+++ b/src/x/sync/cpu_supported_arch_unsupported_os.go
@@ -0,0 +1,28 @@
+// +build amd64,!linux
+//
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package sync
+
+func getCore() int {
+	// Fall back to a single core.
+ return 0
+}
diff --git a/src/x/sync/cpu_unsupported_arch_supported_os.go b/src/x/sync/cpu_unsupported_arch_supported_os.go
new file mode 100644
index 0000000000..db5e8e9229
--- /dev/null
+++ b/src/x/sync/cpu_unsupported_arch_supported_os.go
@@ -0,0 +1,28 @@
+// +build !amd64,linux
+//
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package sync
+
+func getCore() int {
+	// Fall back to a single core.
+ return 0
+}
diff --git a/src/x/sync/cpu_unsupported_arch_unsupported_os.go b/src/x/sync/cpu_unsupported_arch_unsupported_os.go
new file mode 100644
index 0000000000..6675eb37fc
--- /dev/null
+++ b/src/x/sync/cpu_unsupported_arch_unsupported_os.go
@@ -0,0 +1,28 @@
+// +build !amd64,!linux
+//
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package sync
+
+func getCore() int {
+	// Fall back to a single core.
+ return 0
+}
diff --git a/src/x/sync/index_cpu.go b/src/x/sync/index_cpu.go
new file mode 100644
index 0000000000..fc910fd4e2
--- /dev/null
+++ b/src/x/sync/index_cpu.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package sync
+
+import (
+ "bufio"
+ "os"
+ "strings"
+)
+
+var (
+ numCores = 1
+)
+
+func init() {
+ f, err := os.Open("/proc/cpuinfo")
+ if err != nil {
+ return
+ }
+
+ defer f.Close()
+
+ n := 0
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ if strings.HasPrefix(scanner.Text(), "processor") {
+ n++
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return
+ }
+
+	if n > 0 {
+		numCores = n
+	}
+}
+
+// NumCores returns the number of cores reported by
+// /proc/cpuinfo; if that is unavailable it returns 1.
+func NumCores() int {
+ return numCores
+}
+
+// CPUCore returns the current CPU core.
+func CPUCore() int {
+ if numCores == 1 {
+		// Likely not Linux, or /proc/cpuinfo was unavailable, meaning
+		// that even if RDTSCP is available we won't have sized the
+		// per-core queues correctly, since callers probed with NumCores
+		// and got 1 back.
+ return 0
+ }
+ // We know the number of cores, try to call RDTSCP to get the core.
+ return getCore()
+}
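
NumCores and CPUCore are building blocks for per-core sharding. A hypothetical illustration (shardedCounter is not part of this diff) that sizes one slot per core and uses sync/atomic so a stale CPUCore read costs only contention, never correctness:

import "sync/atomic"

type shardedCounter struct {
	shards []int64
}

func newShardedCounter() *shardedCounter {
	return &shardedCounter{shards: make([]int64, NumCores())}
}

func (c *shardedCounter) inc() {
	// The goroutine may migrate between reading CPUCore and the add;
	// the atomic add keeps the count correct regardless.
	atomic.AddInt64(&c.shards[CPUCore()], 1)
}

func (c *shardedCounter) total() int64 {
	var sum int64
	for i := range c.shards {
		sum += atomic.LoadInt64(&c.shards[i])
	}
	return sum
}
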
diff --git a/src/x/test/diff.go b/src/x/test/diff.go
index d8db0f9c48..669a828090 100644
--- a/src/x/test/diff.go
+++ b/src/x/test/diff.go
@@ -24,6 +24,8 @@ import (
"encoding/json"
"testing"
+ xjson "github.com/m3db/m3/src/x/json"
+
"github.com/sergi/go-diff/diffmatchpatch"
"github.com/stretchr/testify/require"
)
@@ -36,8 +38,15 @@ func Diff(expected, actual string) string {
return dmp.DiffPrettyText(diffs)
}
-// MustPrettyJSON returns an indented version of the JSON.
-func MustPrettyJSON(t *testing.T, str string) string {
+// MustPrettyJSONMap returns an indented JSON string of the object.
+func MustPrettyJSONMap(t *testing.T, value xjson.Map) string {
+ pretty, err := json.MarshalIndent(value, "", " ")
+ require.NoError(t, err)
+ return string(pretty)
+}
+
+// MustPrettyJSONString returns an indented version of the JSON.
+func MustPrettyJSONString(t *testing.T, str string) string {
var unmarshalled map[string]interface{}
err := json.Unmarshal([]byte(str), &unmarshalled)
require.NoError(t, err)
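
Call sites pick the variant matching their input. A usage sketch, assuming xjson.Map is a plain JSON-marshalable map type and the package is imported as xtest:

expected := xtest.MustPrettyJSONMap(t, xjson.Map{"status": "ok"})
actual := xtest.MustPrettyJSONString(t, `{"status":"ok"}`)
require.Equal(t, expected, actual, xtest.Diff(expected, actual))
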
diff --git a/src/x/time/range_iter.go b/src/x/time/range_iter.go
index 3bcc15d2d1..546b5d0ec0 100644
--- a/src/x/time/range_iter.go
+++ b/src/x/time/range_iter.go
@@ -26,6 +26,7 @@ import "container/list"
type RangeIter struct {
ranges *list.List
cur *list.Element
+ eof bool
}
func newRangeIter(ranges *list.List) *RangeIter {
@@ -37,12 +38,16 @@ func (it *RangeIter) Next() bool {
if it.ranges == nil {
return false
}
+ if it.eof {
+ return false
+ }
if it.cur == nil {
it.cur = it.ranges.Front()
} else {
it.cur = it.cur.Next()
}
- return it.cur != nil
+ it.eof = it.cur == nil
+ return !it.eof
}
// Value returns the current time range.
diff --git a/src/x/time/range_iter_test.go b/src/x/time/range_iter_test.go
index e469785e4d..e651f0210e 100644
--- a/src/x/time/range_iter_test.go
+++ b/src/x/time/range_iter_test.go
@@ -56,3 +56,17 @@ func TestRangeIter(t *testing.T) {
require.False(t, it.Next())
}
+
+func TestRangeIterEOF(t *testing.T) {
+ it := newRangeIter(nil)
+ require.False(t, it.Next())
+
+ it = newRangeIter(getTestList())
+ var i int
+ for it.Next() {
+ require.Equal(t, testTimeRanges[i], it.Value())
+ i++
+ }
+	// Next() always returns false after hitting EOF.
+ require.False(t, it.Next())
+}
diff --git a/src/x/time/ranges.go b/src/x/time/ranges.go
index 1106f54566..f4a4f9c4bf 100644
--- a/src/x/time/ranges.go
+++ b/src/x/time/ranges.go
@@ -26,34 +26,44 @@ import (
)
// Ranges is a collection of time ranges.
-type Ranges struct {
+type Ranges interface {
+ AddRange(Range)
+ AddRanges(Ranges)
+ RemoveRange(Range)
+ RemoveRanges(Ranges)
+ Overlaps(Range) bool
+ Iter() *RangeIter
+ Clone() Ranges
+ Len() int
+ IsEmpty() bool
+ String() string
+}
+
+type ranges struct {
sortedRanges *list.List
}
// NewRanges constructs a new Ranges object comprising the provided ranges.
-func NewRanges(ranges ...Range) Ranges {
- var result Ranges
- for _, r := range ranges {
- result = result.AddRange(r)
+func NewRanges(in ...Range) Ranges {
+ res := &ranges{sortedRanges: list.New()}
+ for _, r := range in {
+ res.AddRange(r)
}
- return result
+ return res
}
// Len returns the number of ranges included.
-func (tr Ranges) Len() int {
- if tr.sortedRanges == nil {
- return 0
- }
+func (tr *ranges) Len() int {
return tr.sortedRanges.Len()
}
// IsEmpty returns true if the list of time ranges is empty.
-func (tr Ranges) IsEmpty() bool {
+func (tr *ranges) IsEmpty() bool {
return tr.Len() == 0
}
// Overlaps checks if the range overlaps with any of the ranges in the collection.
-func (tr Ranges) Overlaps(r Range) bool {
+func (tr *ranges) Overlaps(r Range) bool {
if r.IsEmpty() {
return false
}
@@ -66,54 +76,53 @@ func (tr Ranges) Overlaps(r Range) bool {
}
// AddRange adds the time range to the collection of ranges.
-func (tr Ranges) AddRange(r Range) Ranges {
- res := tr.clone()
- res.addRangeInPlace(r)
- return res
+func (tr *ranges) AddRange(r Range) {
+ tr.addRangeInPlace(r)
}
// AddRanges adds the time ranges.
-func (tr Ranges) AddRanges(other Ranges) Ranges {
- res := tr.clone()
+func (tr *ranges) AddRanges(other Ranges) {
it := other.Iter()
for it.Next() {
- res.addRangeInPlace(it.Value())
+ tr.addRangeInPlace(it.Value())
}
- return res
}
// RemoveRange removes the time range from the collection of ranges.
-func (tr Ranges) RemoveRange(r Range) Ranges {
- res := tr.clone()
- res.removeRangeInPlace(r)
- return res
+func (tr *ranges) RemoveRange(r Range) {
+ tr.removeRangeInPlace(r)
}
// RemoveRanges removes the given time ranges from the current one.
-func (tr Ranges) RemoveRanges(other Ranges) Ranges {
- res := tr.clone()
+func (tr *ranges) RemoveRanges(other Ranges) {
it := other.Iter()
for it.Next() {
- res.removeRangeInPlace(it.Value())
+ tr.removeRangeInPlace(it.Value())
}
- return res
}
// Iter returns an iterator that iterates over the time ranges included.
-func (tr Ranges) Iter() *RangeIter {
+func (tr *ranges) Iter() *RangeIter {
return newRangeIter(tr.sortedRanges)
}
+// Clone makes a clone of the time ranges.
+func (tr *ranges) Clone() Ranges {
+ res := &ranges{sortedRanges: list.New()}
+ for e := tr.sortedRanges.Front(); e != nil; e = e.Next() {
+ res.sortedRanges.PushBack(e.Value.(Range))
+ }
+ return res
+}
+
// String returns the string representation of the range.
-func (tr Ranges) String() string {
+func (tr *ranges) String() string {
var buf bytes.Buffer
buf.WriteString("[")
- if tr.sortedRanges != nil {
- for e := tr.sortedRanges.Front(); e != nil; e = e.Next() {
- buf.WriteString(e.Value.(Range).String())
- if e.Next() != nil {
- buf.WriteString(",")
- }
+ for e := tr.sortedRanges.Front(); e != nil; e = e.Next() {
+ buf.WriteString(e.Value.(Range).String())
+ if e.Next() != nil {
+ buf.WriteString(",")
}
}
buf.WriteString("]")
@@ -121,7 +130,7 @@ func (tr Ranges) String() string {
}
// addRangeInPlace adds r to tr in place without creating a new copy.
-func (tr Ranges) addRangeInPlace(r Range) {
+func (tr *ranges) addRangeInPlace(r Range) {
if r.IsEmpty() {
return
}
@@ -144,7 +153,7 @@ func (tr Ranges) addRangeInPlace(r Range) {
tr.sortedRanges.InsertBefore(r, e)
}
-func (tr Ranges) removeRangeInPlace(r Range) {
+func (tr *ranges) removeRangeInPlace(r Range) {
if r.IsEmpty() {
return
}
@@ -169,7 +178,7 @@ func (tr Ranges) removeRangeInPlace(r Range) {
}
// findFirstNotBefore finds the first interval that's not before r.
-func (tr Ranges) findFirstNotBefore(r Range) *list.Element {
+func (tr *ranges) findFirstNotBefore(r Range) *list.Element {
if tr.sortedRanges == nil {
return nil
}
@@ -180,15 +189,3 @@ func (tr Ranges) findFirstNotBefore(r Range) *list.Element {
}
return nil
}
-
-// clone returns a copy of the time ranges.
-func (tr Ranges) clone() Ranges {
- res := Ranges{sortedRanges: list.New()}
- if tr.sortedRanges == nil {
- return res
- }
- for e := tr.sortedRanges.Front(); e != nil; e = e.Next() {
- res.sortedRanges.PushBack(e.Value.(Range))
- }
- return res
-}
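
The move from a value type to an interface flips the mutation model: AddRange and RemoveRange now mutate in place, so callers that depended on the old copy-on-write behavior need an explicit Clone. A minimal sketch of the new call pattern (start, r1, and r2 are illustrative):

start := time.Unix(1465430400, 0).UTC()
r1 := Range{Start: start, End: start.Add(time.Hour)}
r2 := Range{Start: start.Add(2 * time.Hour), End: start.Add(3 * time.Hour)}

tr := NewRanges(r1)
saved := tr.Clone() // the old value semantics made this copy implicit
tr.AddRange(r2)
tr.RemoveRange(r1)
// tr now contains only r2; saved still contains only r1.
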
diff --git a/src/x/time/ranges_test.go b/src/x/time/ranges_test.go
index 1cf26dcb21..fd810150a6 100644
--- a/src/x/time/ranges_test.go
+++ b/src/x/time/ranges_test.go
@@ -27,16 +27,6 @@ import (
"github.com/stretchr/testify/require"
)
-func validateResult(t *testing.T, tr Ranges, expected []Range) {
- l := tr.sortedRanges
- require.Equal(t, len(expected), l.Len())
- idx := 0
- for e := l.Front(); e != nil; e = e.Next() {
- require.Equal(t, e.Value.(Range), expected[idx])
- idx++
- }
-}
-
func validateIter(t *testing.T, it *RangeIter, expected []Range) {
idx := 0
for it.Next() {
@@ -68,28 +58,30 @@ func getRangesToRemove() []Range {
}
func getPopulatedRanges(ranges []Range, start, end int) Ranges {
- var tr Ranges
+ tr := NewRanges()
for _, r := range ranges[start:end] {
- tr = tr.AddRange(r)
+ tr.AddRange(r)
}
return tr
}
func TestIsEmpty(t *testing.T) {
- var tr Ranges
+ tr := NewRanges()
require.True(t, tr.IsEmpty())
- tr = tr.clone()
- tr.sortedRanges.PushBack(Range{})
+ tr.AddRange(getRangesToAdd()[0])
require.False(t, tr.IsEmpty())
}
func TestNewRanges(t *testing.T) {
rangesToAdd := getRangesToAdd()
exp := getPopulatedRanges(rangesToAdd, 0, len(rangesToAdd))
+ clone := exp.Clone()
obs := NewRanges(rangesToAdd...)
- require.True(t, exp.RemoveRanges(obs).IsEmpty())
- require.True(t, obs.RemoveRanges(exp).IsEmpty())
+ exp.RemoveRanges(clone)
+ require.True(t, exp.IsEmpty())
+ obs.RemoveRanges(clone)
+ require.True(t, obs.IsEmpty())
}
func TestClone(t *testing.T) {
@@ -97,18 +89,18 @@ func TestClone(t *testing.T) {
tr := getPopulatedRanges(rangesToAdd, 0, 4)
expectedResults := []Range{rangesToAdd[3], rangesToAdd[2], rangesToAdd[0], rangesToAdd[1]}
- validateResult(t, tr, expectedResults)
+ validateIter(t, tr.Iter(), expectedResults)
- cloned := tr.clone()
- tr = tr.RemoveRange(rangesToAdd[0])
- validateResult(t, cloned, expectedResults)
- validateResult(t, tr, []Range{rangesToAdd[3], rangesToAdd[2], rangesToAdd[1]})
+ cloned := tr.Clone()
+ tr.RemoveRange(rangesToAdd[0])
+ validateIter(t, cloned.Iter(), expectedResults)
+ validateIter(t, tr.Iter(), []Range{rangesToAdd[3], rangesToAdd[2], rangesToAdd[1]})
}
func TestAddRange(t *testing.T) {
- var tr Ranges
- tr = tr.AddRange(Range{})
- validateResult(t, tr, []Range{})
+ tr := NewRanges()
+ tr.AddRange(Range{})
+ validateIter(t, tr.Iter(), []Range{})
rangestoAdd := getRangesToAdd()
expectedResults := [][]Range{
@@ -121,30 +113,30 @@ func TestAddRange(t *testing.T) {
{{Start: testStart.Add(-10 * time.Second), End: testStart.Add(15 * time.Second)}},
}
- saved := tr
+ saved := tr.Clone()
for i, r := range rangestoAdd {
- tr = tr.AddRange(r)
- validateResult(t, tr, expectedResults[i])
+ tr.AddRange(r)
+ validateIter(t, tr.Iter(), expectedResults[i])
}
- validateResult(t, saved, []Range{})
+ validateIter(t, saved.Iter(), []Range{})
}
func TestAddRanges(t *testing.T) {
rangesToAdd := getRangesToAdd()
tr := getPopulatedRanges(rangesToAdd, 0, 4)
- tr = tr.AddRanges(Ranges{})
+ tr.AddRanges(NewRanges())
expectedResults := []Range{rangesToAdd[3], rangesToAdd[2], rangesToAdd[0], rangesToAdd[1]}
- validateResult(t, tr, expectedResults)
+ validateIter(t, tr.Iter(), expectedResults)
tr2 := getPopulatedRanges(rangesToAdd, 4, 7)
- saved := tr
- tr = tr.AddRanges(tr2)
+ saved := tr.Clone()
+ tr.AddRanges(tr2)
expectedResults2 := []Range{{Start: testStart.Add(-10 * time.Second), End: testStart.Add(15 * time.Second)}}
- validateResult(t, tr, expectedResults2)
- validateResult(t, saved, expectedResults)
+ validateIter(t, tr.Iter(), expectedResults2)
+ validateIter(t, saved.Iter(), expectedResults)
}
func TestRemoveRange(t *testing.T) {
@@ -173,26 +165,26 @@ func TestRemoveRange(t *testing.T) {
},
}
- saved := tr
+ saved := tr.Clone()
for i, r := range rangesToRemove {
- tr = tr.RemoveRange(r)
- validateResult(t, tr, expectedResults[i])
+ tr.RemoveRange(r)
+ validateIter(t, tr.Iter(), expectedResults[i])
}
- tr = tr.RemoveRange(Range{})
- validateResult(t, tr, expectedResults[3])
+ tr.RemoveRange(Range{})
+ validateIter(t, tr.Iter(), expectedResults[3])
- tr = tr.RemoveRange(Range{
+ tr.RemoveRange(Range{
Start: testStart.Add(-10 * time.Second),
End: testStart.Add(15 * time.Second),
})
require.True(t, tr.IsEmpty())
- validateResult(t, saved, expectedResults[0])
+ validateIter(t, saved.Iter(), expectedResults[0])
}
func TestRemoveRanges(t *testing.T) {
tr := getPopulatedRanges(getRangesToAdd(), 0, 4)
- tr = tr.RemoveRanges(Ranges{})
+ tr.RemoveRanges(NewRanges())
expectedResults := []Range{
{Start: testStart.Add(-8 * time.Second), End: testStart.Add(-5 * time.Second)},
@@ -200,18 +192,18 @@ func TestRemoveRanges(t *testing.T) {
{Start: testStart, End: testStart.Add(time.Second)},
{Start: testStart.Add(10 * time.Second), End: testStart.Add(15 * time.Second)},
}
- validateResult(t, tr, expectedResults)
+ validateIter(t, tr.Iter(), expectedResults)
- saved := tr
+ saved := tr.Clone()
tr2 := getPopulatedRanges(getRangesToRemove(), 0, 4)
- tr = tr.RemoveRanges(tr2)
+ tr.RemoveRanges(tr2)
expectedResults2 := []Range{
{Start: testStart.Add(-8 * time.Second), End: testStart.Add(-6 * time.Second)},
{Start: testStart.Add(13 * time.Second), End: testStart.Add(15 * time.Second)},
}
- validateResult(t, tr, expectedResults2)
- validateResult(t, saved, expectedResults)
+ validateIter(t, tr.Iter(), expectedResults2)
+ validateIter(t, saved.Iter(), expectedResults)
}
func TestOverlaps(t *testing.T) {
@@ -236,15 +228,16 @@ func TestRangesIter(t *testing.T) {
{Start: testStart.Add(10 * time.Second), End: testStart.Add(15 * time.Second)},
}
validateIter(t, tr.Iter(), expectedResults)
- tr = tr.RemoveRange(rangesToAdd[2])
+ tr.RemoveRange(rangesToAdd[2])
validateIter(t, tr.Iter(), append(expectedResults[:1], expectedResults[2:]...))
}
func TestRangesString(t *testing.T) {
- var tr Ranges
+ tr := NewRanges()
require.Equal(t, "[]", tr.String())
start := time.Unix(1465430400, 0).UTC()
- tr = tr.AddRange(Range{Start: start, End: start.Add(2 * time.Hour)}).
- AddRange(Range{Start: start.Add(4 * time.Hour), End: start.Add(5 * time.Hour)})
+ tr.AddRanges(NewRanges(
+ Range{Start: start, End: start.Add(2 * time.Hour)},
+ Range{Start: start.Add(4 * time.Hour), End: start.Add(5 * time.Hour)}))
require.Equal(t, "[(2016-06-09 00:00:00 +0000 UTC,2016-06-09 02:00:00 +0000 UTC),(2016-06-09 04:00:00 +0000 UTC,2016-06-09 05:00:00 +0000 UTC)]", tr.String())
}
diff --git a/src/x/time/unit.go b/src/x/time/unit.go
index 36f50789c9..f120def9d3 100644
--- a/src/x/time/unit.go
+++ b/src/x/time/unit.go
@@ -49,14 +49,14 @@ var (
)
// Unit represents a time unit.
-type Unit byte
+type Unit uint16
// Value is the time duration of the time unit.
func (tu Unit) Value() (time.Duration, error) {
- if d, found := unitsToDuration[tu]; found {
- return d, nil
+ if tu < 1 || int(tu) >= unitCount {
+ return 0, errUnrecognizedTimeUnit
}
- return 0, errUnrecognizedTimeUnit
+ return time.Duration(unitsToDuration[tu]), nil
}
// Count returns the number of units contained within the duration.
@@ -65,12 +65,12 @@ func (tu Unit) Count(d time.Duration) (int, error) {
return 0, errNegativeDuraton
}
- if dur, found := unitsToDuration[tu]; found {
- return int(d / dur), nil
+ if tu < 1 || int(tu) >= unitCount {
+ return 0, errUnrecognizedTimeUnit
}
- // Invalid unit.
- return 0, errUnrecognizedTimeUnit
+ dur := unitsToDuration[tu]
+ return int(d / dur), nil
}
// MustCount is like Count but panics if d is negative or if tu is not
@@ -86,17 +86,24 @@ func (tu Unit) MustCount(d time.Duration) int {
// IsValid returns whether the given time unit is valid / supported.
func (tu Unit) IsValid() bool {
- _, valid := unitsToDuration[tu]
- return valid
+ return tu > 0 && int(tu) < unitCount
+}
+
+// Validate will validate the time unit.
+func (tu Unit) Validate() error {
+ if !tu.IsValid() {
+ return errUnrecognizedTimeUnit
+ }
+ return nil
}
// String returns the string representation for the time unit
func (tu Unit) String() string {
- if s, found := unitStrings[tu]; found {
- return s
+ if tu < 1 || int(tu) >= unitCount {
+ return "unknown"
}
- return "unknown"
+ return unitStrings[tu]
}
// UnitFromDuration creates a time unit from a time duration.
@@ -110,11 +117,11 @@ func UnitFromDuration(d time.Duration) (Unit, error) {
// DurationFromUnit creates a time duration from a time unit.
func DurationFromUnit(u Unit) (time.Duration, error) {
- if duration, found := unitsToDuration[u]; found {
- return duration, nil
+ if u < 1 || int(u) >= unitCount {
+ return 0, errConvertUnitToDuration
}
- return 0, errConvertUnitToDuration
+ return unitsToDuration[u], nil
}
// MaxUnitForDuration determines the maximum unit for which
@@ -153,28 +160,34 @@ func MaxUnitForDuration(d time.Duration) (int64, Unit) {
}
var (
- unitStrings = map[Unit]string{
- Second: "s",
- Millisecond: "ms",
- Nanosecond: "ns",
- Microsecond: "us",
- Minute: "m",
- Hour: "h",
- Day: "d",
- Year: "y",
+ unitStrings = []string{
+ "unknown",
+ "s",
+ "ms",
+ "us",
+ "ns",
+ "m",
+ "h",
+ "d",
+ "y",
}
- durationsToUnit = make(map[time.Duration]Unit)
- unitsToDuration = map[Unit]time.Duration{
+ // Using an array here to avoid map access cost.
+ unitsToDuration = []time.Duration{
+ None: time.Duration(0),
Second: time.Second,
Millisecond: time.Millisecond,
- Nanosecond: time.Nanosecond,
Microsecond: time.Microsecond,
+ Nanosecond: time.Nanosecond,
Minute: time.Minute,
Hour: time.Hour,
Day: time.Hour * 24,
Year: time.Hour * 24 * 365,
}
+ durationsToUnit = make(map[time.Duration]Unit)
+
+ unitCount = len(unitsToDuration)
+
unitsByDurationDesc []Unit
)
@@ -192,10 +205,20 @@ func (b byDurationDesc) Less(i, j int) bool {
}
func init() {
- unitsByDurationDesc = make([]Unit, 0, len(unitsToDuration))
+ unitsByDurationDesc = make([]Unit, 0, unitCount)
for u, d := range unitsToDuration {
- durationsToUnit[d] = u
- unitsByDurationDesc = append(unitsByDurationDesc, u)
+ unit := Unit(u)
+ if unit == None {
+ continue
+ }
+
+ durationsToUnit[d] = unit
+ unitsByDurationDesc = append(unitsByDurationDesc, unit)
}
sort.Sort(byDurationDesc(unitsByDurationDesc))
}
+
+// UnitCount returns the total number of unit types.
+func UnitCount() int {
+ return unitCount
+}
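
The guard `tu < 1 || int(tu) >= unitCount` rejects both None (index 0) and out-of-range values, so every accessor reduces to a bounds check plus one array load. The shape of the hot path, as a hypothetical helper:

func durationFor(tu Unit) (time.Duration, bool) {
	if tu < 1 || int(tu) >= unitCount {
		return 0, false // None or unrecognized
	}
	return unitsToDuration[tu], true // single array load, no hashing
}
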
diff --git a/src/x/unsafe/rand.go b/src/x/unsafe/rand.go
new file mode 100644
index 0000000000..03da8bf2dc
--- /dev/null
+++ b/src/x/unsafe/rand.go
@@ -0,0 +1,36 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package unsafe contains operations that step around the type safety of Go programs.
+package unsafe
+
+import (
+ _ "unsafe" // needed for go:linkname hack
+)
+
+//go:linkname fastrand runtime.fastrand
+func fastrand() uint32
+
+// Fastrandn returns a uint32 in the range [0, n), like rand() % n, but faster and not
+// as precise: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+func Fastrandn(n uint32) uint32 {
+ // This is similar to fastrand() % n, but faster.
+ return uint32(uint64(fastrand()) * uint64(n) >> 32)
+}
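
The multiply-shift computes floor(x * n / 2^32), mapping a uniform 32-bit x onto [0, n) without a division: with n = 10 and x = 2^31 the result is (2^31 * 10) >> 32 = 5, so the midpoint of the 32-bit range lands on the midpoint of [0, 10). A hypothetical call site:

// pickConn spreads load across connections without the cost of a
// modulo; the bias is at most n/2^32, negligible for small n.
func pickConn(conns []net.Conn) net.Conn {
	return conns[Fastrandn(uint32(len(conns)))]
}
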
diff --git a/tools.go b/tools.go
new file mode 100644
index 0000000000..10eb26c156
--- /dev/null
+++ b/tools.go
@@ -0,0 +1,19 @@
+// +build tools
+
+package tools
+
+import (
+ _ "github.com/fossas/fossa-cli/cmd/fossa"
+ _ "github.com/garethr/kubeval"
+ _ "github.com/golang/mock/mockgen"
+ _ "github.com/google/go-jsonnet/cmd/jsonnet"
+ _ "github.com/m3db/build-tools/linters/badtime"
+ _ "github.com/m3db/build-tools/linters/importorder"
+ _ "github.com/m3db/build-tools/utilities/genclean"
+ _ "github.com/m3db/tools/update-license"
+ _ "github.com/mauricelam/genny"
+ _ "github.com/mjibson/esc"
+ _ "github.com/pointlander/peg"
+ _ "github.com/rakyll/statik"
+ _ "github.com/robskillington/gorename"
+)
diff --git a/tools.json b/tools.json
deleted file mode 100644
index dc8d3e0675..0000000000
--- a/tools.json
+++ /dev/null
@@ -1,49 +0,0 @@
-{
- "Tools": [
- {
- "Repository": "github.com/Masterminds/glide",
- "Commit": "5539dc14dc6c62b10c1bb1412bbdf08e7fda85bb"
- },
- {
- "Repository": "github.com/m3db/build-tools/utilities/genclean",
- "Commit": "edd1bdd1df8ac1a71d4783178fce9791fa363dfa"
- },
- {
- "Repository": "github.com/mjibson/esc",
- "Commit": "58d9cde84f237ecdd89bd7f61c2de2853f4c5c6e"
- },
- {
- "Repository": "github.com/mauricelam/genny",
- "Commit": "9d8700bcc567cd22ea2ef42ce5835a9c80296c4a"
- },
- {
- "Repository": "github.com/m3db/tools/update-license",
- "Commit": "c6ded3f348785aa05a95e4a023fcec169828cba5"
- },
- {
- "Repository": "github.com/fossas/fossa-cli/cmd/fossa",
- "Commit": "638f9f79fac6fd4b4fa2eb575d1faea4c5d04807"
- },
- {
- "Repository": "github.com/pointlander/peg",
- "Commit": "eb55cbbebc6d419fa4fcf4baa100aa0a9ee95ec0"
- },
- {
- "Repository": "github.com/golang/mock/mockgen",
- "Commit": "51421b967af1f557f93a59e0057aaf15ca02e29c"
- },
- {
- "Repository": "github.com/rakyll/statik",
- "Commit": "19b88da8fc15428620782ba18f68423130e7ac7d"
- },
- {
- "Repository": "github.com/garethr/kubeval",
- "Commit": "c44f5193dc944abc00e60a1b041ce48b0ae03dfb"
- },
- {
- "Repository": "github.com/google/go-jsonnet/cmd/jsonnet",
- "Commit": "71a3e169581ece942e77bfc47840a806eb868ca8"
- }
- ],
- "RetoolVersion": "1.3.7"
-}